/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static const struct file_operations fuse_direct_io_file_operations;
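
/*
 * Send FUSE_OPEN or FUSE_OPENDIR and collect the server's reply in
 * @outargp.  O_CREAT, O_EXCL and O_NOCTTY are handled by the VFS, so
 * they are masked out; O_TRUNC is only passed through if the server
 * supports atomic open+truncate.
 */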
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
			  struct fuse_open_out *outargp)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (ff) {
		ff->reserved_req = fuse_request_alloc();
		if (!ff->reserved_req) {
			kfree(ff);
			return NULL;
		} else {
			INIT_LIST_HEAD(&ff->write_entry);
			atomic_set(&ff->count, 0);
			spin_lock(&fc->lock);
			ff->kh = ++fc->khctr;
			spin_unlock(&fc->lock);
		}
		RB_CLEAR_NODE(&ff->polled_node);
		init_waitqueue_head(&ff->poll_wait);
	}
	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	dput(req->misc.release.dentry);
	mntput(req->misc.release.vfsmount);
}
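
/*
 * Drop a reference to the file.  When the last reference goes away,
 * the RELEASE request prepared in the file's reserved request is sent
 * to userspace in the background and the fuse_file is freed.
 */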
static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;
		struct inode *inode = req->misc.release.dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		req->end = fuse_release_end;
		fuse_request_send_background(fc, req);
		kfree(ff);
	}
}
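
/*
 * Apply the open flags returned by the server: switch to the direct-IO
 * file operations, drop the cached pages unless the server asked to
 * keep them, and mark the file nonseekable if requested.
 */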
void fuse_finish_open(struct inode *inode, struct file *file,
		      struct fuse_file *ff, struct fuse_open_out *outarg)
{
	if (outarg->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (outarg->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	ff->fh = outarg->fh;
	file->private_data = fuse_file_get(ff);
}
int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(inode, file, isdir, &outarg);
	if (err)
		fuse_file_free(ff);
	else {
		if (isdir)
			outarg.open_flags &= ~FOPEN_DIRECT_IO;
		fuse_finish_open(inode, file, ff, &outarg);
	}

	return err;
}
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_file *ff = file->private_data;
	if (ff) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_req *req = ff->reserved_req;

		fuse_release_fill(ff, get_node_id(inode), file->f_flags,
				  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

		/* Hold vfsmount and dentry until release is finished */
		req->misc.release.vfsmount = mntget(file->f_path.mnt);
		req->misc.release.dentry = dget(file->f_path.dentry);

		spin_lock(&fc->lock);
		list_del(&ff->write_entry);
		if (!RB_EMPTY_NODE(&ff->polled_node))
			rb_erase(&ff->polled_node, &fc->polled_files);
		spin_unlock(&fc->lock);

		wake_up_interruptible_sync(&ff->poll_wait);
		/*
		 * Normally this will send the RELEASE request,
		 * however if some asynchronous READ or WRITE requests
		 * are outstanding, the sending will be delayed
		 */
		fuse_file_put(ff);
	}

	/* Return value is ignored by VFS */
	return 0;
}
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	return fuse_release_common(inode, file, 0);
}

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}
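
/*
 * Send a FLUSH request carrying the lock owner, so the server can
 * release POSIX locks held on the file.  The request is forced, i.e.
 * sent even if the caller was interrupted; -ENOSYS from the server
 * disables FLUSH for the whole connection.
 */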
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
		      int isdir)
{
	struct inode *inode = de->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	return fuse_fsync_common(file, de, datasync, 0);
}
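
/*
 * Fill in the common parts of a READ request; the opcode is passed in
 * by the caller.  The reply length is variable (req->out.argvar),
 * since the server may return a short count at end of file.
 */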
void fuse_read_fill(struct fuse_req *req, struct file *file,
		    struct inode *inode, loff_t pos, size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     struct inode *inode, loff_t pos, size_t count,
			     fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}
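
/*
 * A short read indicates EOF.  Shrink the cached i_size accordingly,
 * but only if the attributes haven't changed since the read was
 * issued (checked via the attribute version counter).
 */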
static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
out:
	unlock_page(page);
	return err;
}
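
/*
 * Completion handler for readpages requests: update i_size on a short
 * read, mark each page up to date or in error, and unlock it.
 */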
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct inode *inode = req->pages[0]->mapping->host;

	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (!req->out.h.error && num_read < count) {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
	}

	fuse_invalidate_attr(inode); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file,
				struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		struct fuse_file *ff = file->private_data;
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}
struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file, inode);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file, inode);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}
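
/*
 * Fill in the common parts of a WRITE request.  Connections speaking a
 * protocol minor version older than 9 expect the shorter compat layout
 * of struct fuse_write_in.
 */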
static void fuse_write_fill(struct fuse_req *req, struct file *file,
			    struct fuse_file *ff, struct inode *inode,
			    loff_t pos, size_t count, int writepage)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	memset(inarg, 0, sizeof(struct fuse_write_in));
	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
	inarg->flags = file ? file->f_flags : 0;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 2;
	if (fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      struct inode *inode, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_write_fill(req, file, file->private_data, inode, pos, count, 0);
	if (owner != NULL) {
		struct fuse_write_in *inarg = &req->misc.write.in;

		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = grab_cache_page_write_begin(mapping, index, flags);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}

static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}
static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, inode, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
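
/*
 * Copy as much data as possible from the iov_iter into page-cache
 * pages and push it out in (possibly multi-page) WRITE requests,
 * repeating until everything is written or an error occurs.  A short
 * count from the server is treated as -EIO.
 */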
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       size_t *nbytesp, int write)
{
	size_t nbytes = *nbytesp;
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		return 0;
	}

	nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, !write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
	*nbytesp = min(*nbytesp, nbytes);

	return 0;
}
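
/*
 * Common code for direct IO: pin the user buffer with get_user_pages()
 * and transfer it in chunks of at most max_read/max_write bytes per
 * request, retrying with a fresh request while data remains.
 */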
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos, int write)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, inode, pos, nbytes,
					       current->files);
		else
			nres = fuse_send_read(req, file, inode, pos, nbytes,
					      current->files);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	fuse_put_request(fc, req);
	if (res > 0) {
		if (write)
			fuse_write_update_size(inode, pos);
		*ppos = pos;
	}
	fuse_invalidate_attr(inode);

	return res;
}
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	return fuse_direct_io(file, buf, count, ppos, 0);
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res)
		res = fuse_direct_io(file, buf, count, ppos, 1);
	mutex_unlock(&inode->i_mutex);
	return res;
}
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock held.
 */
void fuse_flush_writepages(struct inode *inode)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}
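
/*
 * Write back a dirty page.  The contents are copied to a temporary
 * page, so page writeback can be ended (and the original page
 * unlocked) immediately; the WRITE request itself is queued on the
 * inode and sent once fi->writectr allows it.
 */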
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1);

	copy_highpage(tmp_page, page);
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}
/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated operations:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * The file may be written through mmap, so chain it onto
		 * the inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}
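
/*
 * Set or test a lock via the server.  NLM's asynchronous lock requests
 * are not supported, and unlock-on-close is left to the flush method.
 */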
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
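
/*
 * Map a file block to a device block for FIBMAP.  This only makes
 * sense on block-device backed filesystems; -ENOSYS from the server
 * disables BMAP for the whole connection.
 */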
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
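
/*
 * llseek: for SEEK_END the file attributes (and hence i_size) must be
 * refreshed from the server first.
 */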
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
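
/*
 * Copy @bytes between the kmapped @pages and the user iovec, the
 * direction depending on @to_user.
 */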
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr, *map;

		kaddr = map = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left)) {
				/* don't leak the kmap on a failed copy */
				kunmap(map);
				return -EFAULT;
			}

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(map);
	}

	return 0;
}
/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the invocation,
 * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct page *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

	/* assume all the iovs returned by client always fit in a page */
	BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = -ENOMEM;
	pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
	iov_page = alloc_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = page_address(iov_page);

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or iovs,
	 * make sure there always is at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		char *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure the iov counts stay within bounds; the
		 * separate checks protect against overflow.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		err = -EIO;
		if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
			goto out;

		/* okay, copy in iovs and retry */
		vaddr = kmap_atomic(pages[0], KM_USER0);
		memcpy(page_address(iov_page), vaddr, transferred);
		kunmap_atomic(vaddr, KM_USER0);

		in_iov = page_address(iov_page);
		out_iov = in_iov + in_iovs;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
out:
	if (req)
		fuse_put_request(fc, req);
	if (iov_page)
		__free_page(iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
  1519. static long fuse_file_ioctl(struct file *file, unsigned int cmd,
  1520. unsigned long arg)
  1521. {
  1522. return fuse_file_do_ioctl(file, cmd, arg, 0);
  1523. }
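
/*
 * FUSE_IOCTL_COMPAT tells the server that the ioctl was issued by a
 * 32bit process on a 64bit kernel, so that it can interpret the
 * argument layout accordingly.
 */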
static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
                                   unsigned long arg)
{
        return fuse_file_do_ioctl(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files which have been polled are linked to the RB tree
 * fuse_conn->polled_files, which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
                                              struct rb_node **parent_out)
{
        struct rb_node **link = &fc->polled_files.rb_node;
        struct rb_node *last = NULL;

        while (*link) {
                struct fuse_file *ff;

                last = *link;
                ff = rb_entry(last, struct fuse_file, polled_node);

                if (kh < ff->kh)
                        link = &last->rb_left;
                else if (kh > ff->kh)
                        link = &last->rb_right;
                else
                        return link;
        }

        if (parent_out)
                *parent_out = last;
        return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
                                      struct fuse_file *ff)
{
        spin_lock(&fc->lock);
        if (RB_EMPTY_NODE(&ff->polled_node)) {
                struct rb_node **link, *parent;

                link = fuse_find_polled_node(fc, ff->kh, &parent);
                BUG_ON(*link);
                rb_link_node(&ff->polled_node, parent, link);
                rb_insert_color(&ff->polled_node, &fc->polled_files);
        }
        spin_unlock(&fc->lock);
}

static unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
        struct fuse_poll_out outarg;
        struct fuse_req *req;
        int err;

        if (fc->no_poll)
                return DEFAULT_POLLMASK;

        poll_wait(file, &ff->poll_wait, wait);

        /*
         * Ask for notification iff there's someone waiting for it.
         * The client may ignore the flag and always notify.
         */
        if (waitqueue_active(&ff->poll_wait)) {
                inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
                fuse_register_polled_file(fc, ff);
        }

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->in.h.opcode = FUSE_POLL;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        if (!err)
                return outarg.revents;
        if (err == -ENOSYS) {
                fc->no_poll = 1;
                return DEFAULT_POLLMASK;
        }
        return POLLERR;
}

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
                            struct fuse_notify_poll_wakeup_out *outarg)
{
        u64 kh = outarg->kh;
        struct rb_node **link;

        spin_lock(&fc->lock);

        link = fuse_find_polled_node(fc, kh, NULL);
        if (*link) {
                struct fuse_file *ff;

                ff = rb_entry(*link, struct fuse_file, polled_node);
                wake_up_interruptible_sync(&ff->poll_wait);
        }

        spin_unlock(&fc->lock);
        return 0;
}
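
/*
 * For reference, a sketch of the server side (an assumption about a
 * libfuse-style low-level API, not part of this kernel code; my_poll(),
 * saved_ph and ready_events() are hypothetical names).  The server
 * answers FUSE_POLL with fuse_reply_poll() and, when
 * FUSE_POLL_SCHEDULE_NOTIFY was requested, keeps the poll handle so it
 * can later emit the FUSE_NOTIFY_POLL message handled above:
 *
 *	static void my_poll(fuse_req_t req, fuse_ino_t ino,
 *			    struct fuse_file_info *fi,
 *			    struct fuse_pollhandle *ph)
 *	{
 *		if (ph)
 *			saved_ph = ph;
 *		fuse_reply_poll(req, ready_events());
 *	}
 *
 * Later, when the file becomes ready:
 *
 *	fuse_lowlevel_notify_poll(saved_ph);
 *	fuse_pollhandle_destroy(saved_ph);
 */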
static const struct file_operations fuse_file_operations = {
        .llseek         = fuse_file_llseek,
        .read           = do_sync_read,
        .aio_read       = fuse_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = fuse_file_aio_write,
        .mmap           = fuse_file_mmap,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .lock           = fuse_file_lock,
        .flock          = fuse_file_flock,
        .splice_read    = generic_file_splice_read,
        .unlocked_ioctl = fuse_file_ioctl,
        .compat_ioctl   = fuse_file_compat_ioctl,
        .poll           = fuse_file_poll,
};

static const struct file_operations fuse_direct_io_file_operations = {
        .llseek         = fuse_file_llseek,
        .read           = fuse_direct_read,
        .write          = fuse_direct_write,
        .mmap           = fuse_direct_mmap,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .lock           = fuse_file_lock,
        .flock          = fuse_file_flock,
        .unlocked_ioctl = fuse_file_ioctl,
        .compat_ioctl   = fuse_file_compat_ioctl,
        .poll           = fuse_file_poll,
        /* no splice_read */
};

static const struct address_space_operations fuse_file_aops = {
        .readpage       = fuse_readpage,
        .writepage      = fuse_writepage,
        .launder_page   = fuse_launder_page,
        .write_begin    = fuse_write_begin,
        .write_end      = fuse_write_end,
        .readpages      = fuse_readpages,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .bmap           = fuse_bmap,
};

void fuse_init_file_inode(struct inode *inode)
{
        inode->i_fop = &fuse_file_operations;
        inode->i_data.a_ops = &fuse_file_aops;
}