/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>

static const struct file_operations fuse_direct_io_file_operations;
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc();
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	path_put(&req->misc.release.path);
}

static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		req->end = fuse_release_end;
		fuse_request_send_background(ff->fc, req);
		kfree(ff);
	}
}

int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
	if (err) {
		fuse_file_free(ff);
		return err;
	}

	if (isdir)
		outarg.open_flags &= ~FOPEN_DIRECT_IO;

	ff->fh = outarg.fh;
	ff->nodeid = nodeid;
	ff->open_flags = outarg.open_flags;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	/* VFS checks this, but only _after_ ->open() */
	if (file->f_flags & O_DIRECT)
		return -EINVAL;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
	if (err)
		return err;

	fuse_finish_open(inode, file);

	return 0;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_sync(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 */
	fuse_file_put(ff);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
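
/*
 * Illustrative usage sketch: callers in this file (fuse_flush(),
 * fuse_lk_fill(), the read/write lock-owner paths) all derive the
 * wire-visible owner the same way, e.g.
 *
 *	inarg.lock_owner = fuse_lock_owner_id(fc, id);
 *
 * Since fc->scramble_key is fixed for the lifetime of a connection,
 * equal lock owners always scramble to equal 64-bit values, which is
 * all the userspace server needs in order to match locks, while the
 * raw files_struct pointer stays hidden.
 */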
/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
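
/*
 * Rough sketch of the mechanism, assuming the fuse_set_nowrite() /
 * fuse_release_nowrite() helpers behave as their names suggest:
 * setting FUSE_NOWRITE drives fi->writectr negative, so
 * fuse_flush_writepages() below queues new writepage requests instead
 * of sending them, and the setter waits for every in-flight request
 * to complete (fuse_writepage_finish() wakes fi->page_waitq);
 * releasing restores the counter and flushes whatever was queued in
 * the meantime.
 */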
int fuse_fsync_common(struct file *file, int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}

static int fuse_fsync(struct file *file, int datasync)
{
	return fuse_fsync_common(file, datasync, 0);
}
void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}
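
/*
 * Note on the reply format: req->out.argvar = 1 marks the single out
 * argument as variable length, so after the request completes
 * req->out.args[0].size holds the number of bytes the server actually
 * returned (see fuse_send_read() below).  This is how short reads,
 * and hence EOF, are detected by the callers.
 */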
static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	num_read = fuse_send_read(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct inode *inode = req->pages[0]->mapping->host;

	/*
	 * Short read means EOF.  If file size is larger, truncate it
	 */
	if (!req->out.h.error && num_read < count) {
		loff_t pos = page_offset(req->pages[0]) + num_read;

		fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
	}

	fuse_invalidate_attr(inode); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	if (req->ff)
		fuse_file_put(req->ff);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_send_readpages(req, data->file);
		data->req = req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}
	req->pages[req->num_pages] = page;
	req->num_pages++;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}
static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = grab_cache_page_write_begin(mapping, index, flags);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}

static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}
static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int res = 0;

	if (copied)
		res = fuse_buffered_write(file, inode, pos, copied, page);

	unlock_page(page);
	page_cache_release(page);
	return res;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, pos, count, NULL);

	offset = req->page_offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;

		req = fuse_get_req(fc);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       size_t *nbytesp, int write)
{
	size_t nbytes = *nbytesp;
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		return 0;
	}

	nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, !write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
	*nbytesp = min(*nbytesp, nbytes);

	return 0;
}

ssize_t fuse_direct_io(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos, int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);

		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, buf, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res) {
		res = fuse_direct_io(file, buf, count, ppos, 1);
		if (res > 0)
			fuse_write_update_size(inode, *ppos);
	}
	mutex_unlock(&inode->i_mutex);

	fuse_invalidate_attr(inode);

	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	__free_page(req->pages[0]);
	fuse_file_put(req->ff);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	list_del(&req->writepages_entry);
	dec_bdi_stat(bdi, BDI_WRITEBACK);
	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
	bdi_writeout_inc(bdi);
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	loff_t size = i_size_read(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
		inarg->size = PAGE_CACHE_SIZE;
	} else if (inarg->offset < size) {
		inarg->size = size & (PAGE_CACHE_SIZE - 1);
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is non-negative (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock held.
 */
void fuse_flush_writepages(struct inode *inode)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req);
	}
}
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct fuse_file *ff;
	struct page *tmp_page;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs();
	if (!req)
		goto err;

	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fi->write_files));
	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
	req->ff = fuse_file_get(ff);
	spin_unlock(&fc->lock);

	fuse_write_fill(req, ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_offset = 0;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	end_page_writeback(page);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	return 0;

err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

static int fuse_launder_page(struct page *page)
{
	int err = 0;

	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;

		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	/*
	 * Don't use page->mapping as it may become NULL from a
	 * concurrent truncate.
	 */
	struct inode *inode = vma->vm_file->f_mapping->host;

	fuse_wait_on_page_writeback(inode, page->index);
	return 0;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.page_mkwrite	= fuse_page_mkwrite,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	fuse_request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr, *map;

		kaddr = map = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left)) {
				/* drop the mapping before bailing out */
				kunmap(page);
				return -EFAULT;
			}

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}

/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the second
 * invocation, it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and the iov
 * array to
 *
 * { { .iov_base = inarg.arg,	.iov_len = sizeof(struct a)	},
 *   { .iov_base = a.buf,	.iov_len = a.buflen		} }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
  1397. long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
  1398. unsigned int flags)
  1399. {
  1400. struct fuse_file *ff = file->private_data;
  1401. struct fuse_conn *fc = ff->fc;
  1402. struct fuse_ioctl_in inarg = {
  1403. .fh = ff->fh,
  1404. .cmd = cmd,
  1405. .arg = arg,
  1406. .flags = flags
  1407. };
  1408. struct fuse_ioctl_out outarg;
  1409. struct fuse_req *req = NULL;
  1410. struct page **pages = NULL;
  1411. struct page *iov_page = NULL;
  1412. struct iovec *in_iov = NULL, *out_iov = NULL;
  1413. unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
  1414. size_t in_size, out_size, transferred;
  1415. int err;
  1416. /* assume all the iovs returned by client always fits in a page */
  1417. BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
  1418. err = -ENOMEM;
  1419. pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
  1420. iov_page = alloc_page(GFP_KERNEL);
  1421. if (!pages || !iov_page)
  1422. goto out;
  1423. /*
  1424. * If restricted, initialize IO parameters as encoded in @cmd.
  1425. * RETRY from server is not allowed.
  1426. */
  1427. if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
  1428. struct iovec *iov = page_address(iov_page);
  1429. iov->iov_base = (void __user *)arg;
  1430. iov->iov_len = _IOC_SIZE(cmd);
  1431. if (_IOC_DIR(cmd) & _IOC_WRITE) {
  1432. in_iov = iov;
  1433. in_iovs = 1;
  1434. }
  1435. if (_IOC_DIR(cmd) & _IOC_READ) {
  1436. out_iov = iov;
  1437. out_iovs = 1;
  1438. }
  1439. }
retry:
        inarg.in_size = in_size = iov_length(in_iov, in_iovs);
        inarg.out_size = out_size = iov_length(out_iov, out_iovs);

        /*
         * Out data can be used either for actual out data or iovs,
         * make sure there always is at least one page.
         */
        out_size = max_t(size_t, out_size, PAGE_SIZE);
        max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

        /* make sure there are enough buffer pages and init request with them */
        err = -ENOMEM;
        if (max_pages > FUSE_MAX_PAGES_PER_REQ)
                goto out;
        while (num_pages < max_pages) {
                pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
                if (!pages[num_pages])
                        goto out;
                num_pages++;
        }
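
        /*
         * Reserve a request and hand it the pre-allocated pages; the
         * same pages carry the in data to the server and receive the
         * out data (or, on retry, the new iov arrays) back from it.
         */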
        req = fuse_get_req(fc);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                req = NULL;
                goto out;
        }
        memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
        req->num_pages = num_pages;

        /* okay, let's send it to the server */
        req->in.h.opcode = FUSE_IOCTL;
        req->in.h.nodeid = ff->nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        if (in_size) {
                req->in.numargs++;
                req->in.args[1].size = in_size;
                req->in.argpages = 1;

                err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
                                           false);
                if (err)
                        goto out;
        }

        req->out.numargs = 2;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        req->out.args[1].size = out_size;
        req->out.argpages = 1;
        req->out.argvar = 1;

        fuse_request_send(fc, req);
        err = req->out.h.error;
        transferred = req->out.args[1].size;
        fuse_put_request(fc, req);
        req = NULL;
        if (err)
                goto out;
        /* did it ask for retry? */
        if (outarg.flags & FUSE_IOCTL_RETRY) {
                char *vaddr;

                /* no retry if in restricted mode */
                err = -EIO;
                if (!(flags & FUSE_IOCTL_UNRESTRICTED))
                        goto out;

                in_iovs = outarg.in_iovs;
                out_iovs = outarg.out_iovs;

                /*
                 * Make sure the counts stay within bounds; the checks
                 * are separate to protect against overflow.
                 */
                err = -ENOMEM;
                if (in_iovs > FUSE_IOCTL_MAX_IOV ||
                    out_iovs > FUSE_IOCTL_MAX_IOV ||
                    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
                        goto out;

                err = -EIO;
                if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
                        goto out;

                /* okay, copy in iovs and retry */
                vaddr = kmap_atomic(pages[0], KM_USER0);
                memcpy(page_address(iov_page), vaddr, transferred);
                kunmap_atomic(vaddr, KM_USER0);

                in_iov = page_address(iov_page);
                out_iov = in_iov + in_iovs;

                goto retry;
        }
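
        /*
         * No retry requested; sanity-check that the server did not
         * produce more out data than the caller's iovs can hold
         * before copying it out.
         */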
        err = -EIO;
        if (transferred > inarg.out_size)
                goto out;

        err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
out:
        if (req)
                fuse_put_request(fc, req);
        if (iov_page)
                __free_page(iov_page);
        while (num_pages)
                __free_page(pages[--num_pages]);
        kfree(pages);

        return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);
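
/*
 * Illustrative sketch (not part of this file): a userspace FUSE
 * server implementing the two-round trip described above with the
 * libfuse low-level API.  fuse_reply_ioctl_retry() and
 * fuse_reply_ioctl() are real libfuse 2.8-era calls; xmp_ioctl() and
 * struct a are hypothetical, and retry is only honored when the
 * kernel passed FUSE_IOCTL_UNRESTRICTED (see above).  The handler
 * keys the round off in_bufsz.
 *
 *	static void xmp_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd,
 *			      void *arg, struct fuse_file_info *fi,
 *			      unsigned flags, const void *in_buf,
 *			      size_t in_bufsz, size_t out_bufsz)
 *	{
 *		if (in_bufsz == 0) {
 *			// round 1: ask the kernel to copy in struct a
 *			struct iovec iov = {
 *				.iov_base = arg,
 *				.iov_len = sizeof(struct a),
 *			};
 *
 *			fuse_reply_ioctl_retry(req, &iov, 1, NULL, 0);
 *		} else if (in_bufsz == sizeof(struct a)) {
 *			// round 2: struct a arrived, ask for a->buf too
 *			const struct a *a = in_buf;
 *			struct iovec iov[2] = {
 *				{ .iov_base = arg,    .iov_len = sizeof(struct a) },
 *				{ .iov_base = a->buf, .iov_len = a->buflen },
 *			};
 *
 *			fuse_reply_ioctl_retry(req, iov, 2, NULL, 0);
 *		} else {
 *			// round 3: everything is here, do the work
 *			fuse_reply_ioctl(req, 0, NULL, 0);
 *		}
 *	}
 */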
static long fuse_file_ioctl_common(struct file *file, unsigned int cmd,
                                   unsigned long arg, unsigned int flags)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);

        if (!fuse_allow_task(fc, current))
                return -EACCES;

        if (is_bad_inode(inode))
                return -EIO;

        return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg)
{
        return fuse_file_ioctl_common(file, cmd, arg, 0);
}
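
/*
 * FUSE_IOCTL_COMPAT tells the server that the ioctl was issued by a
 * 32-bit process on a 64-bit kernel, so it can convert the argument
 * layout if necessary.
 */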
static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
                                   unsigned long arg)
{
        return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
/*
 * All files which have been polled are linked to the RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.  Returns the link pointing to the matching
 * node, or, if there is no match, the link at which a new node should
 * be inserted, with *parent_out set to its parent.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
                                              struct rb_node **parent_out)
{
        struct rb_node **link = &fc->polled_files.rb_node;
        struct rb_node *last = NULL;

        while (*link) {
                struct fuse_file *ff;

                last = *link;
                ff = rb_entry(last, struct fuse_file, polled_node);

                if (kh < ff->kh)
                        link = &last->rb_left;
                else if (kh > ff->kh)
                        link = &last->rb_right;
                else
                        return link;
        }

        if (parent_out)
                *parent_out = last;
        return link;
}
/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
                                      struct fuse_file *ff)
{
        spin_lock(&fc->lock);
        if (RB_EMPTY_NODE(&ff->polled_node)) {
                struct rb_node **link, *parent;

                link = fuse_find_polled_node(fc, ff->kh, &parent);
                BUG_ON(*link);
                rb_link_node(&ff->polled_node, parent, link);
                rb_insert_color(&ff->polled_node, &fc->polled_files);
        }
        spin_unlock(&fc->lock);
}
unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
        struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
        struct fuse_poll_out outarg;
        struct fuse_req *req;
        int err;

        if (fc->no_poll)
                return DEFAULT_POLLMASK;

        poll_wait(file, &ff->poll_wait, wait);

        /*
         * Ask for notification iff there's someone waiting for it.
         * The server may ignore the flag and always notify.
         */
        if (waitqueue_active(&ff->poll_wait)) {
                inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
                fuse_register_polled_file(fc, ff);
        }

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return POLLERR;

        req->in.h.opcode = FUSE_POLL;
        req->in.h.nodeid = ff->nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        if (!err)
                return outarg.revents;
        if (err == -ENOSYS) {
                fc->no_poll = 1;
                return DEFAULT_POLLMASK;
        }
        return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
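
/*
 * Illustrative sketch (not part of this file): the matching server
 * side with the libfuse low-level API.  When the kernel set
 * FUSE_POLL_SCHEDULE_NOTIFY above, libfuse passes the handler a poll
 * handle carrying kh; notifying it later emits FUSE_NOTIFY_POLL,
 * which lands in fuse_notify_poll_wakeup() below.  fuse_reply_poll(),
 * fuse_lowlevel_notify_poll() and fuse_pollhandle_destroy() are real
 * libfuse 2.8-era calls; xmp_poll(), saved_ph and compute_revents()
 * are hypothetical.
 *
 *	static struct fuse_pollhandle *saved_ph;  // one fd, for brevity
 *
 *	static void xmp_poll(fuse_req_t req, fuse_ino_t ino,
 *			     struct fuse_file_info *fi,
 *			     struct fuse_pollhandle *ph)
 *	{
 *		if (ph) {		// notification was requested
 *			if (saved_ph)
 *				fuse_pollhandle_destroy(saved_ph);
 *			saved_ph = ph;
 *		}
 *		fuse_reply_poll(req, compute_revents());
 *	}
 *
 *	// later, when the file becomes ready:
 *	if (saved_ph)
 *		fuse_lowlevel_notify_poll(saved_ph);
 */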
/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
                            struct fuse_notify_poll_wakeup_out *outarg)
{
        u64 kh = outarg->kh;
        struct rb_node **link;

        spin_lock(&fc->lock);

        link = fuse_find_polled_node(fc, kh, NULL);
        if (*link) {
                struct fuse_file *ff;

                ff = rb_entry(*link, struct fuse_file, polled_node);
                wake_up_interruptible_sync(&ff->poll_wait);
        }

        spin_unlock(&fc->lock);
        return 0;
}
static const struct file_operations fuse_file_operations = {
        .llseek         = fuse_file_llseek,
        .read           = do_sync_read,
        .aio_read       = fuse_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = fuse_file_aio_write,
        .mmap           = fuse_file_mmap,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .lock           = fuse_file_lock,
        .flock          = fuse_file_flock,
        .splice_read    = generic_file_splice_read,
        .unlocked_ioctl = fuse_file_ioctl,
        .compat_ioctl   = fuse_file_compat_ioctl,
        .poll           = fuse_file_poll,
};
static const struct file_operations fuse_direct_io_file_operations = {
        .llseek         = fuse_file_llseek,
        .read           = fuse_direct_read,
        .write          = fuse_direct_write,
        .mmap           = fuse_direct_mmap,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .lock           = fuse_file_lock,
        .flock          = fuse_file_flock,
        .unlocked_ioctl = fuse_file_ioctl,
        .compat_ioctl   = fuse_file_compat_ioctl,
        .poll           = fuse_file_poll,
        /* no splice_read: direct I/O bypasses the page cache, which
           generic_file_splice_read() relies on */
};
static const struct address_space_operations fuse_file_aops = {
        .readpage       = fuse_readpage,
        .writepage      = fuse_writepage,
        .launder_page   = fuse_launder_page,
        .write_begin    = fuse_write_begin,
        .write_end      = fuse_write_end,
        .readpages      = fuse_readpages,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .bmap           = fuse_bmap,
};
void fuse_init_file_inode(struct inode *inode)
{
        inode->i_fop = &fuse_file_operations;
        inode->i_data.a_ops = &fuse_file_aops;
}