dev.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
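
/*
 * fuse_get_req() never returns NULL; callers must check the result
 * with IS_ERR().  A hedged sketch of the typical calling pattern in
 * the rest of fs/fuse (the opcode/argument setup is abridged, but the
 * IS_ERR/PTR_ERR dance matches how real callers use this function):
 *
 *	struct fuse_req *req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... fill in req->in.h.opcode, req->in.args[] ...
 *	request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */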
/*
 * Return request in fuse_file->reserved_req.  However, that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation; always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}
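
/*
 * A hedged sketch of the intended consumer: fuse_flush() in
 * fs/fuse/file.c follows roughly this shape (argument setup
 * abridged), relying on the nofail guarantee so that a FLUSH is
 * never silently dropped under memory pressure:
 *
 *	struct fuse_req *req = fuse_get_req_nofail(fc, file);
 *	req->in.h.opcode = FUSE_FLUSH;
 *	... set up the fuse_flush_in argument ...
 *	request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */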
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
	__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
			clear_bdi_congested(&fc->bdi, READ);
			clear_bdi_congested(&fc->bdi, WRITE);
		}
		fc->num_background--;
	}
	spin_unlock(&fc->lock);
	dput(req->dentry);
	mntput(req->vfsmount);
	if (req->file)
		fput(req->file);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}
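
/*
 * A hedged sketch of how the 'end' callback is meant to be used for
 * asynchronous requests (fuse_readpages_end here stands in for any
 * completion handler; the real one lives in fs/fuse/file.c):
 *
 *	static void fuse_readpages_end(struct fuse_conn *fc,
 *				       struct fuse_req *req)
 *	{
 *		... unlock pages, inspect req->out.h.error ...
 *		fuse_put_request(fc, req);	// drop the final reference
 *	}
 *
 *	req->end = fuse_readpages_end;
 *	request_send_background(fc, req);
 *
 * Note the contract encoded in request_end() above: if an 'end'
 * callback is set, it inherits the final reference and must put it.
 */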
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (req->force) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	} else {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);
	}

	if (req->aborted)
		goto aborted;
	if (req->state == FUSE_REQ_FINISHED)
		return;

	req->out.h.error = -EINTR;
	req->aborted = 1;

 aborted:
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	}
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		req->background = 1;
		fc->num_background++;
		if (fc->num_background == FUSE_MAX_BACKGROUND)
			fc->blocked = 1;
		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
			set_bdi_congested(&fc->bdi, READ);
			set_bdi_congested(&fc->bdi, WRITE);
		}

		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}
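
/*
 * Worked example of the length computation in queue_request(): a
 * LOOKUP request carrying the single name argument "foo" (4 bytes
 * including the terminating NUL) is announced to userspace with
 *
 *	in.h.len = sizeof(struct fuse_in_header) + 4 = 40 + 4 = 44
 *
 * since struct fuse_in_header is 40 bytes in this protocol version.
 * The daemon's read buffer must be at least in.h.len bytes, or
 * fuse_dev_read() below fails the request with -EIO.
 */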
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pageful of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
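
/*
 * The copy helpers above form a small pipeline.  A hedged sketch of
 * the usual calling sequence (mirroring what fuse_dev_read() does
 * below):
 *
 *	struct fuse_copy_state cs;
 *	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
 *	err = fuse_copy_one(&cs, &req->in.h, sizeof(req->in.h));
 *	if (!err)
 *		err = fuse_copy_args(&cs, in->numargs, in->argpages,
 *				     (struct fuse_arg *) in->args, 0);
 *	fuse_copy_finish(&cs);
 *
 * fuse_copy_fill() pins and maps one userspace page at a time and
 * fuse_copy_do() drains it; fuse_copy_finish() must always run last,
 * to unmap and release the final page.
 */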
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
	__releases(fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}
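
/*
 * On the wire an INTERRUPT is therefore just 48 bytes: a 40-byte
 * fuse_in_header (opcode FUSE_INTERRUPT, a fresh unique id) followed
 * by struct fuse_interrupt_in, whose single u64 names the unique id
 * of the request being interrupted.  A daemon that does not implement
 * interrupts simply replies -ENOSYS once; fc->no_interrupt then stops
 * further ones (see fuse_dev_write() below).
 */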
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET) or the request has been
 * aborted or there was an error during the copying then it's finished
 * by calling request_end().  Otherwise add it to the processing list,
 * and set the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err && req->aborted)
		err = -ENOENT;
	if (err) {
		if (!req->aborted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}
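
/*
 * For reference, a hedged sketch of the userspace side of this read
 * (roughly what libfuse does internally; FUSE_MAX_IN and dispatch()
 * are hypothetical names, error handling abridged).  If the buffer is
 * smaller than a queued request, the kernel fails that request with
 * -EIO and waits for the next one, so the daemon should size its
 * buffer for the largest expected request:
 *
 *	char buf[FUSE_MAX_IN];
 *	for (;;) {
 *		ssize_t n = read(fuse_fd, buf, sizeof(buf));
 *		if (n < 0 && errno == ENODEV)
 *			break;		// connection aborted or unmounted
 *		struct fuse_in_header *ih = (struct fuse_in_header *) buf;
 *		dispatch(ih->opcode, ih->unique, buf + sizeof(*ih));
 *	}
 */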
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}
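
/*
 * The matching userspace side, again as a hedged sketch (libfuse
 * normally hides this).  struct fuse_out_header is 16 bytes
 * { u32 len; s32 error; u64 unique; }, so a successful GETATTR reply
 * is written as one header plus the attr body, with oh.len covering
 * both and oh.unique echoing the request's unique id:
 *
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh) + sizeof(struct fuse_attr_out),
 *		.error  = 0,
 *		.unique = request_unique,
 *	};
 *	struct iovec iov[2] = {
 *		{ &oh, sizeof(oh) },
 *		{ &attr_out, sizeof(attr_out) },
 *	};
 *	writev(fuse_fd, iov, 2);
 *
 * Note the validation above: oh.len must equal the number of bytes
 * written, and oh.error must lie in (-1000, 0].
 */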
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
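
/*
 * fuse_abort_conn() backs the user-visible abort knob described in
 * Documentation/filesystems/fuse.txt: a hung filesystem can be torn
 * down by writing to the connection's abort file, e.g. (shell sketch,
 * the connection number 42 is an example):
 *
 *	echo 1 > /sys/fs/fuse/connections/42/abort
 *
 * after which pending and in-flight requests fail with -ECONNABORTED,
 * as set up by the helpers above.
 */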
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		fuse_conn_put(fc);
	}

	return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}