
/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

/*
 * The max size that a non-root user is allowed to grow the pipe. Can
 * be set by root in /proc/sys/fs/pipe-max-size
 */
unsigned int pipe_max_size = 1048576;

/*
 * Minimum pipe size, as required by POSIX
 */
unsigned int pipe_min_size = PAGE_SIZE;

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->inode)
		mutex_lock_nested(&pipe->inode->i_mutex, subclass);
}

void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);

void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);
}
EXPORT_SYMBOL(pipe_unlock);

void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}
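
/*
 * Illustrative sketch (added commentary, not in the original file):
 * callers use pipe_wait() inside a re-check loop, because the pipe
 * state may have changed again by the time the lock is re-taken. The
 * pattern, as seen in pipe_read()/pipe_write() below, is roughly:
 *
 *	for (;;) {
 *		if (condition)			// e.g. data or space available
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		pipe_wait(pipe);		// drops and re-takes i_mutex
 *	}
 */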
static int
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
			int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(to, iov->iov_base, copy))
				return -EFAULT;
		}
		to += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

static int
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
		      int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_to_user_inatomic(iov->iov_base, from, copy))
				return -EFAULT;
		} else {
			if (copy_to_user(iov->iov_base, from, copy))
				return -EFAULT;
		}
		from += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

/*
 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 * Returns the number of bytes not faulted in.
 */
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		if (fault_in_pages_writeable(iov->iov_base, this_len))
			break;

		len -= this_len;
		iov++;
	}

	return len;
}

/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		fault_in_pages_readable(iov->iov_base, this_len);
		len -= this_len;
		iov++;
	}
}
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}

/**
 * generic_pipe_buf_map - virtually map a pipe buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer that should be mapped
 * @atomic:	whether to use an atomic map
 *
 * Description:
 *	This function returns a kernel virtual address mapping for the
 *	pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
 *	and the caller has to be careful not to fault before calling
 *	the unmap function.
 *
 *	Note that this function occupies KM_USER0 if @atomic != 0.
 */
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf, int atomic)
{
	if (atomic) {
		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
		return kmap_atomic(buf->page);
	}

	return kmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_map);

/**
 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer that should be unmapped
 * @map_data:	the data that the mapping function returned
 *
 * Description:
 *	This function undoes the mapping that ->map() provided.
 */
void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, void *map_data)
{
	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
		kunmap_atomic(map_data);
	} else
		kunmap(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_unmap);

/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);

/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);

/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);

/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);

static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
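
/*
 * Illustrative sketch (added commentary, not in the original file): the
 * pipe is a circular array of pipe->buffers page-sized buffers, where
 * pipe->buffers is a power of two so indices wrap with a mask instead
 * of a modulo. With buffers = 8, curbuf = 6 and nrbufs = 4, the
 * occupied slots are:
 *
 *	6, 7, 0, 1	((curbuf + i) & (buffers - 1) for i = 0..nrbufs-1)
 *
 * pipe_read() below consumes from curbuf; pipe_write() appends at
 * (curbuf + nrbufs) & (buffers - 1).
 */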
static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
	  unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	for (;;) {
		int bufs = pipe->nrbufs;

		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len;
			int error, atomic;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			atomic = !iov_fault_in_pages_write(iov, chars);
redo:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
			ops->unmap(pipe, buf, addr);
			if (unlikely(error)) {
				/*
				 * Just retry with the slow path if we failed.
				 */
				if (atomic) {
					atomic = 0;
					goto redo;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	mutex_unlock(&inode->i_mutex);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t ppos)
{
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error, atomic = 1;
			void *addr;

			error = ops->confirm(pipe, buf);
			if (error)
				goto out;

			iov_fault_in_pages_read(iov, chars);
redo1:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_from_user(offset + addr, iov,
							chars, atomic);
			ops->unmap(pipe, buf, addr);
			ret = error;
			do_wakeup = 1;
			if (error) {
				if (atomic) {
					atomic = 0;
					goto redo1;
				}
				goto out;
			}
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			char *src;
			int error, atomic = 1;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			iov_fault_in_pages_read(iov, chars);
redo2:
			if (atomic)
				src = kmap_atomic(page);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, iov, chars,
							atomic);
			if (atomic)
				kunmap_atomic(src);
			else
				kunmap(page);

			if (unlikely(error)) {
				if (atomic) {
					atomic = 0;
					goto redo2;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < pipe->buffers)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0)
		file_update_time(filp);
	return ret;
}
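
/*
 * Worked example for the merge path above (added commentary, not in the
 * original file): with PAGE_SIZE = 4096, two consecutive
 * write(fd, buf, 100) calls land in the same page. The first write
 * fills bytes 0..99 of a fresh buffer; for the second, chars =
 * 100 & 4095 = 100 and offset = 100, so offset + chars = 200 <= 4096
 * and the data is appended to the last buffer instead of consuming a
 * new page. This is why many small writes don't burn one buffer slot
 * per write.
 */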
static ssize_t
bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	return -EBADF;
}

static ssize_t
bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
	   loff_t *ppos)
{
	return -EBADF;
}

static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			mutex_lock(&inode->i_mutex);
			pipe = inode->i_pipe;
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (pipe->buffers - 1);
			}
			mutex_unlock(&inode->i_mutex);

			return put_user(count, (int __user *)arg);
		default:
			return -EINVAL;
	}
}
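
/*
 * Illustrative userspace sketch for FIONREAD (added commentary, not in
 * the original file): the ioctl reports how many bytes are currently
 * buffered in the pipe, summed across all occupied slots.
 *
 *	int avail;
 *	if (ioctl(pipefd[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes ready to read\n", avail);
 */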
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore.  */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}
static int
pipe_release(struct inode *inode, int decr, int decw)
{
	struct pipe_inode_info *pipe;

	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	pipe->readers -= decr;
	pipe->writers -= decw;

	if (!pipe->readers && !pipe->writers) {
		free_pipe_info(inode);
	} else {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	mutex_unlock(&inode->i_mutex);

	return 0;
}

static int
pipe_read_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
	mutex_unlock(&inode->i_mutex);

	return retval;
}

static int
pipe_write_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
	mutex_unlock(&inode->i_mutex);

	return retval;
}

static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if (retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0) /* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}

static int
pipe_read_release(struct inode *inode, struct file *filp)
{
	return pipe_release(inode, 1, 0);
}

static int
pipe_write_release(struct inode *inode, struct file *filp)
{
	return pipe_release(inode, 0, 1);
}

static int
pipe_rdwr_release(struct inode *inode, struct file *filp)
{
	int decr, decw;

	decr = (filp->f_mode & FMODE_READ) != 0;
	decw = (filp->f_mode & FMODE_WRITE) != 0;
	return pipe_release(inode, decr, decw);
}

static int
pipe_read_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		inode->i_pipe->readers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}

static int
pipe_write_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		inode->i_pipe->writers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}

static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		if (filp->f_mode & FMODE_READ)
			inode->i_pipe->readers++;
		if (filp->f_mode & FMODE_WRITE)
			inode->i_pipe->writers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}
/*
 * The file_operations structs are not static because they
 * are also used in linux/fs/fifo.c to do operations on FIFOs.
 *
 * Pipes reuse fifos' file_operations structs.
 */
const struct file_operations read_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= bad_pipe_w,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_read_open,
	.release	= pipe_read_release,
	.fasync		= pipe_read_fasync,
};

const struct file_operations write_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= bad_pipe_r,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_write_open,
	.release	= pipe_write_release,
	.fasync		= pipe_write_fasync,
};

const struct file_operations rdwr_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_rdwr_open,
	.release	= pipe_rdwr_release,
	.fasync		= pipe_rdwr_fasync,
};
struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
		if (pipe->bufs) {
			init_waitqueue_head(&pipe->wait);
			pipe->r_counter = pipe->w_counter = 1;
			pipe->inode = inode;
			pipe->buffers = PIPE_DEF_BUFFERS;
			return pipe;
		}
		kfree(pipe);
	}

	return NULL;
}

void __free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	for (i = 0; i < pipe->buffers; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe->bufs);
	kfree(pipe);
}

void free_pipe_info(struct inode *inode)
{
	__free_pipe_info(inode->i_pipe);
	inode->i_pipe = NULL;
}

static struct vfsmount *pipe_mnt __read_mostly;

/*
 * pipefs_dname() is called from d_path().
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	inode->i_ino = get_next_ino();

	pipe = alloc_pipe_info(inode);
	if (!pipe)
		goto fail_iput;
	inode->i_pipe = pipe;

	pipe->readers = pipe->writers = 1;
	inode->i_fop = &rdwr_pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}
struct file *create_write_pipe(int flags)
{
	int err;
	struct inode *inode;
	struct file *f;
	struct path path;
	struct qstr name = { .name = "" };

	err = -ENFILE;
	inode = get_pipe_inode();
	if (!inode)
		goto err;

	err = -ENOMEM;
	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	d_instantiate(path.dentry, inode);

	err = -ENFILE;
	f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
	if (!f)
		goto err_dentry;
	f->f_mapping = inode->i_mapping;

	f->f_flags = O_WRONLY | (flags & O_NONBLOCK);
	f->f_version = 0;

	return f;

 err_dentry:
	free_pipe_info(inode);
	path_put(&path);
	return ERR_PTR(err);

 err_inode:
	free_pipe_info(inode);
	iput(inode);
 err:
	return ERR_PTR(err);
}

void free_write_pipe(struct file *f)
{
	free_pipe_info(f->f_dentry->d_inode);
	path_put(&f->f_path);
	put_filp(f);
}

struct file *create_read_pipe(struct file *wrf, int flags)
{
	/* Grab pipe from the writer */
	struct file *f = alloc_file(&wrf->f_path, FMODE_READ,
				    &read_pipefifo_fops);
	if (!f)
		return ERR_PTR(-ENFILE);

	path_get(&wrf->f_path);
	f->f_flags = O_RDONLY | (flags & O_NONBLOCK);

	return f;
}

int do_pipe_flags(int *fd, int flags)
{
	struct file *fw, *fr;
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK))
		return -EINVAL;

	fw = create_write_pipe(flags);
	if (IS_ERR(fw))
		return PTR_ERR(fw);
	fr = create_read_pipe(fw, flags);
	error = PTR_ERR(fr);
	if (IS_ERR(fr))
		goto err_write_pipe;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd_install(fdr, fr);
	fd_install(fdw, fw);
	fd[0] = fdr;
	fd[1] = fdw;

	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	path_put(&fr->f_path);
	put_filp(fr);
 err_write_pipe:
	free_write_pipe(fw);
	return error;
}
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, flags);
	if (!error) {
		if (copy_to_user(fildes, fd, sizeof(fd))) {
			sys_close(fd[0]);
			sys_close(fd[1]);
			error = -EFAULT;
		}
	}
	return error;
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}
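
/*
 * Illustrative userspace sketch (added commentary, not in the original
 * file): pipe2() is pipe() plus a flags argument, accepting only
 * O_CLOEXEC and O_NONBLOCK here — anything else fails with -EINVAL.
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0) {
 *		// fds[0] is the read end, fds[1] the write end
 *		write(fds[1], "hi", 2);
 *	}
 */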
/*
 * Allocate a new array of pipe buffers and copy the info over. Returns the
 * pipe size if successful, or -ERROR on error.
 */
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
	struct pipe_buffer *bufs;

	/*
	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
	 * expect a lot of shrink+grow operations, just free and allocate
	 * again like we would do for growing. If the pipe currently
	 * contains more buffers than arg, then return busy.
	 */
	if (nr_pages < pipe->nrbufs)
		return -EBUSY;

	bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!bufs))
		return -ENOMEM;

	/*
	 * The pipe array wraps around, so just start the new one at zero
	 * and adjust the indexes.
	 */
	if (pipe->nrbufs) {
		unsigned int tail;
		unsigned int head;

		tail = pipe->curbuf + pipe->nrbufs;
		if (tail < pipe->buffers)
			tail = 0;
		else
			tail &= (pipe->buffers - 1);

		head = pipe->nrbufs - tail;
		if (head)
			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
		if (tail)
			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
	}

	pipe->curbuf = 0;
	kfree(pipe->bufs);
	pipe->bufs = bufs;
	pipe->buffers = nr_pages;
	return nr_pages * PAGE_SIZE;
}
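
/*
 * Worked example for the copy above (added commentary, not in the
 * original file): with buffers = 8, curbuf = 6 and nrbufs = 4, the
 * occupied slots 6, 7, 0, 1 wrap around the end of the old array.
 * tail = (6 + 4) & 7 = 2 (the entries that wrapped), head = 4 - 2 = 2
 * (the entries up to the end), so the first memcpy moves old slots
 * 6..7 to new slots 0..1 and the second moves old slots 0..1 to new
 * slots 2..3, leaving the new ring linear with curbuf = 0.
 */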
/*
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages.
 */
static inline unsigned int round_pipe_size(unsigned int size)
{
	unsigned long nr_pages;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}
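
/*
 * Worked example (added commentary, not in the original file): with
 * PAGE_SIZE = 4096, a request of size = 10000 needs
 * (10000 + 4095) >> 12 = 3 pages, which rounds up to the next power of
 * two, 4 pages, so round_pipe_size(10000) returns 16384.
 */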
/*
 * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
 * will return an error.
 */
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
		 size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
	if (ret < 0 || !write)
		return ret;

	pipe_max_size = round_pipe_size(pipe_max_size);

	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
struct pipe_inode_info *get_pipe_info(struct file *file)
{
	struct inode *i = file->f_path.dentry->d_inode;

	return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
}

long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe;
	long ret;

	pipe = get_pipe_info(file);
	if (!pipe)
		return -EBADF;

	mutex_lock(&pipe->inode->i_mutex);

	switch (cmd) {
	case F_SETPIPE_SZ: {
		unsigned int size, nr_pages;

		size = round_pipe_size(arg);
		nr_pages = size >> PAGE_SHIFT;

		ret = -EINVAL;
		if (!nr_pages)
			goto out;

		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
			ret = -EPERM;
			goto out;
		}
		ret = pipe_set_size(pipe, nr_pages);
		break;
	}
	case F_GETPIPE_SZ:
		ret = pipe->buffers * PAGE_SIZE;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}
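
/*
 * Illustrative userspace sketch (added commentary, not in the original
 * file): F_SETPIPE_SZ takes a size in bytes, rounds it up to a
 * power-of-two number of pages, and returns the resulting capacity;
 * unprivileged callers are capped at /proc/sys/fs/pipe-max-size.
 *
 *	long cap = fcntl(pipefd[1], F_SETPIPE_SZ, 1 << 20);	// ask for 1 MiB
 *	long cur = fcntl(pipefd[1], F_GETPIPE_SZ, 0);		// query capacity
 */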
static const struct super_operations pipefs_ops = {
	.destroy_inode = free_inode_nonrcu,
	.statfs = simple_statfs,
};

/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
			&pipefs_dentry_operations, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.mount		= pipefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

fs_initcall(init_pipe_fs);