splice.c

/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files and to fix the initial implementation
 * bugs.
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
 *
 */
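/*
 * Example (illustrative sketch): how userspace might drive the splice()
 * system call implemented at the bottom of this file, moving up to 64k
 * from a regular file into a socket through a pipe. The splice() wrapper
 * and the SPLICE_F_* flags are assumed to come from <fcntl.h> with
 * _GNU_SOURCE defined; 'file_fd' and 'sock_fd' are hypothetical,
 * already-open descriptors. Short transfers are glossed over.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int send_chunk(int file_fd, int sock_fd)
 *	{
 *		int pfd[2];
 *		ssize_t n;
 *
 *		if (pipe(pfd))
 *			return -1;
 *		// file -> pipe: fill the kernel pipe buffer
 *		n = splice(file_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *		if (n > 0)
 *			// pipe -> socket: drain it again, no copy to user space
 *			n = splice(pfd[0], NULL, sock_fd, NULL, n, SPLICE_F_MOVE);
 *		close(pfd[0]);
 *		close(pfd[1]);
 *		return n < 0 ? -1 : 0;
 *	}
 */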
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>

/*
 * Passed to the actors
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}
static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->page = NULL;
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}

static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, fall through to mapping.
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}

static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};
/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
			    int nr_pages, unsigned long offset,
			    unsigned long len, unsigned int flags)
{
	int ret, do_wakeup, i;

	ret = 0;
	do_wakeup = 0;
	i = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pages[i++];
			unsigned long this_len;

			this_len = PAGE_CACHE_SIZE - offset;
			if (this_len > len)
				this_len = len;

			buf->page = page;
			buf->offset = offset;
			buf->len = this_len;
			buf->ops = &page_cache_pipe_buf_ops;
			pipe->nrbufs++;
			if (pipe->inode)
				do_wakeup = 1;

			ret += this_len;
			len -= this_len;
			offset = 0;
			if (!--nr_pages)
				break;
			if (!len)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (i < nr_pages)
		page_cache_release(pages[i++]);

	return ret;
}
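/*
 * Worked example of the slot arithmetic in move_to_pipe(), assuming
 * PIPE_BUFFERS is 16 (its value in this kernel's <linux/pipe_fs_i.h>):
 * with curbuf = 14 and nrbufs = 3 the occupied slots are 14, 15 and 0,
 * and the next free slot is (14 + 3) & (16 - 1) = 17 & 15 = 1, i.e. the
 * index simply wraps around the ring.
 */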
static int
__generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index;
	int i, error;

	index = in->f_pos >> PAGE_CACHE_SHIFT;
	offset = in->f_pos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. However, don't call into
	 * read-ahead for a single page at a non-zero offset (we are likely
	 * doing a small chunk splice and the page is already there).
	 */
	if (!offset || nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * now fill in the holes
	 */
	error = 0;
	for (i = 0; i < nr_pages; i++, index++) {
find_page:
		/*
		 * lookup the page for this index
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * If in nonblock mode then don't block on
			 * readpage (we've kicked readahead so there
			 * will be asynchronous progress):
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			/*
			 * page didn't exist, allocate one
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						      mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}

			goto readpage;
		}

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

readpage:
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				if (error == AOP_TRUNCATED_PAGE)
					goto find_page;
				break;
			}
		}
fill_it:
		pages[i] = page;
	}

	if (i)
		return move_to_pipe(pipe, pages, i, offset, len, flags);

	return error;
}
/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in: file to splice from
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
				 size_t len, unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, pipe, len, flags);

		if (ret <= 0)
			break;

		in->f_pos += ret;
		len -= ret;
		spliced += ret;

		if (!(flags & SPLICE_F_NONBLOCK))
			continue;
		ret = -EAGAIN;
		break;
	}

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
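/*
 * Illustrative sketch of the SPLICE_F_NONBLOCK behaviour above as seen
 * from userspace when splicing a file into a pipe: a short count or
 * -EAGAIN means the pages were not in the page cache yet and readahead
 * has been kicked. 'file_fd' and 'pipe_wr_fd' are hypothetical
 * descriptors; the splice() wrapper is assumed from <fcntl.h> with
 * _GNU_SOURCE, errno from <errno.h>.
 *
 *	for (;;) {
 *		ssize_t n = splice(file_fd, NULL, pipe_wr_fd, NULL,
 *				   16 * 4096, SPLICE_F_NONBLOCK);
 *		if (n == -1 && errno == EAGAIN)
 *			continue;	// not in cache yet; a real program would poll/sleep
 *		if (n <= 0)
 *			break;		// EOF or error
 *		// consume the n bytes now sitting in the pipe ...
 *	}
 */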
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage().
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	unsigned int offset;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * Sub-optimal, but we are limited by the pipe ->map. We don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache.
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	offset = pos & ~PAGE_CACHE_MASK;
	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);

	buf->ops->unmap(info, buf);
	if (ret == sd->len)
		return 0;

	return -EIO;
}
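/*
 * Illustrative sketch of how the 'more' hint above is driven: within one
 * splice() call, pipe_to_sendpage() already sets it for every chunk but
 * the last; across calls, userspace can pass SPLICE_F_MORE itself to tell
 * the protocol that more data follows (similar to MSG_MORE). 'pipe_rd_fd'
 * and 'sock_fd' are hypothetical descriptors.
 *
 *	// first chunk, more to come
 *	splice(pipe_rd_fd, NULL, sock_fd, NULL, 4096, SPLICE_F_MORE);
 *	// final chunk, let the stack push the data out
 *	splice(pipe_rd_fd, NULL, sock_fd, NULL, 4096, 0);
 */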
/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset;
	struct page *page;
	pgoff_t index;
	char *src;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	src = buf->ops->map(file, info, buf);
	if (IS_ERR(src))
		return PTR_ERR(src);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	/*
	 * reuse buf page, if SPLICE_F_MOVE is set
	 */
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		/*
		 * this will also set the page locked
		 */
		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		ret = -ENOMEM;
		page = find_or_create_page(mapping, index, gfp_mask);
		if (!page)
			goto out_nomem;

		/*
		 * If the page is uptodate, it is also locked. If it isn't
		 * uptodate, we can mark it uptodate if we are filling the
		 * full page. Otherwise we need to read it in first...
		 */
		if (!PageUptodate(page)) {
			if (sd->len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * page got invalidated, repeat
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else {
				WARN_ON(!PageLocked(page));
				SetPageUptodate(page);
			}
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, sd->len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER0);
	}

	ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		page_cache_release(page);
		unlock_page(page);
	}
out_nomem:
	buf->ops->unmap(info, buf);
	return ret;
}
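/*
 * Illustrative sketch of the SPLICE_F_MOVE path handled by pipe_to_file():
 * userspace asks for the page to be moved rather than copied, but whether
 * the steal succeeds is invisible to the caller; on failure the code above
 * silently falls back to copying. 'pipe_rd_fd' and 'out_fd' are
 * hypothetical descriptors for a pipe and a regular file opened for
 * writing.
 *
 *	ssize_t n = splice(pipe_rd_fd, NULL, out_fd, NULL, 4096, SPLICE_F_MOVE);
 */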
typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	out->f_pos = sd.pos;
	return ret;
}
/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe: pipe info
 * @out: file to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret;

	ret = move_from_pipe(pipe, out, len, flags, pipe_to_file);

	/*
	 * if file or inode is SYNC and we actually wrote some data, sync it
	 */
	if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
	    && ret > 0) {
		struct inode *inode = mapping->host;
		int err;

		mutex_lock(&inode->i_mutex);
		err = generic_osync_inode(mapping->host, mapping,
					  OSYNC_METADATA|OSYNC_DATA);
		mutex_unlock(&inode->i_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_write);
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				size_t len, unsigned int flags)
{
	return move_from_pipe(pipe, out, len, flags, pipe_to_sendpage);
}
EXPORT_SYMBOL(generic_splice_sendpage);
/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   size_t len, unsigned int flags)
{
	loff_t pos;
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	pos = out->f_pos;
	ret = rw_verify_area(WRITE, out, &pos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, struct pipe_inode_info *pipe,
			 size_t len, unsigned int flags)
{
	loff_t pos, isize, left;
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	pos = in->f_pos;
	ret = rw_verify_area(READ, in, &pos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(in->f_pos >= isize))
		return 0;

	left = isize - in->f_pos;
	if (unlikely(left < len))
		len = left;

	return in->f_op->splice_read(in, pipe, len, flags);
}
long do_splice_direct(struct file *in, struct file *out, size_t len,
		      unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	int i;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the move_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * do the splice
	 */
	ret = 0;
	bytes = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}
EXPORT_SYMBOL(do_splice_direct);
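/*
 * Illustrative sketch: do_splice_direct() is roughly the in-kernel
 * equivalent of the userspace loop below, except that the kernel reuses a
 * per-task internal pipe (current->splice_pipe) and never exposes it.
 * 'in_fd' and 'out_fd' are hypothetical descriptors; short writes and
 * error handling are omitted.
 *
 *	int pfd[2];
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	while ((n = splice(in_fd, NULL, pfd[1], NULL, 16 * 4096, 0)) > 0)
 *		if (splice(pfd[0], NULL, out_fd, NULL, n, 0) < 0)
 *			break;
 */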
/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;

	pipe = in->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&out->f_pos, off_out,
					   sizeof(loff_t)))
				return -EFAULT;
		}

		return do_splice_from(pipe, out, len, flags);
	}

	pipe = out->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&in->f_pos, off_in, sizeof(loff_t)))
				return -EFAULT;
		}

		return do_splice_to(in, pipe, len, flags);
	}

	return -EINVAL;
}
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);

				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
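/*
 * Illustrative sketch of the fd/offset rules enforced by do_splice():
 * exactly one side must be a pipe, and an offset pointer is only accepted
 * for the non-pipe side (and only if that file can seek). 'file_fd' and
 * 'pipe_wr_fd' are hypothetical descriptors; loff_t is assumed from
 * <sys/types.h> with _GNU_SOURCE.
 *
 *	loff_t off = 1024 * 1024;
 *
 *	// read 4k from file_fd at offset 1M into the pipe
 *	splice(file_fd, &off, pipe_wr_fd, NULL, 4096, 0);
 *	// passing an offset pointer for the pipe side would fail with ESPIPE
 */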