splice.c

/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files and fixing the initial implementation
 * bugs.
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
 *
 */
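
/*
 * For orientation, a minimal userspace sketch of the syscall implemented
 * at the bottom of this file (sys_splice). Illustrative only: it assumes
 * a libc that exposes splice() and the SPLICE_F_MOVE flag, and it skips
 * almost all error handling:
 *
 *        int pfd[2], fd = open("input", O_RDONLY);
 *        loff_t off = 0;
 *
 *        pipe(pfd);
 *        // pull up to 64k from 'fd' at offset 0 into the pipe's write side
 *        ssize_t n = splice(fd, &off, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *        // the data now sits in pipe buffers and can be splice()d or read() out
 *
 * A pipe must be on at least one side of the call, which is what
 * do_splice() below enforces.
 */
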
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>

/*
 * Passed to the actors
 */
struct splice_desc {
        unsigned int len, total_len;    /* current and remaining length */
        unsigned int flags;             /* splice flags */
        struct file *file;              /* file to read/write */
        loff_t pos;                     /* file position */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        struct address_space *mapping = page_mapping(page);

        WARN_ON(!PageLocked(page));
        WARN_ON(!PageUptodate(page));

        /*
         * At least for ext2 with nobh option, we need to wait on writeback
         * completing on this page, since we'll remove it from the pagecache.
         * Otherwise truncate won't wait on the page, allowing the disk
         * blocks to be reused by someone else before we actually wrote our
         * data to them. fs corruption ensues.
         */
        wait_on_page_writeback(page);

        if (PagePrivate(page))
                try_to_release_page(page, mapping_gfp_mask(mapping));

        if (!remove_mapping(mapping, page))
                return 1;

        buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
        return 0;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
                                        struct pipe_buffer *buf)
{
        page_cache_release(buf->page);
        buf->page = NULL;
        buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}

static void *page_cache_pipe_buf_map(struct file *file,
                                     struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        int err;

        if (!PageUptodate(page)) {
                lock_page(page);

                /*
                 * Page got truncated/unhashed. This will cause a 0-byte
                 * splice, if this is the first page
                 */
                if (!page->mapping) {
                        err = -ENODATA;
                        goto error;
                }

                /*
                 * uh oh, read-error from disk
                 */
                if (!PageUptodate(page)) {
                        err = -EIO;
                        goto error;
                }

                /*
                 * page is ok after all, fall through to mapping
                 */
                unlock_page(page);
        }

        return kmap(page);
error:
        unlock_page(page);
        return ERR_PTR(err);
}

static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
                                      struct pipe_buffer *buf)
{
        kunmap(buf->page);
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
        .can_merge = 0,
        .map = page_cache_pipe_buf_map,
        .unmap = page_cache_pipe_buf_unmap,
        .release = page_cache_pipe_buf_release,
        .steal = page_cache_pipe_buf_steal,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
                            int nr_pages, unsigned long offset,
                            unsigned long len, unsigned int flags)
{
        int ret, do_wakeup, i;

        ret = 0;
        do_wakeup = 0;
        i = 0;

        if (pipe->inode)
                mutex_lock(&pipe->inode->i_mutex);

        for (;;) {
                if (!pipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }

                if (pipe->nrbufs < PIPE_BUFFERS) {
                        int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
                        struct pipe_buffer *buf = pipe->bufs + newbuf;
                        struct page *page = pages[i++];
                        unsigned long this_len;

                        this_len = PAGE_CACHE_SIZE - offset;
                        if (this_len > len)
                                this_len = len;

                        buf->page = page;
                        buf->offset = offset;
                        buf->len = this_len;
                        buf->ops = &page_cache_pipe_buf_ops;
                        pipe->nrbufs++;
                        if (pipe->inode)
                                do_wakeup = 1;

                        ret += this_len;
                        len -= this_len;
                        offset = 0;
                        if (!--nr_pages)
                                break;
                        if (!len)
                                break;
                        if (pipe->nrbufs < PIPE_BUFFERS)
                                continue;

                        break;
                }

                if (flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (do_wakeup) {
                        smp_mb();
                        if (waitqueue_active(&pipe->wait))
                                wake_up_interruptible_sync(&pipe->wait);
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }

                pipe->waiting_writers++;
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }

        while (i < nr_pages)
                page_cache_release(pages[i++]);

        return ret;
}

static int
__generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
                           size_t len, unsigned int flags)
{
        struct address_space *mapping = in->f_mapping;
        unsigned int offset, nr_pages;
        struct page *pages[PIPE_BUFFERS];
        struct page *page;
        pgoff_t index;
        int i, error;

        index = in->f_pos >> PAGE_CACHE_SHIFT;
        offset = in->f_pos & ~PAGE_CACHE_MASK;
        nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        if (nr_pages > PIPE_BUFFERS)
                nr_pages = PIPE_BUFFERS;

        /*
         * initiate read-ahead on this page range. however, don't call into
         * read-ahead if this is a non-zero offset (we are likely doing small
         * chunk splice and the page is already there) for a single page.
         */
        if (!offset || nr_pages > 1)
                do_page_cache_readahead(mapping, in, index, nr_pages);

        /*
         * now fill in the holes
         */
        error = 0;
        for (i = 0; i < nr_pages; i++, index++) {
find_page:
                /*
                 * lookup the page for this index
                 */
                page = find_get_page(mapping, index);
                if (!page) {
                        /*
                         * If in nonblock mode then don't block on
                         * readpage (we've kicked readahead so there
                         * will be asynchronous progress):
                         */
                        if (flags & SPLICE_F_NONBLOCK)
                                break;

                        /*
                         * page didn't exist, allocate one
                         */
                        page = page_cache_alloc_cold(mapping);
                        if (!page)
                                break;

                        error = add_to_page_cache_lru(page, mapping, index,
                                                      mapping_gfp_mask(mapping));
                        if (unlikely(error)) {
                                page_cache_release(page);
                                break;
                        }

                        goto readpage;
                }

                /*
                 * If the page isn't uptodate, we may need to start io on it
                 */
                if (!PageUptodate(page)) {
                        lock_page(page);

                        /*
                         * page was truncated, stop here. if this isn't the
                         * first page, we'll just complete what we already
                         * added
                         */
                        if (!page->mapping) {
                                unlock_page(page);
                                page_cache_release(page);
                                break;
                        }
                        /*
                         * page was already under io and is now done, great
                         */
                        if (PageUptodate(page)) {
                                unlock_page(page);
                                goto fill_it;
                        }

readpage:
                        /*
                         * need to read in the page
                         */
                        error = mapping->a_ops->readpage(in, page);

                        if (unlikely(error)) {
                                page_cache_release(page);
                                if (error == AOP_TRUNCATED_PAGE)
                                        goto find_page;
                                break;
                        }
                }
fill_it:
                pages[i] = page;
        }

        if (i)
                return move_to_pipe(pipe, pages, i, offset, len, flags);

        return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in: file to splice from
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 *
 */
ssize_t generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
                                 size_t len, unsigned int flags)
{
        ssize_t spliced;
        int ret;

        ret = 0;
        spliced = 0;
        while (len) {
                ret = __generic_file_splice_read(in, pipe, len, flags);

                if (ret <= 0)
                        break;

                in->f_pos += ret;
                len -= ret;
                spliced += ret;

                if (!(flags & SPLICE_F_NONBLOCK))
                        continue;
                ret = -EAGAIN;
                break;
        }

        if (spliced)
                return spliced;

        return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage().
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
                            struct pipe_buffer *buf, struct splice_desc *sd)
{
        struct file *file = sd->file;
        loff_t pos = sd->pos;
        unsigned int offset;
        ssize_t ret;
        void *ptr;
        int more;

        /*
         * sub-optimal, but we are limited by the pipe ->map. we don't
         * need a kmap'ed buffer here, we just want to make sure we
         * have the page pinned if the pipe page originates from the
         * page cache
         */
        ptr = buf->ops->map(file, info, buf);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        offset = pos & ~PAGE_CACHE_MASK;
        more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

        ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);

        buf->ops->unmap(info, buf);
        if (ret == sd->len)
                return 0;

        return -EIO;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *      - Destination page already exists in the address space and there
 *        are users of it. For that case we have no other option than
 *        copying the data. Tough luck.
 *      - Destination page already exists in the address space, but there
 *        are no users of it. Make sure it's uptodate, then drop it. Fall
 *        through to last case.
 *      - Destination page does not exist, we can add the pipe page to
 *        the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        struct file *file = sd->file;
        struct address_space *mapping = file->f_mapping;
        gfp_t gfp_mask = mapping_gfp_mask(mapping);
        unsigned int offset;
        struct page *page;
        pgoff_t index;
        char *src;
        int ret;

        /*
         * make sure the data in this buffer is uptodate
         */
        src = buf->ops->map(file, info, buf);
        if (IS_ERR(src))
                return PTR_ERR(src);

        index = sd->pos >> PAGE_CACHE_SHIFT;
        offset = sd->pos & ~PAGE_CACHE_MASK;

        /*
         * reuse buf page, if SPLICE_F_MOVE is set
         */
        if (sd->flags & SPLICE_F_MOVE) {
                /*
                 * If steal succeeds, buf->page is now pruned from the vm
                 * side (LRU and page cache) and we can reuse it.
                 */
                if (buf->ops->steal(info, buf))
                        goto find_page;

                /*
                 * this will also set the page locked
                 */
                page = buf->page;
                if (add_to_page_cache(page, mapping, index, gfp_mask))
                        goto find_page;

                if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                        lru_cache_add(page);
        } else {
find_page:
                ret = -ENOMEM;
                page = find_or_create_page(mapping, index, gfp_mask);
                if (!page)
                        goto out_nomem;

                /*
                 * If the page is uptodate, it is also locked. If it isn't
                 * uptodate, we can mark it uptodate if we are filling the
                 * full page. Otherwise we need to read it in first...
                 */
                if (!PageUptodate(page)) {
                        if (sd->len < PAGE_CACHE_SIZE) {
                                ret = mapping->a_ops->readpage(file, page);
                                if (unlikely(ret))
                                        goto out;

                                lock_page(page);

                                if (!PageUptodate(page)) {
                                        /*
                                         * page got invalidated, repeat
                                         */
                                        if (!page->mapping) {
                                                unlock_page(page);
                                                page_cache_release(page);
                                                goto find_page;
                                        }
                                        ret = -EIO;
                                        goto out;
                                }
                        } else {
                                WARN_ON(!PageLocked(page));
                                SetPageUptodate(page);
                        }
                }
        }

        ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
                char *dst = kmap_atomic(page, KM_USER0);

                memcpy(dst + offset, src + buf->offset, sd->len);
                flush_dcache_page(page);
                kunmap_atomic(dst, KM_USER0);
        }

        ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        mark_page_accessed(page);
        balance_dirty_pages_ratelimited(mapping);
out:
        if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
                page_cache_release(page);
                unlock_page(page);
        }
out_nomem:
        buf->ops->unmap(info, buf);
        return ret;
}

typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
                           struct splice_desc *);

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
                              size_t len, unsigned int flags,
                              splice_actor *actor)
{
        int ret, do_wakeup, err;
        struct splice_desc sd;

        ret = 0;
        do_wakeup = 0;

        sd.total_len = len;
        sd.flags = flags;
        sd.file = out;
        sd.pos = out->f_pos;

        if (pipe->inode)
                mutex_lock(&pipe->inode->i_mutex);

        for (;;) {
                if (pipe->nrbufs) {
                        struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
                        struct pipe_buf_operations *ops = buf->ops;

                        sd.len = buf->len;
                        if (sd.len > sd.total_len)
                                sd.len = sd.total_len;

                        err = actor(pipe, buf, &sd);
                        if (err) {
                                if (!ret && err != -ENODATA)
                                        ret = err;

                                break;
                        }

                        ret += sd.len;
                        buf->offset += sd.len;
                        buf->len -= sd.len;

                        if (!buf->len) {
                                buf->ops = NULL;
                                ops->release(pipe, buf);
                                pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
                                pipe->nrbufs--;
                                if (pipe->inode)
                                        do_wakeup = 1;
                        }

                        sd.pos += sd.len;
                        sd.total_len -= sd.len;
                        if (!sd.total_len)
                                break;
                }

                if (pipe->nrbufs)
                        continue;
                if (!pipe->writers)
                        break;
                if (!pipe->waiting_writers) {
                        if (ret)
                                break;
                }

                if (flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (do_wakeup) {
                        smp_mb();
                        if (waitqueue_active(&pipe->wait))
                                wake_up_interruptible_sync(&pipe->wait);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                        do_wakeup = 0;
                }

                pipe_wait(pipe);
        }

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }

        out->f_pos = sd.pos;
        return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe: pipe info
 * @out: file to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
                          size_t len, unsigned int flags)
{
        struct address_space *mapping = out->f_mapping;
        ssize_t ret;

        ret = move_from_pipe(pipe, out, len, flags, pipe_to_file);

        /*
         * if file or inode is SYNC and we actually wrote some data, sync it
         */
        if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
            && ret > 0) {
                struct inode *inode = mapping->host;
                int err;

                mutex_lock(&inode->i_mutex);
                err = generic_osync_inode(mapping->host, mapping,
                                          OSYNC_METADATA|OSYNC_DATA);
                mutex_unlock(&inode->i_mutex);

                if (err)
                        ret = err;
        }

        return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
                                size_t len, unsigned int flags)
{
        return move_from_pipe(pipe, out, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
                           size_t len, unsigned int flags)
{
        loff_t pos;
        int ret;

        if (!out->f_op || !out->f_op->splice_write)
                return -EINVAL;

        if (!(out->f_mode & FMODE_WRITE))
                return -EBADF;

        pos = out->f_pos;
        ret = rw_verify_area(WRITE, out, &pos, len);
        if (unlikely(ret < 0))
                return ret;

        return out->f_op->splice_write(pipe, out, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, struct pipe_inode_info *pipe,
                         size_t len, unsigned int flags)
{
        loff_t pos, isize, left;
        int ret;

        if (!in->f_op || !in->f_op->splice_read)
                return -EINVAL;

        if (!(in->f_mode & FMODE_READ))
                return -EBADF;

        pos = in->f_pos;
        ret = rw_verify_area(READ, in, &pos, len);
        if (unlikely(ret < 0))
                return ret;

        isize = i_size_read(in->f_mapping->host);
        if (unlikely(in->f_pos >= isize))
                return 0;

        left = isize - in->f_pos;
        if (left < len)
                len = left;

        return in->f_op->splice_read(in, pipe, len, flags);
}

long do_splice_direct(struct file *in, struct file *out, size_t len,
                      unsigned int flags)
{
        struct pipe_inode_info *pipe;
        long ret, bytes;
        umode_t i_mode;
        int i;

        /*
         * We require the input to be a regular file, as we don't want to
         * randomly drop data for eg socket -> socket splicing. Use the
         * piped splicing for that!
         */
        i_mode = in->f_dentry->d_inode->i_mode;
        if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
                return -EINVAL;

        /*
         * neither in nor out is a pipe, setup an internal pipe attached to
         * 'out' and transfer the wanted data from 'in' to 'out' through that
         */
        pipe = current->splice_pipe;
        if (!pipe) {
                pipe = alloc_pipe_info(NULL);
                if (!pipe)
                        return -ENOMEM;

                /*
                 * We don't have an immediate reader, but we'll read the stuff
                 * out of the pipe right after the move_to_pipe(). So set
                 * PIPE_READERS appropriately.
                 */
                pipe->readers = 1;

                current->splice_pipe = pipe;
        }

        /*
         * do the splice
         */
        ret = 0;
        bytes = 0;

        while (len) {
                size_t read_len, max_read_len;

                /*
                 * Do at most PIPE_BUFFERS pages worth of transfer:
                 */
                max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

                ret = do_splice_to(in, pipe, max_read_len, flags);
                if (unlikely(ret < 0))
                        goto out_release;

                read_len = ret;

                /*
                 * NOTE: nonblocking mode only applies to the input. We
                 * must not do the output in nonblocking mode as then we
                 * could get stuck data in the internal pipe:
                 */
                ret = do_splice_from(pipe, out, read_len,
                                     flags & ~SPLICE_F_NONBLOCK);
                if (unlikely(ret < 0))
                        goto out_release;

                bytes += ret;
                len -= ret;

                /*
                 * In nonblocking mode, if we got back a short read then
                 * that was due to either an IO error or due to the
                 * pagecache entry not being there. In the IO error case
                 * the _next_ splice attempt will produce a clean IO error
                 * return value (not a short read), so in both cases it's
                 * correct to break out of the loop here:
                 */
                if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
                        break;
        }

        pipe->nrbufs = pipe->curbuf = 0;

        return bytes;

out_release:
        /*
         * If we did an incomplete transfer we must release
         * the pipe buffers in question:
         */
        for (i = 0; i < PIPE_BUFFERS; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;

                if (buf->ops) {
                        buf->ops->release(pipe, buf);
                        buf->ops = NULL;
                }
        }
        pipe->nrbufs = pipe->curbuf = 0;

        /*
         * If we transferred some data, return the number of bytes:
         */
        if (bytes > 0)
                return bytes;

        return ret;
}

EXPORT_SYMBOL(do_splice_direct);

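/*
 * Illustrative only: an in-kernel caller that wants a file -> file/socket
 * transfer without a user-visible pipe (a sendfile()-style path, for
 * instance) could drive do_splice_direct() roughly like this, assuming it
 * already holds references on both struct file pointers:
 *
 *        long ret = do_splice_direct(in_file, out_file, count, 0);
 *        if (ret > 0)
 *                ;        // 'ret' bytes went through the per-task internal pipe
 *
 * This is a sketch of intended use, not code lifted from any caller.
 */
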
/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
                      struct file *out, loff_t __user *off_out,
                      size_t len, unsigned int flags)
{
        struct pipe_inode_info *pipe;

        pipe = in->f_dentry->d_inode->i_pipe;
        if (pipe) {
                if (off_in)
                        return -ESPIPE;
                if (off_out) {
                        if (out->f_op->llseek == no_llseek)
                                return -EINVAL;
                        if (copy_from_user(&out->f_pos, off_out,
                                           sizeof(loff_t)))
                                return -EFAULT;
                }

                return do_splice_from(pipe, out, len, flags);
        }

        pipe = out->f_dentry->d_inode->i_pipe;
        if (pipe) {
                if (off_out)
                        return -ESPIPE;
                if (off_in) {
                        if (in->f_op->llseek == no_llseek)
                                return -EINVAL;
                        if (copy_from_user(&in->f_pos, off_in, sizeof(loff_t)))
                                return -EFAULT;
                }

                return do_splice_to(in, pipe, len, flags);
        }

        return -EINVAL;
}

asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
                           int fd_out, loff_t __user *off_out,
                           size_t len, unsigned int flags)
{
        long error;
        struct file *in, *out;
        int fput_in, fput_out;

        if (unlikely(!len))
                return 0;

        error = -EBADF;
        in = fget_light(fd_in, &fput_in);
        if (in) {
                if (in->f_mode & FMODE_READ) {
                        out = fget_light(fd_out, &fput_out);
                        if (out) {
                                if (out->f_mode & FMODE_WRITE)
                                        error = do_splice(in, off_in,
                                                          out, off_out,
                                                          len, flags);
                                fput_light(out, fput_out);
                        }
                }

                fput_light(in, fput_in);
        }

        return error;
}
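
/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * classic way to drive the syscall above is to push file data out of a
 * socket through a pipe, so nothing is copied into userspace on the way.
 * It assumes a libc that exposes splice() plus the SPLICE_F_MOVE and
 * SPLICE_F_MORE flags, and a socket whose file_operations wire up
 * generic_splice_sendpage as ->splice_write; most error handling is
 * omitted:
 *
 *        int pfd[2];
 *        size_t left = file_size;        // however the caller obtained it
 *
 *        pipe(pfd);
 *        while (left) {
 *                // file -> pipe; a pipe must be on one side of each call
 *                ssize_t n = splice(file_fd, NULL, pfd[1], NULL, left,
 *                                   SPLICE_F_MOVE | SPLICE_F_MORE);
 *                if (n <= 0)
 *                        break;
 *                // pipe -> socket, serviced by the sendpage path above
 *                splice(pfd[0], NULL, sock_fd, NULL, n, SPLICE_F_MOVE);
 *                left -= n;
 *        }
 */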