/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
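
/*
 * Note: the SPLICE_F_MOVE, SPLICE_F_NONBLOCK and SPLICE_F_MORE flag bits
 * used throughout this file are part of the splice ABI and are defined in
 * the shared headers rather than here. As used below: SPLICE_F_MOVE asks
 * pipe_to_file() to try stealing the pipe page instead of copying it,
 * SPLICE_F_NONBLOCK makes the pipe loops return -EAGAIN instead of
 * sleeping, and SPLICE_F_MORE is passed through to ->sendpage() as the
 * "more data follows" hint.
 */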

/*
 * Passed to the actors
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}
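
/*
 * Drop the pipe buffer's reference to the page and clear the stolen/LRU
 * state, so the buffer slot can be reused.
 */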
static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->page = NULL;
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}

static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, fall through to mapping.
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}
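
/*
 * Undo the kmap() taken in page_cache_pipe_buf_map() above.
 */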
static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}
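
/*
 * Buffer operations for pipe buffers whose pages come out of (or are
 * destined for) the page cache. Such buffers are never merged with
 * later writes, hence can_merge = 0.
 */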
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
			    int nr_pages, unsigned long offset,
			    unsigned long len, unsigned int flags)
{
	int ret, do_wakeup, i;

	ret = 0;
	do_wakeup = 0;
	i = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pages[i++];
			unsigned long this_len;

			this_len = PAGE_CACHE_SIZE - offset;
			if (this_len > len)
				this_len = len;

			buf->page = page;
			buf->offset = offset;
			buf->len = this_len;
			buf->ops = &page_cache_pipe_buf_ops;
			pipe->nrbufs++;
			if (pipe->inode)
				do_wakeup = 1;

			ret += this_len;
			len -= this_len;
			offset = 0;
			if (!--nr_pages)
				break;
			if (!len)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (i < nr_pages)
		page_cache_release(pages[i++]);

	return ret;
}
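
/*
 * Read at most PIPE_BUFFERS pages worth of data from 'in', starting at
 * its current file position, into the page cache (kicking readahead as
 * needed), then hand the pages to move_to_pipe(). Returns the number of
 * bytes spliced, 0, or a negative error.
 */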
static int
__generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index;
	int i, error;

	index = in->f_pos >> PAGE_CACHE_SHIFT;
	offset = in->f_pos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. However, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!offset || nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * Now fill in the holes:
	 */
	error = 0;
	for (i = 0; i < nr_pages; i++, index++) {
find_page:
		/*
		 * lookup the page for this index
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * If in nonblock mode then don't block on
			 * readpage (we've kicked readahead so there
			 * will be asynchronous progress):
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			/*
			 * page didn't exist, allocate one
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						      mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}

			goto readpage;
		}

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * Page was truncated, stop here. If this isn't the
			 * first page, we'll just complete what we already
			 * added.
			 */
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

readpage:
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == AOP_TRUNCATED_PAGE)
					goto find_page;

				break;
			}
		}
fill_it:
		pages[i] = page;
	}

	if (i)
		return move_to_pipe(pipe, pages, i, offset, len, flags);

	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
				 size_t len, unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, pipe, len, flags);

		if (ret <= 0)
			break;

		in->f_pos += ret;
		len -= ret;
		spliced += ret;

		if (!(flags & SPLICE_F_NONBLOCK))
			continue;
		ret = -EAGAIN;
		break;
	}

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage().
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	unsigned int offset;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * Sub-optimal, but we are limited by the pipe ->map. We don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache.
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	offset = pos & ~PAGE_CACHE_MASK;
	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);

	buf->ops->unmap(info, buf);
	if (ret == sd->len)
		return 0;

	return -EIO;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset;
	struct page *page;
	pgoff_t index;
	char *src;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	src = buf->ops->map(file, info, buf);
	if (IS_ERR(src))
		return PTR_ERR(src);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	/*
	 * Reuse buf page, if SPLICE_F_MOVE is set.
	 */
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		/*
		 * this will also set the page locked
		 */
		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		ret = -ENOMEM;
		page = find_or_create_page(mapping, index, gfp_mask);
		if (!page)
			goto out_nomem;

		/*
		 * If the page is uptodate, it is also locked. If it isn't
		 * uptodate, we can mark it uptodate if we are filling the
		 * full page. Otherwise we need to read it in first...
		 */
		if (!PageUptodate(page)) {
			if (sd->len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * Page got invalidated, repeat.
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else {
				WARN_ON(!PageLocked(page));
				SetPageUptodate(page);
			}
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, sd->len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER0);
	}

	ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		page_cache_release(page);
		unlock_page(page);
	}
out_nomem:
	buf->ops->unmap(info, buf);
	return ret;
}

typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	out->f_pos = sd.pos;
	return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret;

	ret = move_from_pipe(pipe, out, len, flags, pipe_to_file);

	/*
	 * If file or inode is SYNC and we actually wrote some data, sync it.
	 */
	if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
	    && ret > 0) {
		struct inode *inode = mapping->host;
		int err;

		mutex_lock(&inode->i_mutex);
		err = generic_osync_inode(mapping->host, mapping,
					  OSYNC_METADATA|OSYNC_DATA);
		mutex_unlock(&inode->i_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				size_t len, unsigned int flags)
{
	return move_from_pipe(pipe, out, len, flags, pipe_to_sendpage);
}
EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   size_t len, unsigned int flags)
{
	loff_t pos;
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	pos = out->f_pos;
	ret = rw_verify_area(WRITE, out, &pos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, struct pipe_inode_info *pipe,
			 size_t len, unsigned int flags)
{
	loff_t pos, isize, left;
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	pos = in->f_pos;
	ret = rw_verify_area(READ, in, &pos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(in->f_pos >= isize))
		return 0;

	left = isize - in->f_pos;
	if (unlikely(left < len))
		len = left;

	return in->f_op->splice_read(in, pipe, len, flags);
}
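
/*
 * Splice data directly from 'in' to 'out', using an internal pipe cached
 * on the current task as the intermediate buffer, so no user-visible
 * pipe is needed. This is the helper used for in-kernel "direct"
 * splicing (e.g. the sendfile() path).
 */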
long do_splice_direct(struct file *in, struct file *out, size_t len,
		      unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	int i;

	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for e.g. socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * Neither in nor out is a pipe, so set up an internal pipe attached
	 * to the current task and transfer the wanted data from 'in' to
	 * 'out' through that.
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the move_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}
EXPORT_SYMBOL(do_splice_direct);

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;

	pipe = in->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&out->f_pos, off_out,
					   sizeof(loff_t)))
				return -EFAULT;
		}

		return do_splice_from(pipe, out, len, flags);
	}

	pipe = out->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&in->f_pos, off_in, sizeof(loff_t)))
				return -EFAULT;
		}

		return do_splice_to(in, pipe, len, flags);
	}

	return -EINVAL;
}
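
/*
 * The splice(2) system call entry point: look up both file descriptors,
 * check that the input is readable and the output writable, then hand
 * the real work off to do_splice().
 */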
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);

				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
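
/*
 * Illustrative only, not part of this file: a minimal sketch of how a
 * userspace program might drive the splice(2) syscall implemented above,
 * moving data from an open file into the write end of a pipe. A libc
 * wrapper may not exist for a syscall this new, so the sketch assumes a
 * raw syscall() via __NR_splice; file_fd, pipe_wr_fd and file_to_pipe()
 * are hypothetical names, and flags such as SPLICE_F_MOVE or
 * SPLICE_F_NONBLOCK could be OR'd into the last argument.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *
 *	static long file_to_pipe(int file_fd, int pipe_wr_fd, size_t len)
 *	{
 *		return syscall(__NR_splice, file_fd, (loff_t *)0,
 *			       pipe_wr_fd, (loff_t *)0, len, 0U);
 *	}
 *
 * Passing NULL offsets uses (and advances) each file's own position, as
 * handled by do_splice() above.
 */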