/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
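
/*
 * For reference, a sketch of the userspace-visible operation this file
 * implements (prototype as typically exposed with _GNU_SOURCE via
 * <fcntl.h>; older libcs may need syscall(__NR_splice, ...) instead):
 *
 *	ssize_t splice(int fd_in, loff_t *off_in, int fd_out,
 *		       loff_t *off_out, size_t len, unsigned int flags);
 *
 * At least one of fd_in/fd_out must refer to a pipe; the call moves up
 * to 'len' bytes between the two descriptors without copying the data
 * through a userspace buffer.
 */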

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>

/*
 * Passed to the actors
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->page = NULL;
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}

static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, fall through to mapping.
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}

static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}

static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
				    struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = page_cache_pipe_buf_get,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
			    int nr_pages, unsigned long offset,
			    unsigned long len, unsigned int flags)
{
	int ret, do_wakeup, i;

	ret = 0;
	do_wakeup = 0;
	i = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pages[i++];
			unsigned long this_len;

			this_len = PAGE_CACHE_SIZE - offset;
			if (this_len > len)
				this_len = len;

			buf->page = page;
			buf->offset = offset;
			buf->len = this_len;
			buf->ops = &page_cache_pipe_buf_ops;
			pipe->nrbufs++;
			if (pipe->inode)
				do_wakeup = 1;

			ret += this_len;
			len -= this_len;
			offset = 0;
			if (!--nr_pages)
				break;
			if (!len)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (i < nr_pages)
		page_cache_release(pages[i++]);

	return ret;
}

static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index;
	int i, error;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. However, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!offset || nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * Now fill in the holes:
	 */
	error = 0;
	for (i = 0; i < nr_pages; i++, index++) {
find_page:
		/*
		 * lookup the page for this index
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * If in nonblock mode then don't block on
			 * readpage (we've kicked readahead so there
			 * will be asynchronous progress):
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			/*
			 * page didn't exist, allocate one
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						      mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}

			goto readpage;
		}

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

readpage:
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == AOP_TRUNCATED_PAGE)
					goto find_page;
				break;
			}
		}
fill_it:
		pages[i] = page;
	}

	if (i)
		return move_to_pipe(pipe, pages, i, offset, len, flags);

	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in: file to splice from
 * @ppos: position in @in to start reading from
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret <= 0)
			break;

		*ppos += ret;
		len -= ret;
		spliced += ret;

		if (!(flags & SPLICE_F_NONBLOCK))
			continue;
		ret = -EAGAIN;
		break;
	}

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
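
/*
 * A minimal userspace sketch of what this export ends up servicing:
 * moving file data straight into a pipe (the fd names, sizes and the
 * lack of error handling here are illustrative only):
 *
 *	int pfd[2];
 *	loff_t off = 0;
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	n = splice(file_fd, &off, pfd[1], NULL, 16384, 0);
 *	// on success, up to 'n' bytes of file_fd starting at offset 0
 *	// now sit in the pipe, ready to be spliced onwards
 */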

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage().
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	unsigned int offset;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * Sub-optimal, but we are limited by the pipe ->map. We don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache.
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	offset = pos & ~PAGE_CACHE_MASK;
	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);

	buf->ops->unmap(info, buf);
	if (ret == sd->len)
		return 0;

	return -EIO;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset;
	struct page *page;
	pgoff_t index;
	char *src;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	src = buf->ops->map(file, info, buf);
	if (IS_ERR(src))
		return PTR_ERR(src);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	/*
	 * Reuse buf page, if SPLICE_F_MOVE is set.
	 */
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		/*
		 * this will also set the page locked
		 */
		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		ret = -ENOMEM;
		page = find_or_create_page(mapping, index, gfp_mask);
		if (!page)
			goto out_nomem;

		/*
		 * If the page is uptodate, it is also locked. If it isn't
		 * uptodate, we can mark it uptodate if we are filling the
		 * full page. Otherwise we need to read it in first...
		 */
		if (!PageUptodate(page)) {
			if (sd->len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * Page got invalidated, repeat.
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else {
				WARN_ON(!PageLocked(page));
				SetPageUptodate(page);
			}
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, sd->len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER0);
	}

	ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		page_cache_release(page);
		unlock_page(page);
	}
out_nomem:
	buf->ops->unmap(info, buf);
	return ret;
}
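
/*
 * From userspace the move path above is only a hint: passing
 * SPLICE_F_MOVE (sketch below, fd names illustrative) asks the kernel to
 * try to steal the pipe pages instead of copying them, but the copy
 * path described above is still used whenever stealing fails:
 *
 *	splice(pfd[0], NULL, file_fd, &out_off, len, SPLICE_F_MOVE);
 */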

typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);

/*
 * Pipe input worker. Most of this logic works like a regular pipe; the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			      loff_t *ppos, size_t len, unsigned int flags,
			      splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = *ppos;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}
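
/*
 * The actor interface above is what a new splice destination would plug
 * into. As an illustration only (pipe_to_null is hypothetical and not
 * part of this file), an actor that merely consumes sd->len bytes could
 * look like:
 *
 *	static int pipe_to_null(struct pipe_inode_info *info,
 *				struct pipe_buffer *buf,
 *				struct splice_desc *sd)
 *	{
 *		void *ptr = buf->ops->map(sd->file, info, buf);
 *
 *		if (IS_ERR(ptr))
 *			return PTR_ERR(ptr);
 *		buf->ops->unmap(info, buf);
 *		return 0;	// 0 means sd->len bytes were handled
 *	}
 *
 * move_from_pipe() would then be called with this as the 'actor'.
 */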

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out to start writing at
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret;

	ret = move_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);

	/*
	 * If file or inode is SYNC and we actually wrote some data, sync it.
	 */
	if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
	    && ret > 0) {
		struct inode *inode = mapping->host;
		int err;

		mutex_lock(&inode->i_mutex);
		err = generic_osync_inode(mapping->host, mapping,
					  OSYNC_METADATA|OSYNC_DATA);
		mutex_unlock(&inode->i_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_write);
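
/*
 * A filesystem that wants splice support wires these generic helpers
 * into its file_operations. A sketch only (the struct name is
 * illustrative, and whether the generic helpers are usable depends on
 * the filesystem's address_space operations):
 *
 *	static struct file_operations example_file_ops = {
 *		...
 *		.splice_read	= generic_file_splice_read,
 *		.splice_write	= generic_file_splice_write,
 *	};
 */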

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return move_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}
EXPORT_SYMBOL(generic_splice_sendpage);
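
/*
 * The classic use of the sendpage path is serving file data to a TCP
 * socket through a pipe, roughly as below (userspace sketch; sizes,
 * flags and the missing error handling are illustrative only):
 *
 *	int pfd[2];
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	while ((n = splice(file_fd, NULL, pfd[1], NULL, 65536,
 *			   SPLICE_F_MOVE | SPLICE_F_MORE)) > 0)
 *		splice(pfd[0], NULL, sock_fd, NULL, n,
 *		       SPLICE_F_MOVE | SPLICE_F_MORE);
 */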

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}

long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	loff_t out_off;
	umode_t i_mode;
	int i;

	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for e.g. socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the move_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	out_off = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, &out_off, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}
EXPORT_SYMBOL(do_splice_direct);
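
/*
 * do_splice_direct() is intended for in-kernel callers that want a
 * file-to-file transfer without a user-visible pipe (sendfile-style
 * paths are the expected users). A purely illustrative caller sketch:
 *
 *	loff_t pos = in_offset;
 *	long ret = do_splice_direct(in_file, &pos, out_file, count, 0);
 */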

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;

	pipe = in->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &out->f_pos;

		return do_splice_from(pipe, out, off, len, flags);
	}

	pipe = out->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &in->f_pos;

		return do_splice_to(in, off, pipe, len, flags);
	}

	return -EINVAL;
}
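
/*
 * The offset rules above, seen from userspace (fd names illustrative):
 * passing an offset pointer for the non-pipe side splices at that
 * position and leaves the file's own f_pos alone, while passing one for
 * the pipe side fails with ESPIPE.
 *
 *	loff_t off = 4096;
 *	splice(file_fd, &off, pfd[1], NULL, 8192, 0);	// reads at 4096
 *	splice(pfd[0], &off, file_fd, NULL, 8192, 0);	// fails, ESPIPE
 */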

asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}

/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, do_wakeup = 0, i;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	if (ipipe->inode < opipe->inode) {
		mutex_lock(&ipipe->inode->i_mutex);
		mutex_lock(&opipe->inode->i_mutex);
	} else {
		mutex_lock(&opipe->inode->i_mutex);
		mutex_lock(&ipipe->inode->i_mutex);
	}

	for (i = 0;; i++) {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		if (ipipe->nrbufs - i) {
			ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));

			/*
			 * If we have room, fill this buffer
			 */
			if (opipe->nrbufs < PIPE_BUFFERS) {
				int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

				/*
				 * Get a reference to this pipe buffer,
				 * so we can copy the contents over.
				 */
				ibuf->ops->get(ipipe, ibuf);

				obuf = opipe->bufs + nbuf;
				*obuf = *ibuf;

				if (obuf->len > len)
					obuf->len = len;

				opipe->nrbufs++;
				do_wakeup = 1;
				ret += obuf->len;
				len -= obuf->len;

				if (!len)
					break;
				if (opipe->nrbufs < PIPE_BUFFERS)
					continue;
			}

			/*
			 * We have input available, but no output room.
			 * If we already copied data, return that.
			 */
			if (flags & SPLICE_F_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			if (do_wakeup) {
				smp_mb();
				if (waitqueue_active(&opipe->wait))
					wake_up_interruptible(&opipe->wait);
				kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
				do_wakeup = 0;
			}

			opipe->waiting_writers++;
			pipe_wait(opipe);
			opipe->waiting_writers--;
			continue;
		}

		/*
		 * No input buffers: do the usual checks for available
		 * writers and blocking, and wait if necessary.
		 */
		if (!ipipe->writers)
			break;
		if (!ipipe->waiting_writers) {
			if (ret)
				break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (waitqueue_active(&ipipe->wait))
			wake_up_interruptible_sync(&ipipe->wait);
		kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);

		pipe_wait(ipipe);
	}

	mutex_unlock(&ipipe->inode->i_mutex);
	mutex_unlock(&opipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
	struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;

	/*
	 * Link the contents of ipipe to opipe; the input buffers are
	 * referenced, not consumed.
	 */
	if (ipipe && opipe)
		return link_pipe(ipipe, opipe, len, flags);

	return -EINVAL;
}

asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	struct file *in;
	int error, fput_in;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
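
/*
 * Userspace view of the above (illustrative; assumes both stdin and
 * log_pipe_fd are pipes): duplicate everything arriving on stdin into a
 * logging pipe while still passing it downstream. tee(2) never consumes
 * the data it duplicates, so the subsequent splice() still sees it:
 *
 *	ssize_t n = tee(STDIN_FILENO, log_pipe_fd, 65536, SPLICE_F_NONBLOCK);
 *	if (n > 0)
 *		splice(STDIN_FILENO, NULL, STDOUT_FILENO, NULL, n, 0);
 */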