/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
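
/*
 * Create the slab cache from which ext4_io_end structures are
 * allocated; called once at module init.  ext4_exit_pageio() destroys
 * the cache again on unload.
 */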
int __init ext4_init_pageio(void)
{
        io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
        if (io_end_cachep == NULL)
                return -ENOMEM;
        return 0;
}

void ext4_exit_pageio(void)
{
        kmem_cache_destroy(io_end_cachep);
}

/*
 * Print a buffer I/O error message compatible with the one in
 * fs/buffer.c.  This provides compatibility with dmesg scrapers that
 * look for a specific buffer I/O error message.  We really need a
 * unified error reporting structure to userspace a la Digital Unix's
 * uerf system, but it's probably not going to happen in my lifetime,
 * due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
               bdevname(bh->b_bdev, b),
               (unsigned long long)bh->b_blocknr);
}
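
/*
 * For each page covered by the finished bio: clear the async_write
 * flag on the buffer heads the bio wrote, flag the page and its
 * mapping on error, and end page writeback once no buffer in the page
 * is still under IO.
 */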
static void ext4_finish_bio(struct bio *bio)
{
        int i;
        int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct bio_vec *bvec = &bio->bi_io_vec[i];
                struct page *page = bvec->bv_page;
                struct buffer_head *bh, *head;
                unsigned bio_start = bvec->bv_offset;
                unsigned bio_end = bio_start + bvec->bv_len;
                unsigned under_io = 0;
                unsigned long flags;

                if (!page)
                        continue;

                if (error) {
                        SetPageError(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                }
                bh = head = page_buffers(page);
                /*
                 * We check all buffers in the page under BH_Uptodate_Lock
                 * to avoid races with other end_io handlers clearing the
                 * async_write flags.
                 */
                local_irq_save(flags);
                bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
                do {
                        if (bh_offset(bh) < bio_start ||
                            bh_offset(bh) + bh->b_size > bio_end) {
                                if (buffer_async_write(bh))
                                        under_io++;
                                continue;
                        }
                        clear_buffer_async_write(bh);
                        if (error)
                                buffer_io_error(bh);
                } while ((bh = bh->b_this_page) != head);
                bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
                local_irq_restore(flags);
                if (!under_io)
                        end_page_writeback(page);
        }
}
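
/*
 * Final teardown of an io_end: finish and free all bios chained off
 * it, drop the inode's outstanding-io_end count (waking any waiters),
 * complete deferred DIO/AIO bookkeeping, and free the structure.  The
 * io_end must already be off all lists with its unwritten flag clear.
 */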
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
        struct bio *bio, *next_bio;

        BUG_ON(!list_empty(&io_end->list));
        BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
        WARN_ON(io_end->handle);

        if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
                wake_up_all(ext4_ioend_wq(io_end->inode));

        for (bio = io_end->bio; bio; bio = next_bio) {
                next_bio = bio->bi_private;
                ext4_finish_bio(bio);
                bio_put(bio);
        }
        if (io_end->flag & EXT4_IO_END_DIRECT)
                inode_dio_done(io_end->inode);
        if (io_end->iocb)
                aio_complete(io_end->iocb, io_end->result, 0);
        kmem_cache_free(io_end_cachep, io_end);
}

static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
{
        struct inode *inode = io_end->inode;

        io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
        /* Wake up anyone waiting on unwritten extent conversion */
        if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
                wake_up_all(ext4_ioend_wq(inode));
}

/*
 * Check a range of space and convert unwritten extents to written.  Note
 * that we are protected from truncate touching the same part of the extent
 * tree by the fact that truncate code waits for all DIO to finish (thus
 * exclusion from direct IO is achieved) and also waits for PageWriteback
 * bits.  Thus we cannot get to ext4_ext_truncate() before all IOs
 * overlapping that range are completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io(ext4_io_end_t *io)
{
        struct inode *inode = io->inode;
        loff_t offset = io->offset;
        ssize_t size = io->size;
        handle_t *handle = io->handle;
        int ret = 0;

        ext4_debug("ext4_end_io: io 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io, inode->i_ino, io->list.next, io->list.prev);

        io->handle = NULL;      /* Following call will use up the handle */
        ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
        if (ret < 0) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
                         "extents -- potential data loss! "
                         "(inode %lu, offset %llu, size %zd, error %d)",
                         inode->i_ino, offset, size, ret);
        }
        ext4_clear_io_unwritten_flag(io);
        ext4_release_io_end(io);
        return ret;
}
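
/* Debugging helper: compiles to a no-op unless EXT4FS_DEBUG is set. */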
static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;

        if (list_empty(head))
                return;

        ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
        list_for_each_entry(io, head, list) {
                cur = &io->list;
                before = cur->prev;
                io0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
                           io, inode->i_ino, io0, io1);
        }
#endif
}

/*
 * Add the io_end to the inode's list of completed end_io requests and,
 * if the list was empty, kick the matching conversion workqueue.
 */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct workqueue_struct *wq;
        unsigned long flags;

        BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        if (io_end->handle) {
                wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
                if (list_empty(&ei->i_rsv_conversion_list))
                        queue_work(wq, &ei->i_rsv_conversion_work);
                list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
        } else {
                wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq;
                if (list_empty(&ei->i_unrsv_conversion_list))
                        queue_work(wq, &ei->i_unrsv_conversion_work);
                list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list);
        }
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}
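
/*
 * Detach the whole completed-io list while holding the lock, then
 * convert each entry with the lock dropped.  The first error seen is
 * returned, but all remaining entries are still processed.
 */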
static int ext4_do_flush_completed_IO(struct inode *inode,
                                      struct list_head *head)
{
        ext4_io_end_t *io;
        struct list_head unwritten;
        unsigned long flags;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int err, ret = 0;

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        dump_completed_IO(inode, head);
        list_replace_init(head, &unwritten);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

        while (!list_empty(&unwritten)) {
                io = list_entry(unwritten.next, ext4_io_end_t, list);
                BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
                list_del_init(&io->list);

                err = ext4_end_io(io);
                if (unlikely(!ret && err))
                        ret = err;
        }
        return ret;
}

/*
 * Work on completed IO, converting unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
                                                  i_rsv_conversion_work);
        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

void ext4_end_io_unrsv_work(struct work_struct *work)
{
        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
                                                  i_unrsv_conversion_work);
        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list);
}
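
/*
 * Allocate a zeroed io_end with a single reference held by the caller,
 * and count it against the inode's outstanding io_ends.  The reference
 * is dropped via ext4_put_io_end() or ext4_put_io_end_defer().
 */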
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
        ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);

        if (io) {
                atomic_inc(&EXT4_I(inode)->i_ioend_count);
                io->inode = inode;
                INIT_LIST_HEAD(&io->list);
                atomic_set(&io->count, 1);
        }
        return io;
}
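
/*
 * Drop a reference; on the last put, release the io_end right away if
 * no unwritten extent conversion is pending, otherwise hand it to the
 * conversion workqueue.  This is the variant ext4_end_bio() uses from
 * completion context, where the conversion cannot be done directly.
 */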
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
        if (atomic_dec_and_test(&io_end->count)) {
                if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
                        ext4_release_io_end(io_end);
                        return;
                }
                ext4_add_complete_io(io_end);
        }
}
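
/*
 * Drop a reference; on the last put, do any pending unwritten extent
 * conversion synchronously and then release the io_end.  Unlike the
 * _defer variant this may sleep, so it is for process context only.
 */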
int ext4_put_io_end(ext4_io_end_t *io_end)
{
        int err = 0;

        if (atomic_dec_and_test(&io_end->count)) {
                if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                        err = ext4_convert_unwritten_extents(io_end->handle,
                                        io_end->inode, io_end->offset,
                                        io_end->size);
                        io_end->handle = NULL;
                        ext4_clear_io_unwritten_flag(io_end);
                }
                ext4_release_io_end(io_end);
        }
        return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
        atomic_inc(&io_end->count);
        return io_end;
}
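
/*
 * Completion callback for all bios submitted by this file.  Bios that
 * cover unwritten extents are chained onto the io_end (via bi_private)
 * so that ext4_release_io_end() can finish them after extent
 * conversion; all other bios are finished here directly.
 */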
static void ext4_end_bio(struct bio *bio, int error)
{
        ext4_io_end_t *io_end = bio->bi_private;
        sector_t bi_sector = bio->bi_sector;

        BUG_ON(!io_end);
        bio->bi_end_io = NULL;
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = 0;

        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                /*
                 * Link bio into list hanging from io_end. We have to do it
                 * atomically as bio completions can be racing against each
                 * other.
                 */
                bio->bi_private = xchg(&io_end->bio, bio);
        } else {
                ext4_finish_bio(bio);
                bio_put(bio);
        }

        if (error) {
                struct inode *inode = io_end->inode;

                ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
                             inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
        }
        ext4_put_io_end_defer(io_end);
}
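
/*
 * Submit the bio currently under construction, if any.  The extra
 * bio_get()/bio_put() pair keeps the bio alive across submit_bio() so
 * that the BIO_EOPNOTSUPP check is safe.
 */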
void ext4_io_submit(struct ext4_io_submit *io)
{
        struct bio *bio = io->io_bio;

        if (bio) {
                bio_get(io->io_bio);
                submit_bio(io->io_op, io->io_bio);
                BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
                bio_put(io->io_bio);
        }
        io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
                         struct writeback_control *wbc)
{
        io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
        io->io_bio = NULL;
        io->io_end = NULL;
}
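
/*
 * Start a new bio for the block covered by this buffer head.  The bio
 * takes its own reference on the current io_end, dropped later by
 * ext4_end_bio() via ext4_put_io_end_defer().
 */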
static int io_submit_init_bio(struct ext4_io_submit *io,
                              struct buffer_head *bh)
{
        int nvecs = bio_get_nr_vecs(bh->b_bdev);
        struct bio *bio;

        bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
        if (!bio)
                return -ENOMEM;
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
        io->io_bio = bio;
        io->io_next_block = bh->b_blocknr;
        return 0;
}
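
/*
 * Add a buffer to the bio under construction, submitting the bio and
 * starting a new one whenever the buffer is not contiguous with the
 * last block queued or bio_add_page() cannot take the full buffer.
 */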
static int io_submit_add_bh(struct ext4_io_submit *io,
                            struct inode *inode,
                            struct buffer_head *bh)
{
        int ret;

        if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
                ret = io_submit_init_bio(io, bh);
                if (ret)
                        return ret;
        }
        ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
        io->io_next_block++;
        return 0;
}
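
/*
 * Write out the dirty, mapped buffers of a locked page.  All buffers
 * are marked async_write in a first pass before any are submitted in a
 * second pass, so writeback on the page cannot be ended while later
 * buffers are still being queued (see the comment in the body).
 */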
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        unsigned block_start, blocksize;
        struct buffer_head *bh, *head;
        int ret = 0;
        int nr_submitted = 0;

        blocksize = 1 << inode->i_blkbits;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        set_page_writeback(page);
        ClearPageError(page);

        /*
         * In the first loop we prepare and mark buffers to submit. We have to
         * mark all buffers in the page before submitting so that
         * end_page_writeback() cannot be called from ext4_end_bio() when IO
         * on the first buffer finishes and we are still working on submitting
         * the second buffer.
         */
        bh = head = page_buffers(page);
        do {
                block_start = bh_offset(bh);
                if (block_start >= len) {
                        /*
                         * Comments copied from block_write_full_page_endio:
                         *
                         * The page straddles i_size.  It must be zeroed out
                         * on each and every writepage invocation because it
                         * may be mmapped.  "A file is mapped in multiples of
                         * the page size.  For a file that is not a multiple
                         * of the page size, the remaining memory is zeroed
                         * when mapped, and writes to that region are not
                         * written out to the file."
                         */
                        zero_user_segment(page, block_start,
                                          block_start + blocksize);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                if (!buffer_dirty(bh) || buffer_delay(bh) ||
                    !buffer_mapped(bh) || buffer_unwritten(bh)) {
                        /* A hole? We can safely clear the dirty bit */
                        if (!buffer_mapped(bh))
                                clear_buffer_dirty(bh);
                        if (io->io_bio)
                                ext4_io_submit(io);
                        continue;
                }
                if (buffer_new(bh)) {
                        clear_buffer_new(bh);
                        unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
                }
                set_buffer_async_write(bh);
        } while ((bh = bh->b_this_page) != head);

        /* Now submit buffers to write */
        bh = head = page_buffers(page);
        do {
                if (!buffer_async_write(bh))
                        continue;
                ret = io_submit_add_bh(io, inode, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
                         * we can do but mark the page as dirty, and
                         * better luck next time.
                         */
                        redirty_page_for_writepage(wbc, page);
                        break;
                }
                nr_submitted++;
                clear_buffer_dirty(bh);
        } while ((bh = bh->b_this_page) != head);

        /* Error stopped previous loop? Clean up buffers... */
        if (ret) {
                do {
                        clear_buffer_async_write(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        unlock_page(page);
        /* Nothing submitted - we have to end page writeback */
        if (!nr_submitted)
                end_page_writeback(page);
        return ret;
}