fs-writeback.c

/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;		/* pending work list */
	struct rcu_head rcu_head;	/* for RCU free/clear of work */

	unsigned long seen;		/* threads that have seen this work */
	atomic_t pending;		/* number of threads still to do work */

	struct wb_writeback_args args;	/* writeback arguments */

	unsigned long state;		/* flag bits, see WS_* */
};

enum {
	WS_INPROGRESS = 0,
	WS_ONSTACK,
};

static inline void bdi_work_init(struct bdi_work *work,
				 struct wb_writeback_args *args)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args = *args;
	__set_bit(WS_INPROGRESS, &work->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	clear_bit(WS_INPROGRESS, &work->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&work->state, WS_INPROGRESS);

	if (!test_bit(WS_ONSTACK, &work->state))
		kfree(work);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work,
	 * drop our reference.  If this is the last ref, delete and free it.
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		call_rcu(&work->rcu_head, bdi_work_free);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	work->seen = bdi->wb_mask;
	BUG_ON(!work->seen);
	atomic_set(&work->pending, bdi->wb_cnt);
	BUG_ON(!bdi->wb_cnt);

	/*
	 * list_add_tail_rcu() contains the necessary barriers to
	 * make sure the above stores are seen before the item is
	 * noticed on the list
	 */
	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&work->list, &bdi->work_list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * If the default thread isn't there, make sure we add it.  When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items.  The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_done(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_INPROGRESS, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct wb_writeback_args *args)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, args);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

/**
 * bdi_queue_work_onstack - start and wait for writeback
 * @args: parameters to control the work queue writeback
 *
 * Description:
 *   This function initiates writeback and waits for the operation to
 *   complete.  Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
static void bdi_queue_work_onstack(struct wb_writeback_args *args)
{
	struct bdi_work work;

	bdi_work_init(&work, args);
	__set_bit(WS_ONSTACK, &work.state);

	bdi_queue_work(args->sb->s_bdi, &work);
	bdi_wait_on_work_done(&work);
}
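
/*
 * Note on work-item lifetime (illustrative summary of the code above, not
 * new behaviour): a kmalloc'ed item from bdi_alloc_queue_work() is
 * fire-and-forget and is freed via RCU in bdi_work_free() once the last
 * thread drops its reference, while an on-stack item queued here must be
 * waited on with bdi_wait_on_work_done() before the caller's stack frame
 * can safely go away.
 */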

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback.  The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion.  Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	bdi_alloc_queue_work(bdi, &args);
}
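
/*
 * Example (illustrative only): a caller wanting roughly 4MB of
 * opportunistic writeback on a 4K-page system could do
 *
 *	bdi_start_writeback(bdi, 1024);
 *
 * and continue immediately; completion is neither guaranteed nor reported.
 */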

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This does WB_SYNC_NONE background writeback.  The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion.  Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= LONG_MAX,
		.for_background	= 1,
		.range_cyclic	= 1,
	};

	bdi_alloc_queue_work(bdi, &args);
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}
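
/*
 * Worked example (illustrative): if the inode being redirtied was stamped
 * at t=100 while the most-recently-dirtied inode already on b_dirty was
 * stamped at t=200, moving it to the front without restamping would break
 * the list's time ordering, so dirtied_when is reset to the current
 * jiffies.  If its stamp is already the newest, it was redirtied during
 * writeout and the stamp is kept.
 */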

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in the distant
	 * past.  This test is necessary to prevent such wrapped-around
	 * relative times from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
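
/*
 * Wraparound example (illustrative, 32-bit jiffies): a constantly
 * redirtied inode can end up with dirtied_when = 0xfffffff0 while jiffies
 * has wrapped to 0x10.  time_after() may then claim the stamp is "in the
 * future" even though it is roughly 49 days in the past (at HZ=1000); the
 * extra time_before_eq() check against jiffies rejects such stuck stamps
 * so writeback is not stalled forever.
 */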

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;

	while (!list_empty(delaying_queue)) {
		inode = list_entry(delaying_queue->prev, struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_list, &tmp);
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		return;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		inode = list_entry(tmp.prev, struct inode, i_list);
		sb = inode->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = list_entry(pos, struct inode, i_list);
			if (inode->i_sb == sb)
				list_move(&inode->i_list, dispatch_queue);
		}
	}
}
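
/*
 * Example of the grouping above (illustrative): if tmp holds, oldest
 * first, inodes A(sb1) B(sb2) C(sb1), the second pass dispatches A and C
 * (all of sb1) and then B (sb2), so writeback_sb_inodes() later sees each
 * superblock's inodes contiguously instead of bouncing between
 * superblocks on every inode.
 */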

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, wbc);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	}
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY_PAGES */
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY_PAGES;
	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode_lock);
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode_lock);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; Move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}
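
/*
 * Summary of the requeueing policy above (descriptive only): a still-dirty
 * inode goes to b_more_io when it merely ran out of its nr_to_write slice,
 * and to the front of b_dirty when it was blocked or keeps being
 * redirtied, so one busy file cannot starve writeout of the rest of the
 * superblock.
 */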

/*
 * For background writeback the caller does not have the sb pinned
 * before calling writeback.  So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 */
static bool pin_sb_for_writeback(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
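
/*
 * Typical use (illustrative; this is the pattern writeback_inodes_wb()
 * follows below):
 *
 *	if (pin_sb_for_writeback(sb)) {
 *		writeback_sb_inodes(sb, wb, wbc);
 *		drop_super(sb);
 *	}
 *
 * drop_super() undoes both the s_umount read lock and the s_count
 * reference taken here.
 */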

/*
 * Write a portion of b_io inodes which belong to @sb.
 * If @wbc->sb != NULL, then find and write all such
 * inodes.  Otherwise write only ones which go sequentially
 * in reverse order.
 *
 * Return 1 if the caller's writeback routine should be
 * interrupted.  Otherwise return 0.
 */
static int writeback_sb_inodes(struct super_block *sb,
			       struct bdi_writeback *wb,
			       struct writeback_control *wbc)
{
	while (!list_empty(&wb->b_io)) {
		long pages_skipped;
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		if (wbc->sb && sb != inode->i_sb) {
			/* super block given and doesn't
			   match, skip this inode */
			redirty_tail(inode);
			continue;
		}
		if (sb != inode->i_sb)
			/* finish with this superblock */
			return 0;
		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}
		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, wbc->wb_start))
			return 1;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			return 1;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}
	/* b_io is empty */
	return 1;
}

void writeback_inodes_wb(struct bdi_writeback *wb,
			 struct writeback_control *wbc)
{
	int ret = 0;

	wbc->wb_start = jiffies; /* livelock avoidance */
	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		struct super_block *sb = inode->i_sb;

		if (wbc->sb) {
			/*
			 * We are requested to write out inodes for a specific
			 * superblock.  This means we already have s_umount
			 * taken by the caller which also waits for us to
			 * complete the writeout.
			 */
			if (sb != wbc->sb) {
				redirty_tail(inode);
				continue;
			}

			WARN_ON(!rwsem_is_locked(&sb->s_umount));

			ret = writeback_sb_inodes(sb, wb, wbc);
		} else {
			if (!pin_sb_for_writeback(sb)) {
				requeue_io(inode);
				continue;
			}
			ret = writeback_sb_inodes(sb, wb, wbc);
			drop_super(sb);
		}

		if (ret)
			break;
	}
	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code re-evaluates
 * the dirty thresholds each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024
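
/*
 * With 4K pages this caps each chunk at 1024 * 4KB = 4MB before the loop
 * in wb_writeback() re-checks the thresholds and remaining work.
 */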

static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.sb			= args->sb,
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.for_background		= args->for_background,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;
	struct inode *inode;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (args->nr_pages <= 0)
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (args->for_background && !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we consumed everything, see if we have more
		 */
		if (wbc.nr_to_write <= 0)
			continue;
		/*
		 * Didn't write everything and we don't have more IO, bail
		 */
		if (!wbc.more_io)
			break;
		/*
		 * Did we write something? Try for more
		 */
		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
			continue;
		/*
		 * Nothing written.  Wait for some inode to
		 * become available for writeback.  Otherwise
		 * we'll just busyloop.
		 */
		spin_lock(&inode_lock);
		if (!list_empty(&wb->b_more_io)) {
			inode = list_entry(wb->b_more_io.prev,
					   struct inode, i_list);
			inode_wait_for_writeback(inode);
		}
		spin_unlock(&inode_lock);
	}

	return wrote;
}
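
/*
 * Exit/continue conditions of the loop above, in order (summary):
 * nr_pages exhausted -> stop; background and below threshold -> stop;
 * wrote a full chunk -> loop again; nothing left and !more_io -> stop;
 * partial progress -> loop again; no progress but more_io set -> wait on
 * the oldest b_more_io inode to avoid busylooping, then retry.
 */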

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet.  ->seen is initially set for each thread that exists
 * for this device; when a thread first notices a piece of work it
 * clears its bit.  Depending on writeback type, the thread will notify
 * completion either on receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_bit(wb->nr, &work->seen))
			continue;
		clear_bit(wb->nr, &work->seen);

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}
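
/*
 * Units note: dirty_writeback_interval is in centiseconds, so the "* 10"
 * above converts it to milliseconds for msecs_to_jiffies().  E.g. the
 * default of 500 gives 500 * 10 = 5000ms, i.e. a kupdate-style flush at
 * most once every 5 seconds.
 */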

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (!test_bit(WS_ONSTACK, &work->state))
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (test_bit(WS_ONSTACK, &work->state))
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi.  Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate.  If
			 * we see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		if (dirty_writeback_interval) {
			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
			schedule_timeout_interruptible(wait_jiffies);
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			if (list_empty_careful(&wb->bdi->work_list) &&
			    !kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}

		try_to_freeze();
	}

	return 0;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
	};

	if (nr_pages) {
		args.nr_pages = nr_pages;
	} else {
		args.nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		bdi_alloc_queue_work(bdi, &args);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL!  We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
				       bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
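
/*
 * Callers normally reach this through the fs.h wrappers, e.g.
 * mark_inode_dirty(inode), which expands to
 * __mark_inode_dirty(inode, I_DIRTY), and mark_inode_dirty_sync(inode),
 * which expands to __mark_inode_dirty(inode, I_DIRTY_SYNC), rather than
 * calling __mark_inode_dirty() directly.
 */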

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync.  Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock.  So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block.  No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	args.nr_pages = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	bdi_queue_work_onstack(&args);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * writeback_inodes_sb_if_idle - start writeback if none underway
 * @sb: the superblock
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
	if (!writeback_in_progress(sb->s_bdi)) {
		down_read(&sb->s_umount);
		writeback_inodes_sb(sb);
		up_read(&sb->s_umount);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
	};

	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work_onstack(&args);
	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty.  This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
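
/*
 * Example (illustrative): knfsd-style callers that must have the inode on
 * disk before replying can use
 *
 *	err = write_inode_now(inode, 1);
 *
 * which writes synchronously (WB_SYNC_ALL) and then waits via
 * inode_sync_wait().
 */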

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);