/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
	long nr_pages;
	struct super_block *sb;
	enum writeback_sync_modes sync_mode;
	int for_kupdate;
	int range_cyclic;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
	struct list_head list;
	struct rcu_head rcu_head;

	unsigned long seen;
	atomic_t pending;

	struct wb_writeback_args args;

	unsigned long state;
};

enum {
	WS_USED_B = 0,
	WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
	return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
				 struct writeback_control *wbc)
{
	INIT_RCU_HEAD(&work->rcu_head);
	work->args.sb = wbc->sb;
	work->args.nr_pages = wbc->nr_to_write;
	work->args.sync_mode = wbc->sync_mode;
	work->args.range_cyclic = wbc->range_cyclic;
	work->args.for_kupdate = 0;

	work->state = WS_USED;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return !list_empty(&bdi->work_list);
}

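/*
 * Drop the "in use" bit on a work item and wake anyone sleeping on it in
 * bdi_wait_on_work_clear() below.
 */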
static void bdi_work_clear(struct bdi_work *work)
{
	clear_bit(WS_USED_B, &work->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&work->state, WS_USED_B);
}

static void bdi_work_free(struct rcu_head *head)
{
	struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

	if (!bdi_work_on_stack(work))
		kfree(work);
	else
		bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
	const enum writeback_sync_modes sync_mode = work->args.sync_mode;

	/*
	 * For allocated work, we can clear the done/seen bit right here.
	 * For on-stack work, we need to postpone both the clear and free
	 * to after the RCU grace period, since the stack could be invalidated
	 * as soon as bdi_work_clear() has done the wakeup.
	 */
	if (!bdi_work_on_stack(work))
		bdi_work_clear(work);
	if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work))
		call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
	/*
	 * The caller has retrieved the work arguments from this work, so
	 * drop our reference. If this is the last ref, delete and free it.
	 */
	if (atomic_dec_and_test(&work->pending)) {
		struct backing_dev_info *bdi = wb->bdi;

		spin_lock(&bdi->wb_lock);
		list_del_rcu(&work->list);
		spin_unlock(&bdi->wb_lock);

		wb_work_complete(work);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
	if (work) {
		work->seen = bdi->wb_mask;
		BUG_ON(!work->seen);
		atomic_set(&work->pending, bdi->wb_cnt);
		BUG_ON(!bdi->wb_cnt);

		/*
		 * Make sure stores are seen before it appears on the list
		 */
		smp_mb();

		spin_lock(&bdi->wb_lock);
		list_add_tail_rcu(&work->list, &bdi->work_list);
		spin_unlock(&bdi->wb_lock);
	}

	/*
	 * If the default thread isn't there, make sure we add it. When
	 * it gets created and wakes up, we'll run this work.
	 */
	if (unlikely(list_empty_careful(&bdi->wb_list)))
		wake_up_process(default_backing_dev_info.wb.task);
	else {
		struct bdi_writeback *wb = &bdi->wb;

		/*
		 * If we failed allocating the bdi work item, always wake
		 * up the wb thread. As a safety precaution, it will flush
		 * out everything.
		 */
		if (!wb_has_dirty_io(wb)) {
			if (work)
				wb_clear_pending(wb, work);
		} else if (wb->task)
			wake_up_process(wb->task);
	}
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
}

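/*
 * Allocate a bdi work item and queue it. The allocation is GFP_ATOMIC
 * because callers may not be able to sleep; on failure, bdi_queue_work()
 * is still called with a NULL work item and wakes the flusher thread,
 * which flushes out everything as a fallback.
 */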
static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct writeback_control *wbc)
{
	struct bdi_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work)
		bdi_work_init(work, wbc);

	bdi_queue_work(bdi, work);
}

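/**
 * bdi_start_writeback - start writeback against a bdi
 * @wbc: writeback parameters (target bdi, sync mode, number of pages)
 *
 * WB_SYNC_NONE requests are queued asynchronously; WB_SYNC_ALL requests
 * use an on-stack work item and wait for the wb threads to ack it before
 * returning.
 */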
void bdi_start_writeback(struct writeback_control *wbc)
{
	/*
	 * WB_SYNC_NONE is opportunistic writeback. If this allocation fails,
	 * bdi_queue_work() will wake up the thread and flush old data. This
	 * should ensure some amount of progress in freeing memory.
	 */
	if (wbc->sync_mode != WB_SYNC_ALL)
		bdi_alloc_queue_work(wbc->bdi, wbc);
	else {
		struct bdi_work work;

		bdi_work_init(&work, wbc);
		work.state |= WS_ONSTACK;

		bdi_queue_work(wbc->bdi, &work);
		bdi_wait_on_work_clear(&work);
	}
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = list_entry(wb->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

	list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

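/*
 * Return true if @inode was dirtied after time @t, guarding against
 * jiffies wraparound on 32-bit systems (see the comment below).
 */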
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole pdflush writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						 struct inode, i_list);

		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
	list_splice_init(&wb->b_more_io, wb->b_io.prev);
	move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

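/*
 * Write the inode itself (not its data) via the superblock's
 * ->write_inode method, if one exists and the inode is not bad.
 */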
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on s_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything.  Redirty
			 * the inode; move it from b_io onto b_more_io/b_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, inuse
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback.  So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
				struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Caller must already hold the ref for this
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		WARN_ON(!rwsem_is_locked(&sb->s_umount));
		return 0;
	}

	spin_lock(&sb_lock);
	sb->s_count++;
	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root) {
			spin_unlock(&sb_lock);
			return 0;
		}
		/*
		 * umounted, drop rwsem again and fall through to failure
		 */
		up_read(&sb->s_umount);
	}

	sb->s_count--;
	spin_unlock(&sb_lock);
	return 1;
}

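/*
 * Undo pin_sb_for_writeback(): drop the s_umount rwsem and the superblock
 * reference taken there. WB_SYNC_ALL callers held their own reference, so
 * there is nothing to drop.
 */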
static void unpin_sb_for_writeback(struct writeback_control *wbc,
				   struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (wbc->sync_mode == WB_SYNC_ALL)
		return;

	up_read(&sb->s_umount);
	put_super(sb);
}

static void writeback_inodes_wb(struct bdi_writeback *wb,
				struct writeback_control *wbc)
{
	struct super_block *sb = wbc->sb;
	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&wb->b_io))
		queue_io(wb, wbc->older_than_this);

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = list_entry(wb->b_io.prev,
						 struct inode, i_list);
		long pages_skipped;

		/*
		 * super block given and doesn't match, skip this inode
		 */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (!bdi_cap_writeback_dirty(wb->bdi)) {
			redirty_tail(inode);
			if (is_blkdev_sb) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
			wbc->encountered_congestion = 1;
			if (!is_blkdev_sb)
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		if (pin_sb_for_writeback(wbc, inode)) {
			requeue_io(inode);
			continue;
		}

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		unpin_sb_for_writeback(wbc, inode);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&wb->b_more_io))
			wbc->more_io = 1;
	}

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}

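/*
 * Convenience wrapper: write back inodes on the default bdi_writeback of
 * the bdi named in @wbc.
 */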
void writeback_inodes_wbc(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = wbc->bdi;

	writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty thresholds each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

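/*
 * Return true if the global count of dirty and unstable-NFS pages exceeds
 * the background writeback threshold.
 */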
static inline bool over_bground_thresh(void)
{
	unsigned long background_thresh, dirty_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

	return (global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_args *args)
{
	struct writeback_control wbc = {
		.bdi			= wb->bdi,
		.sb			= args->sb,
		.sync_mode		= args->sync_mode,
		.older_than_this	= NULL,
		.for_kupdate		= args->for_kupdate,
		.range_cyclic		= args->range_cyclic,
	};
	unsigned long oldest_jif;
	long wrote = 0;

	if (wbc.for_kupdate) {
		wbc.older_than_this = &oldest_jif;
		oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
	}
	if (!wbc.range_cyclic) {
		wbc.range_start = 0;
		wbc.range_end = LLONG_MAX;
	}

	for (;;) {
		/*
		 * Don't flush anything for non-integrity writeback where
		 * no nr_pages was given
		 */
		if (!args->for_kupdate && args->nr_pages <= 0 &&
		    args->sync_mode == WB_SYNC_NONE)
			break;

		/*
		 * If no specific pages were given and this is just a
		 * periodic background writeout and we are below the
		 * background dirty threshold, don't do anything
		 */
		if (args->for_kupdate && args->nr_pages <= 0 &&
		    !over_bground_thresh())
			break;

		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes_wb(wb, &wbc);
		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

		/*
		 * If we ran out of stuff to write, bail unless more_io got set
		 */
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			if (wbc.more_io && !wbc.for_kupdate)
				continue;
			break;
		}
	}

	return wrote;
}

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
					   struct bdi_writeback *wb)
{
	struct bdi_work *work, *ret = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(work, &bdi->work_list, list) {
		if (!test_and_clear_bit(wb->nr, &work->seen))
			continue;

		ret = work;
		break;
	}

	rcu_read_unlock();
	return ret;
}

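/*
 * kupdated()-style periodic writeback: if dirty_writeback_interval has
 * elapsed since the last old-data flush, write back expired dirty data.
 */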
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	if (nr_pages) {
		struct wb_writeback_args args = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
		};

		return wb_writeback(wb, &args);
	}

	return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct bdi_work *work;
	long wrote = 0;

	while ((work = get_next_work_item(bdi, wb)) != NULL) {
		struct wb_writeback_args args = work->args;

		/*
		 * Override sync mode, in case we must wait for completion
		 */
		if (force_wait)
			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

		/*
		 * If this isn't a data integrity operation, just notify
		 * that we have seen this work and we are now starting it.
		 */
		if (args.sync_mode == WB_SYNC_NONE)
			wb_clear_pending(wb, work);

		wrote += wb_writeback(wb, &args);

		/*
		 * This is a data integrity writeback, so only do the
		 * notification when we have completed the work.
		 */
		if (args.sync_mode == WB_SYNC_ALL)
			wb_clear_pending(wb, work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
	unsigned long last_active = jiffies;
	unsigned long wait_jiffies = -1UL;
	long pages_written;

	while (!kthread_should_stop()) {
		pages_written = wb_do_writeback(wb, 0);

		if (pages_written)
			last_active = jiffies;
		else if (wait_jiffies != -1UL) {
			unsigned long max_idle;

			/*
			 * Longest period of inactivity that we tolerate. If we
			 * see dirty data again later, the task will get
			 * recreated automatically.
			 */
			max_idle = max(5UL * 60 * HZ, wait_jiffies);
			if (time_after(jiffies, max_idle + last_active))
				break;
		}

		wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(wait_jiffies);
		try_to_freeze();
	}

	return 0;
}

/*
 * Schedule writeback for all backing devices. Can only be used for
 * WB_SYNC_NONE writeback; WB_SYNC_ALL should use bdi_start_writeback()
 * and pass in the superblock.
 */
static void bdi_writeback_all(struct writeback_control *wbc)
{
	struct backing_dev_info *bdi;

	WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

	spin_lock(&bdi_lock);

	list_for_each_entry(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;

		bdi_alloc_queue_work(bdi, wbc);
	}

	spin_unlock(&bdi_lock);
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.range_cyclic	= 1,
	};

	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	wbc.nr_to_write = nr_pages;
	bdi_writeback_all(&wbc);
}

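/*
 * With the block_dump sysctl set, log which task dirtied which inode, for
 * debugging writeback sources.
 */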
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
			struct backing_dev_info *bdi = wb->bdi;

			if (bdi_cap_writeback_dirty(bdi) &&
			    !test_bit(BDI_registered, &bdi->state)) {
				WARN_ON(1);
				printk(KERN_ERR "bdi-%s not registered\n",
				       bdi->name);
			}

			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &wb->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct writeback_control *wbc)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&wbc->sb->s_umount));

	spin_lock(&inode_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &wbc->sb->s_inodes, i_sb_list) {
		struct address_space *mapping;

		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
			continue;
		mapping = inode->i_mapping;
		if (mapping->nrpages == 0)
			continue;
		__iget(inode);
		spin_unlock(&inode_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have
		 * been removed from s_inodes list while we dropped the
		 * inode_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it
		 * under inode_lock. So we keep the reference and iput
		 * it later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO. The number of pages submitted is
 * returned.
 */
long writeback_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	wbc.nr_to_write = nr_to_write;
	bdi_writeback_all(&wbc);

	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(writeback_inodes_sb);

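/*
 * Note that writeback_inodes_sb() above only starts writeback and does not
 * wait; callers that need data integrity should use sync_inodes_sb() below,
 * which writes and then waits on every dirty inode of the superblock.
 */
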
/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block. The number of pages synced is returned.
 */
long sync_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sb		= sb,
		.bdi		= sb->s_bdi,
		.sync_mode	= WB_SYNC_ALL,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	long nr_to_write = LONG_MAX; /* doesn't actually matter */

	wbc.nr_to_write = nr_to_write;
	bdi_start_writeback(&wbc);
	wait_sb_inodes(&wbc);

	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);