backing-dev.c

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
	if (wb1 < wb2) {
		spin_lock(&wb1->list_lock);
		spin_lock_nested(&wb2->list_lock, 1);
	} else {
		spin_lock(&wb2->list_lock);
		spin_lock_nested(&wb1->list_lock, 1);
	}
}
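/*
 * Taking the two list_locks in address order is the usual ABBA
 * deadlock-avoidance idiom: two concurrent bdi_lock_two() calls on the
 * same pair always acquire the lower-addressed lock first, so neither
 * can hold one lock while waiting on the other. spin_lock_nested(..., 1)
 * only tells lockdep that nesting two locks of the same class here is
 * intentional.
 */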
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_wb_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
		nr_more_io++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
		   (unsigned long) K(bdi->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
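/*
 * With CONFIG_DEBUG_FS enabled the counters above are exposed under
 * debugfs, typically mounted at /sys/kernel/debug, as bdi/<name>/stats;
 * e.g. /sys/kernel/debug/bdi/8:0/stats for a bdi registered as "8:0".
 */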
static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}
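/*
 * The shift converts kilobytes to pages: with 4 KB pages PAGE_SHIFT is
 * 12, so writing "128" to read_ahead_kb stores 128 >> 2 = 32 pages in
 * bdi->ra_pages.
 */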
#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
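/*
 * BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) expands to a
 * read_ahead_kb_show() that prints the page count converted back to
 * kilobytes, and DEVICE_ATTR_RW(read_ahead_kb) pairs it with the
 * read_ahead_kb_store() defined above to form one read-write attribute.
 */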
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);
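/*
 * These attributes surface the per-device writeback knobs in sysfs
 * under /sys/class/bdi/<name>/; min_ratio and max_ratio bound, in
 * percent, this device's share of the global dirty threshold.
 */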
static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
					      WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");
	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);
int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
}
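/*
 * dirty_writeback_interval is in centiseconds, hence the * 10 to get
 * milliseconds; with the default of 500 (vm.dirty_writeback_centisecs)
 * the delayed work fires about five seconds after the first inode is
 * dirtied.
 */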
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();

	/* bdi_list is now unused, clear it to mark @bdi dying */
	INIT_LIST_HEAD(&bdi->bdi_list);
}
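/*
 * synchronize_rcu_expedited() waits for every RCU reader that might
 * still be traversing bdi_list to finish, so re-initializing
 * bdi->bdi_list afterwards cannot pull the entry out from under a
 * concurrent reader.
 */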
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
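/*
 * bdi_register_dev() names the bdi after the device's major:minor pair,
 * so a block device with dev_t 8:0 appears as /sys/class/bdi/8:0.
 */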
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * Drain work list and shutdown the delayed_work. At this point,
	 * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
	 * is dying and its work_list needs to be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
	flush_delayed_work(&bdi->wb.dwork);
	WARN_ON(!list_empty(&bdi->work_list));

	/*
	 * This shouldn't be necessary unless @bdi for some reason has
	 * unflushed dirty IO after work_list is drained. Do it anyway
	 * just in case.
	 */
	cancel_delayed_work_sync(&bdi->wb.dwork);
}
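/*
 * mod_delayed_work() with a zero delay queues the work for immediate
 * execution, and flush_delayed_work() then waits for that pass to
 * complete; together they are what actually drains the work_list before
 * the WARN_ON check.
 */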
/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = &default_backing_dev_info;
	}
	spin_unlock(&sb_lock);
}
void bdi_unregister(struct backing_dev_info *bdi)
{
	struct device *dev = bdi->dev;

	if (dev) {
		bdi_set_min_ratio(bdi, 0);
		trace_writeback_bdi_unregister(bdi);
		bdi_prune_sb(bdi);
		bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);

		spin_lock_bh(&bdi->wb_lock);
		bdi->dev = NULL;
		spin_unlock_bh(&bdi->wb_lock);

		device_unregister(dev);
	}
}
EXPORT_SYMBOL(bdi_unregister);
static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	spin_lock_init(&wb->list_lock);
	INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;

	bdi->bw_time_stamp = jiffies;
	bdi->written_stamp = 0;

	bdi->balanced_dirty_ratelimit = INIT_BW;
	bdi->dirty_ratelimit = INIT_BW;
	bdi->write_bandwidth = INIT_BW;
	bdi->avg_write_bandwidth = INIT_BW;

	err = fprop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);
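/*
 * INIT_BW expresses 100 MB/s in pages per second: with the common
 * PAGE_SHIFT of 12 that is 100 << 8 = 25600 four-kilobyte pages. The
 * error path relies on i holding the number of percpu counters
 * initialized so far, so the goto into the if-block unwinds exactly
 * those, whether the loop or fprop_local_init_percpu() failed.
 */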
void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		bdi_lock_two(&bdi->wb, dst);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&bdi->wb.list_lock);
		spin_unlock(&dst->list_lock);
	}

	bdi_unregister(bdi);

	/*
	 * If bdi_unregister() had already been called earlier, the dwork
	 * could still be pending because bdi_prune_sb() can race with the
	 * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
	 */
	cancel_delayed_work_sync(&bdi->wb.dwork);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	fprop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);
/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
			   atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
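/*
 * A typical filesystem caller, sketched; the fill_super context and the
 * sbi structure are illustrative, not part of this file:
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * The "%.28s-%ld" format appends a monotonically increasing bdi_seq
 * value, so repeated mounts get distinct names such as myfs-1, myfs-2.
 */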
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (test_and_clear_bit(bit, &bdi->state))
		atomic_dec(&nr_bdi_congested[sync]);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (!test_and_set_bit(bit, &bdi->state))
		atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);
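/*
 * The sync argument doubles as an array index: callers pass BLK_RW_SYNC
 * or BLK_RW_ASYNC, selecting both the congestion bit and the wait
 * queue, while nr_bdi_congested[] tracks how many bdis currently have
 * each bit set so the waiters below can test for "any congestion"
 * cheaply.
 */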
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
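/*
 * A representative call from a reclaim-style path would be
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * i.e. back off for at most a tenth of a second, waking early if any
 * bdi clears its async congestion bit.
 */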
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If any backing_dev is congested and the given @zone has experienced
 * recent congestion, this waits for up to @timeout jiffies for either a
 * BDI to exit congestion of the given @sync queue or a write to complete.
 *
 * In the absence of zone congestion, the function calls cond_resched()
 * to yield the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
	    !zone_is_reclaim_congested(zone)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
			table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}