backing-dev.c

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
	struct inode *inode;

	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&inode_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_list)
		nr_more_io++;
	spin_unlock(&inode_lock);

	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:     %8lu kB\n"
		   "BdiReclaimable:   %8lu kB\n"
		   "BdiDirtyThresh:   %8lu kB\n"
		   "DirtyThresh:      %8lu kB\n"
		   "BackgroundThresh: %8lu kB\n"
		   "b_dirty:          %8lu\n"
		   "b_io:             %8lu\n"
		   "b_more_io:        %8lu\n"
		   "bdi_list:         %8u\n"
		   "state:            %8lx\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh), K(dirty_thresh),
		   K(background_thresh), nr_dirty, nr_io, nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
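
/*
 * Per-bdi sysfs knobs, exposed under /sys/class/bdi/<name>/. The
 * read_ahead_kb attribute is stored internally in pages, so the store
 * converts from kilobytes with PAGE_SHIFT - 10 and the show converts
 * back via K().
 */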
static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);
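
/*
 * Boot-time setup: spawn the sync_supers thread, arm its periodic timer,
 * and initialize and register the default backing_dev_info which hosts
 * the forker thread.
 */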
static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	init_timer(&sync_supers_timer);
	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	bdi_arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");

	return err;
}
subsys_initcall(default_bdi_init);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
}

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}
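
/*
 * Fallback writeout path: do up to 1024 pages of non-sync writeback
 * directly from the calling thread. The forker thread uses this when it
 * fails to create a per-bdi flusher thread.
 */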
static void bdi_flush_io(struct backing_dev_info *bdi)
{
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_NONE,
		.older_than_this	= NULL,
		.range_cyclic		= 1,
		.nr_to_write		= 1024,
	};

	writeback_inodes_wb(&bdi->wb, &wbc);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback thread individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}
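
/*
 * (Re)arm the sync_supers timer. dirty_writeback_interval is in
 * centiseconds, hence the "* 10" to convert to milliseconds; a zero
 * interval disables the periodic wakeup entirely.
 */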
void bdi_arm_supers_timer(void)
{
	unsigned long next;

	if (!dirty_writeback_interval)
		return;

	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	bdi_arm_supers_timer();
}

/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive.
 */
static unsigned long bdi_longest_inactive(void)
{
	unsigned long interval;

	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
	return max(5UL * 60 * HZ, interval);
}
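
/*
 * The forker thread runs on behalf of the default bdi. It scans the
 * registered bdis and forks a "flush-<dev>" thread for any bdi that has
 * dirty IO but no thread, and kills threads that have been idle for
 * longer than bdi_longest_inactive(). This gives on-demand per-bdi
 * flusher threads without keeping one alive for every idle device.
 */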
static int bdi_forker_thread(void *ptr)
{
	struct bdi_writeback *me = ptr;

	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	for (;;) {
		struct task_struct *task = NULL;
		struct backing_dev_info *bdi;
		enum {
			NO_ACTION,   /* Nothing to do */
			FORK_THREAD, /* Fork bdi thread */
			KILL_THREAD, /* Kill inactive bdi thread */
		} action = NO_ACTION;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
			wb_do_writeback(me, 0);

		spin_lock_bh(&bdi_lock);
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry(bdi, &bdi_list, bdi_list) {
			bool have_dirty_io;

			if (!bdi_cap_writeback_dirty(bdi) ||
			     bdi_cap_flush_forker(bdi))
				continue;

			WARN(!test_bit(BDI_registered, &bdi->state),
			     "bdi %p/%s is not registered!\n", bdi, bdi->name);

			have_dirty_io = !list_empty(&bdi->work_list) ||
					wb_has_dirty_io(&bdi->wb);

			/*
			 * If the bdi has work to do, but the thread does not
			 * exist - create it.
			 */
			if (!bdi->wb.task && have_dirty_io) {
				/*
				 * Set the pending bit - if someone tries to
				 * unregister this bdi, it will wait on this
				 * bit.
				 */
				set_bit(BDI_pending, &bdi->state);
				action = FORK_THREAD;
				break;
			}

			spin_lock(&bdi->wb_lock);

			/*
			 * If there is no work to do and the bdi thread was
			 * inactive long enough - kill it. The wb_lock is taken
			 * to make sure no-one adds more work to this bdi and
			 * wakes the bdi thread up.
			 */
			if (bdi->wb.task && !have_dirty_io &&
			    time_after(jiffies, bdi->wb.last_active +
						bdi_longest_inactive())) {
				task = bdi->wb.task;
				bdi->wb.task = NULL;
				spin_unlock(&bdi->wb_lock);
				set_bit(BDI_pending, &bdi->state);
				action = KILL_THREAD;
				break;
			}
			spin_unlock(&bdi->wb_lock);
		}
		spin_unlock_bh(&bdi_lock);

		/* Keep working if default bdi still has things to do */
		if (!list_empty(&me->bdi->work_list))
			__set_current_state(TASK_RUNNING);

		switch (action) {
		case FORK_THREAD:
			__set_current_state(TASK_RUNNING);
			task = kthread_run(bdi_writeback_thread, &bdi->wb,
					   "flush-%s", dev_name(bdi->dev));
			if (IS_ERR(task)) {
				/*
				 * If thread creation fails, force writeout of
				 * the bdi from the thread.
				 */
				bdi_flush_io(bdi);
			} else {
				/*
				 * The spinlock makes sure we do not lose
				 * wake-ups when racing with 'bdi_queue_work()'.
				 */
				spin_lock(&bdi->wb_lock);
				bdi->wb.task = task;
				spin_unlock(&bdi->wb_lock);
			}
			break;

		case KILL_THREAD:
			__set_current_state(TASK_RUNNING);
			kthread_stop(task);
			break;

		case NO_ACTION:
			if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
				/*
				 * There is no dirty data. The only thing we
				 * should now care about is checking for
				 * inactive bdi threads and killing them. Thus,
				 * let's sleep for a longer time, save energy,
				 * and be friendly to battery-powered devices.
				 */
				schedule_timeout(bdi_longest_inactive());
			else
				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
			try_to_freeze();
			/* Back to the main loop */
			continue;
		}

		/*
		 * Clear pending bit and wakeup anybody waiting to tear us down.
		 */
		clear_bit(BDI_pending, &bdi->state);
		smp_mb__after_clear_bit();
		wake_up_bit(&bdi->state, BDI_pending);
	}

	return 0;
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu();
}
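
/**
 * bdi_register - add a bdi to the global list and create its device
 * @bdi:	the backing_dev_info to register
 * @parent:	parent device, may be NULL
 * @fmt:	printf-style format for the device name
 *
 * Returns 0 on success. Registering an already-registered bdi is a no-op
 * that succeeds. For the default bdi this also starts the forker thread.
 */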
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	int ret = 0;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		goto exit;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto exit;
	}

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
				       dev_name(dev));
		if (IS_ERR(wb->task)) {
			wb->task = NULL;
			ret = -ENOMEM;

			bdi_remove_from_list(bdi);
			goto exit;
		}
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);
	trace_writeback_bdi_register(bdi);
exit:
	return ret;
}
EXPORT_SYMBOL(bdi_register);
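
/*
 * Convenience wrapper for block devices: register the bdi under the
 * "major:minor" name of @dev.
 */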
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Finally, kill the kernel thread. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility. Force
	 * unfreeze of the thread before calling kthread_stop(), otherwise
	 * it would never exit if it is currently stuck in the refrigerator.
	 */
	if (bdi->wb.task) {
		thaw_process(bdi->wb.task);
		kthread_stop(bdi->wb.task);
	}
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = NULL;
	}
	spin_unlock(&sb_lock);
}
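
/**
 * bdi_unregister - reverse bdi_register
 * @bdi:	the backing_dev_info to tear down
 *
 * Detaches any super_blocks still pointing at @bdi, shuts down its
 * writeback thread (unless @bdi is the forker bdi itself) and removes
 * the device. Calling this on a bdi that was never registered is safe.
 */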
void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		trace_writeback_bdi_unregister(bdi);
		bdi_prune_sb(bdi);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);
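
/*
 * Initialize a bdi for use: locks, lists, per-cpu counters and the
 * embedded default writeback context. Must be balanced by a call to
 * bdi_destroy(). Returns 0 or a percpu allocation error.
 */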
int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;
	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);
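
/*
 * Undo bdi_init(). Any dirty inodes still attached to this bdi are
 * spliced over to the default bdi so they are not lost, then the bdi is
 * unregistered and its per-cpu state freed.
 */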
void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		spin_lock(&inode_lock);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&inode_lock);
	}

	bdi_unregister(bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	char tmp[32];
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	sprintf(tmp, "%.28s%s", name, "-%d");
	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
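
/*
 * Typical caller pattern (an illustrative sketch, not lifted from an
 * in-tree filesystem; "fsi" and "myfs" are made-up names): embed the bdi
 * in the filesystem's private info, set it up at mount time and point
 * the superblock at it:
 *
 *	err = bdi_setup_and_register(&fsi->bdi, "myfs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	sb->s_bdi = &fsi->bdi;
 *
 * bdi_destroy() must then be called when the filesystem is torn down.
 */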

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
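
/*
 * Congestion tracking: one wait queue per direction (async == 0,
 * sync == 1). Clearing the congested bit wakes any thread sleeping in
 * congestion_wait() below.
 */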
void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	clear_bit(bit, &bdi->state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);
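
/*
 * Mark @bdi congested for the @sync direction. Waiters are only woken
 * when the bit is cleared again, so setting is deliberately wakeup-free.
 */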
void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}
EXPORT_SYMBOL(congestion_wait);