/* backing-dev.c */
#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);
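/*
 * Reader-side sketch (illustrative, not part of this file): per the
 * locking comment above, bdi_list can be walked under rcu_read_lock()
 * alone; handle_bdi() here stands in for any hypothetical per-bdi work:
 *
 *	struct backing_dev_info *bdi;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 *		if (bdi_has_dirty_io(bdi))
 *			handle_bdi(bdi);
 *	}
 *	rcu_read_unlock();
 */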
static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);
static void arm_supers_timer(void);

static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
	struct inode *inode;

	/*
	 * inode lock is enough here, the bdi->wb_list is protected by
	 * RCU on the reader side
	 */
	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&inode_lock);
	list_for_each_entry(wb, &bdi->wb_list, list) {
		nr_wb++;
		list_for_each_entry(inode, &wb->b_dirty, i_list)
			nr_dirty++;
		list_for_each_entry(inode, &wb->b_io, i_list)
			nr_io++;
		list_for_each_entry(inode, &wb->b_more_io, i_list)
			nr_more_io++;
	}
	spin_unlock(&inode_lock);

	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:     %8lu kB\n"
		   "BdiReclaimable:   %8lu kB\n"
		   "BdiDirtyThresh:   %8lu kB\n"
		   "DirtyThresh:      %8lu kB\n"
		   "BackgroundThresh: %8lu kB\n"
		   "WritebackThreads: %8lu\n"
		   "b_dirty:          %8lu\n"
		   "b_io:             %8lu\n"
		   "b_more_io:        %8lu\n"
		   "bdi_list:         %8u\n"
		   "state:            %8lx\n"
		   "wb_mask:          %8lx\n"
		   "wb_list:          %8u\n"
		   "wb_cnt:           %8u\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh), K(dirty_thresh),
		   K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
		   !list_empty(&bdi->wb_list), bdi->wb_cnt);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}
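/*
 * With debugfs mounted in the usual place, the file created above is
 * readable at /sys/kernel/debug/bdi/<name>/stats. For a block device
 * bdi named by major:minor (the "8:0" name below is an assumption):
 *
 *	# cat /sys/kernel/debug/bdi/8:0/stats
 */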
static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
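/*
 * For reference, BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) above expands
 * to (whitespace aside):
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *			struct device_attribute *attr, char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 *
 * These show/store pairs are wired up as 0644 device attributes in
 * bdi_dev_attrs[] below, so they appear under /sys/class/bdi/<name>/.
 */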
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);
static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	init_timer(&sync_supers_timer);
	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");

	return err;
}
subsys_initcall(default_bdi_init);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
}

static void bdi_task_init(struct backing_dev_info *bdi,
			  struct bdi_writeback *wb)
{
	struct task_struct *tsk = current;

	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&wb->list, &bdi->wb_list);
	spin_unlock(&bdi->wb_lock);

	tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(tsk, 0);
}
static int bdi_start_fn(void *ptr)
{
	struct bdi_writeback *wb = ptr;
	struct backing_dev_info *bdi = wb->bdi;
	int ret;

	/*
	 * Add us to the active bdi_list
	 */
	spin_lock_bh(&bdi_lock);
	list_add_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	bdi_task_init(bdi, wb);

	/*
	 * Clear pending bit and wakeup anybody waiting to tear us down
	 */
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);

	ret = bdi_writeback_task(wb);

	/*
	 * Remove us from the list
	 */
	spin_lock(&bdi->wb_lock);
	list_del_rcu(&wb->list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * Flush any work that raced with us exiting. No new work
	 * will be added, since this bdi isn't discoverable anymore.
	 */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	wb->task = NULL;
	return ret;
}

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

static void bdi_flush_io(struct backing_dev_info *bdi)
{
	struct writeback_control wbc = {
		.bdi			= bdi,
		.sync_mode		= WB_SYNC_NONE,
		.older_than_this	= NULL,
		.range_cyclic		= 1,
		.nr_to_write		= 1024,
	};

	writeback_inodes_wbc(&wbc);
}
/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_task()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback tasks individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}

static void arm_supers_timer(void)
{
	unsigned long next;

	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}
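/*
 * Worked example (assuming the usual defaults): dirty_writeback_interval
 * is expressed in centiseconds, so the "* 10" above converts it to
 * milliseconds. With the common default of 500 centiseconds:
 *
 *	500 centisecs * 10 = 5000 ms  ->  the timer fires about every 5 s
 */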
static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	arm_supers_timer();
}

static int bdi_forker_task(void *ptr)
{
	struct bdi_writeback *me = ptr;

	bdi_task_init(me->bdi, me);

	for (;;) {
		struct backing_dev_info *bdi, *tmp;
		struct bdi_writeback *wb;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
			wb_do_writeback(me, 0);

		spin_lock_bh(&bdi_lock);

		/*
		 * Check if any existing bdi's have dirty data without
		 * a thread registered. If so, set that up.
		 */
		list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
			if (bdi->wb.task)
				continue;
			if (list_empty(&bdi->work_list) &&
			    !bdi_has_dirty_io(bdi))
				continue;

			bdi_add_default_flusher_task(bdi);
		}

		set_current_state(TASK_INTERRUPTIBLE);

		if (list_empty(&bdi_pending_list)) {
			unsigned long wait;

			spin_unlock_bh(&bdi_lock);
			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
			schedule_timeout(wait);
			try_to_freeze();
			continue;
		}

		__set_current_state(TASK_RUNNING);

		/*
		 * This is our real job - check for pending entries in
		 * bdi_pending_list, and create the tasks that got added
		 */
		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
				 bdi_list);
		list_del_init(&bdi->bdi_list);
		spin_unlock_bh(&bdi_lock);

		wb = &bdi->wb;
		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
					dev_name(bdi->dev));
		/*
		 * If task creation fails, then re-add the bdi to
		 * the pending list and force writeout of the bdi
		 * from this forker thread. That will free some memory
		 * and we can try again.
		 */
		if (IS_ERR(wb->task)) {
			wb->task = NULL;

			/*
			 * Add this 'bdi' to the back, so we get
			 * a chance to flush other bdi's to free
			 * memory.
			 */
			spin_lock_bh(&bdi_lock);
			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
			spin_unlock_bh(&bdi_lock);

			bdi_flush_io(bdi);
		}
	}

	return 0;
}
static void bdi_add_to_pending(struct rcu_head *head)
{
	struct backing_dev_info *bdi;

	bdi = container_of(head, struct backing_dev_info, rcu_head);
	INIT_LIST_HEAD(&bdi->bdi_list);

	spin_lock(&bdi_lock);
	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
	spin_unlock(&bdi_lock);

	/*
	 * We are now on the pending list, wake up bdi_forker_task()
	 * to finish the job and add us back to the active bdi_list
	 */
	wake_up_process(default_backing_dev_info.wb.task);
}

/*
 * Add the default flusher task that gets created for any bdi
 * that has dirty data pending writeout
 */
static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
		printk(KERN_ERR "bdi %p/%s is not registered!\n",
		       bdi, bdi->name);
		return;
	}

	/*
	 * Check with the helper whether to proceed adding a task. Will only
	 * abort if two or more simultaneous calls to
	 * bdi_add_default_flusher_task() occur; further additions will block
	 * waiting for previous additions to finish.
	 */
	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
		list_del_rcu(&bdi->bdi_list);

		/*
		 * We must wait for the current RCU period to end before
		 * moving to the pending list. So schedule that operation
		 * from an RCU callback.
		 */
		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
	}
}
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu();
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		 const char *fmt, ...)
{
	va_list args;
	int ret = 0;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		goto exit;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto exit;
	}

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
					dev_name(dev));
		if (IS_ERR(wb->task)) {
			wb->task = NULL;
			ret = -ENOMEM;

			bdi_remove_from_list(bdi);
			goto exit;
		}
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);
exit:
	return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
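/*
 * Caller sketch (hypothetical driver, not from this file): a block driver
 * names its bdi after the device's major:minor, so the sysfs directory
 * and "flush-%s" thread name line up with the device node:
 *
 *	err = bdi_init(bdi);
 *	if (!err)
 *		err = bdi_register_dev(bdi, disk_devt(disk));
 */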
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;

	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * Finally, kill the kernel threads. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility.
	 */
	list_for_each_entry(wb, &bdi->wb_list, list)
		kthread_stop(wb->task);
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = NULL;
	}
	spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		bdi_prune_sb(bdi);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);
int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_RCU_HEAD(&bdi->rcu_head);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	/*
	 * Just one thread support for now, hard code mask and count
	 */
	bdi->wb_mask = 1;
	bdi->wb_cnt = 1;

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;
	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		spin_lock(&inode_lock);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&inode_lock);
	}

	bdi_unregister(bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);
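/*
 * Lifecycle sketch (illustrative; my_bdi and "mydrv" are hypothetical):
 * callers pair bdi_init() with bdi_destroy(), and bdi_destroy() already
 * calls bdi_unregister() for a still-registered bdi:
 *
 *	err = bdi_init(&my_bdi);
 *	if (err)
 *		return err;
 *	err = bdi_register(&my_bdi, NULL, "mydrv");
 *	if (err) {
 *		bdi_destroy(&my_bdi);
 *		return err;
 *	}
 *	...
 *	bdi_destroy(&my_bdi);	(unregisters, then frees the counters)
 */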
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	clear_bit(bit, &bdi->state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);
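/*
 * Usage sketch (hypothetical driver, not from this file): a driver whose
 * request queue fills up marks the bdi congested and clears the bit once
 * it has drained; BLK_RW_SYNC and BLK_RW_ASYNC index the two bits:
 *
 *	set_bdi_congested(bdi, BLK_RW_ASYNC);	(queue is full)
 *	...
 *	clear_bdi_congested(bdi, BLK_RW_ASYNC);	(room again; wakes waiters)
 */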
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}
EXPORT_SYMBOL(congestion_wait);
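/*
 * Caller sketch (illustrative): reclaim-style code backs off for up to a
 * tenth of a second while async writeback is congested:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 */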