/* backing-dev.c */

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, and provides
 * reader-side protection for bdi_pending_list. bdi_list readers are instead
 * protected by RCU.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
	if (wb1 < wb2) {
		spin_lock(&wb1->list_lock);
		spin_lock_nested(&wb2->list_lock, 1);
	} else {
		spin_lock(&wb2->list_lock);
		spin_lock_nested(&wb1->list_lock, 1);
	}
}
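
/*
 * Editorial note: bdi_lock_two() prevents ABBA deadlock by always taking the
 * two list_locks in ascending address order, so any two tasks locking the
 * same pair agree on the order; spin_lock_nested() tells lockdep the second
 * acquisition of the same lock class is intentional. A hedged usage sketch
 * (hypothetical caller, modelled on bdi_destroy() below):
 */
#if 0
	bdi_lock_two(&src_bdi->wb, &dst_bdi->wb);
	list_move(&inode->i_wb_list, &dst_bdi->wb.b_dirty);
	spin_unlock(&src_bdi->wb.list_lock);	/* callers unlock both sides */
	spin_unlock(&dst_bdi->wb.list_lock);
#endif
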
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_wb_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
		nr_more_io++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
		   (unsigned long) K(bdi->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}
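
/*
 * Editorial note: the store above converts kilobytes to pages with
 * "read_ahead_kb >> (PAGE_SHIFT - 10)". Worked example, assuming 4 KiB
 * pages (PAGE_SHIFT == 12): 128 kB >> 2 == 32 pages. The K() macro below
 * performs the inverse conversion: 32 pages << 2 == 128 kB.
 */
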
#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};
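
/*
 * Editorial note: for readability, this is roughly what the macro produces;
 * BDI_SHOW(min_ratio, bdi->min_ratio) expands to (whitespace adjusted):
 */
#if 0
static ssize_t min_ratio_show(struct device *dev,
			      struct device_attribute *attr, char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%lld\n",
			(long long)bdi->min_ratio);
}
#endif
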
static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);
	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	bdi_arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");
	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback thread individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}

void bdi_arm_supers_timer(void)
{
	unsigned long next;

	if (!dirty_writeback_interval)
		return;

	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}
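
/*
 * Editorial note: dirty_writeback_interval is expressed in centiseconds, so
 * "dirty_writeback_interval * 10" yields milliseconds. With the default of
 * 500 (i.e. 5 seconds), the timer fires roughly 5 seconds from now;
 * round_jiffies_up() aligns the expiry to a whole-second boundary so that
 * timer wakeups across the system can be batched.
 */
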
static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	bdi_arm_supers_timer();
}

static void wakeup_timer_fn(unsigned long data)
{
	struct backing_dev_info *bdi = (struct backing_dev_info *)data;

	spin_lock_bh(&bdi->wb_lock);
	if (bdi->wb.task) {
		trace_writeback_wake_thread(bdi);
		wake_up_process(bdi->wb.task);
	} else if (bdi->dev) {
		/*
		 * When bdi tasks are inactive for a long time, they are
		 * killed. In this case we have to wake up the forker thread,
		 * which should create and run the bdi thread.
		 */
		trace_writeback_wake_forker_thread(bdi);
		wake_up_process(default_backing_dev_info.wb.task);
	}
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}
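
/*
 * Editorial note: a condensed sketch of the fast path this optimizes,
 * loosely based on '__mark_inode_dirty()' (simplified, not the verbatim
 * caller; 'wakeup_bdi' stands in for its dirty-list bookkeeping):
 */
#if 0
	if (bdi_cap_writeback_dirty(bdi) && wakeup_bdi)
		/* first dirty inode: arm a timer instead of waking the
		 * flusher immediately, saving a context switch */
		bdi_wakeup_thread_delayed(bdi);
#endif
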
/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive.
 */
static unsigned long bdi_longest_inactive(void)
{
	unsigned long interval;

	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
	return max(5UL * 60 * HZ, interval);
}
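
/*
 * Editorial note: with the default dirty_writeback_interval of 500
 * centiseconds, interval == msecs_to_jiffies(5000), so the max() above
 * evaluates to 5UL * 60 * HZ: an idle bdi thread is kept around for at
 * least five minutes before the forker considers killing it.
 */
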
/*
 * Clear pending bit and wakeup anybody waiting for flusher thread creation or
 * shutdown
 */
static void bdi_clear_pending(struct backing_dev_info *bdi)
{
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);
}

static int bdi_forker_thread(void *ptr)
{
	struct bdi_writeback *me = ptr;

	current->flags |= PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	for (;;) {
		struct task_struct *task = NULL;
		struct backing_dev_info *bdi;
		enum {
			NO_ACTION,   /* Nothing to do */
			FORK_THREAD, /* Fork bdi thread */
			KILL_THREAD, /* Kill inactive bdi thread */
		} action = NO_ACTION;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
			del_timer(&me->wakeup_timer);
			wb_do_writeback(me, 0);
		}

		spin_lock_bh(&bdi_lock);
		/*
		 * In the following loop we are going to check whether we have
		 * some work to do without any synchronization with tasks
		 * waking us up to do work for them. Set the task state here
		 * so that we don't miss wakeups after verifying conditions.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry(bdi, &bdi_list, bdi_list) {
			bool have_dirty_io;

			if (!bdi_cap_writeback_dirty(bdi) ||
			     bdi_cap_flush_forker(bdi))
				continue;

			WARN(!test_bit(BDI_registered, &bdi->state),
			     "bdi %p/%s is not registered!\n", bdi, bdi->name);

			have_dirty_io = !list_empty(&bdi->work_list) ||
					wb_has_dirty_io(&bdi->wb);

			/*
			 * If the bdi has work to do, but the thread does not
			 * exist - create it.
			 */
			if (!bdi->wb.task && have_dirty_io) {
				/*
				 * Set the pending bit - if someone tries to
				 * unregister this bdi, it'll wait on this bit.
				 */
				set_bit(BDI_pending, &bdi->state);
				action = FORK_THREAD;
				break;
			}

			spin_lock(&bdi->wb_lock);

			/*
			 * If there is no work to do and the bdi thread was
			 * inactive long enough - kill it. The wb_lock is taken
			 * to make sure no-one adds more work to this bdi and
			 * wakes the bdi thread up.
			 */
			if (bdi->wb.task && !have_dirty_io &&
			    time_after(jiffies, bdi->wb.last_active +
						bdi_longest_inactive())) {
				task = bdi->wb.task;
				bdi->wb.task = NULL;
				spin_unlock(&bdi->wb_lock);
				set_bit(BDI_pending, &bdi->state);
				action = KILL_THREAD;
				break;
			}
			spin_unlock(&bdi->wb_lock);
		}
		spin_unlock_bh(&bdi_lock);

		/* Keep working if default bdi still has things to do */
		if (!list_empty(&me->bdi->work_list))
			__set_current_state(TASK_RUNNING);

		switch (action) {
		case FORK_THREAD:
			__set_current_state(TASK_RUNNING);
			task = kthread_create(bdi_writeback_thread, &bdi->wb,
					      "flush-%s", dev_name(bdi->dev));
			if (IS_ERR(task)) {
				/*
				 * If thread creation fails, force writeout of
				 * the bdi from the thread. Hopefully 1024 is
				 * large enough for efficient IO.
				 */
				writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
			} else {
				/*
				 * The spinlock makes sure we do not lose
				 * wake-ups when racing with 'bdi_queue_work()'.
				 * And as soon as the bdi thread is visible, we
				 * can start it.
				 */
				spin_lock_bh(&bdi->wb_lock);
				bdi->wb.task = task;
				spin_unlock_bh(&bdi->wb_lock);
				wake_up_process(task);
			}
			bdi_clear_pending(bdi);
			break;

		case KILL_THREAD:
			__set_current_state(TASK_RUNNING);
			kthread_stop(task);
			bdi_clear_pending(bdi);
			break;

		case NO_ACTION:
			if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
				/*
				 * There is no dirty data. The only thing we
				 * should now care about is checking for
				 * inactive bdi threads and killing them. Thus,
				 * let's sleep for a longer time, save energy
				 * and be friendly to battery-powered devices.
				 */
				schedule_timeout(bdi_longest_inactive());
			else
				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
			try_to_freeze();
			break;
		}
	}

	return 0;
}
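
/*
 * Editorial note: each forker iteration thus resolves to one of three
 * outcomes: FORK_THREAD when a bdi has dirty IO but no thread, KILL_THREAD
 * when a thread has been idle past bdi_longest_inactive(), and NO_ACTION,
 * where the sleep length depends on whether any dirty data exists at all.
 * The BDI_pending bit set in the first two cases is what bdi_wb_shutdown()
 * waits on below before tearing a bdi down.
 */
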
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}
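
/*
 * Editorial note: a sketch of the RCU reader side that the
 * synchronize_rcu_expedited() above waits out (hypothetical walker, not
 * part of this file):
 */
#if 0
	struct backing_dev_info *pos;

	rcu_read_lock();
	list_for_each_entry_rcu(pos, &bdi_list, bdi_list) {
		/* pos cannot be unlinked and freed while the read-side
		 * critical section is held */
	}
	rcu_read_unlock();
#endif
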
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
					dev_name(dev));
		if (IS_ERR(wb->task))
			return PTR_ERR(wb->task);
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
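
/*
 * Editorial note: a hedged example of the block-device path (names are
 * illustrative; the add_disk() register path of this era does something
 * similar): register the queue's bdi under its "major:minor" name.
 */
#if 0
	bdi_register_dev(&disk->queue->backing_dev_info, disk_devt(disk));
#endif
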
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	struct task_struct *task;

	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Finally, kill the kernel thread. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility.
	 */
	spin_lock_bh(&bdi->wb_lock);
	task = bdi->wb.task;
	bdi->wb.task = NULL;
	spin_unlock_bh(&bdi->wb_lock);

	if (task)
		kthread_stop(task);
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = &default_backing_dev_info;
	}
	spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	struct device *dev = bdi->dev;

	if (dev) {
		bdi_set_min_ratio(bdi, 0);
		trace_writeback_bdi_unregister(bdi);
		bdi_prune_sb(bdi);
		del_timer_sync(&bdi->wb.wakeup_timer);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);

		spin_lock_bh(&bdi->wb_lock);
		bdi->dev = NULL;
		spin_unlock_bh(&bdi->wb_lock);

		device_unregister(dev);
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	spin_lock_init(&wb->list_lock);
	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
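
/*
 * Editorial note: INIT_BW is in pages per second. 100 MB/s is
 * 100 * 2^20 bytes/s; dividing by the page size (2^PAGE_SHIFT bytes) gives
 * 100 << (20 - PAGE_SHIFT). Assuming 4 KiB pages (PAGE_SHIFT == 12), that
 * is 100 << 8 == 25600 pages/s.
 */
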
int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;

	bdi->bw_time_stamp = jiffies;
	bdi->written_stamp = 0;

	bdi->balanced_dirty_ratelimit = INIT_BW;
	bdi->dirty_ratelimit = INIT_BW;
	bdi->write_bandwidth = INIT_BW;
	bdi->avg_write_bandwidth = INIT_BW;

	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		bdi_lock_two(&bdi->wb, dst);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&bdi->wb.list_lock);
		spin_unlock(&dst->list_lock);
	}

	bdi_unregister(bdi);

	/*
	 * If bdi_unregister() had already been called earlier, the
	 * wakeup_timer could still be armed because bdi_prune_sb()
	 * can race with the bdi_wakeup_thread_delayed() calls from
	 * __mark_inode_dirty().
	 */
	del_timer_sync(&bdi->wb.wakeup_timer);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	char tmp[32];
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	sprintf(tmp, "%.28s%s", name, "-%d");
	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
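
/*
 * Editorial note: a hedged sketch of a filesystem using the helper above at
 * mount time (hypothetical fill_super excerpt; the sb_info layout and all
 * "example" names are illustrative, not from this file):
 */
#if 0
static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	struct example_sb_info *sbi;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* registers as "example-<N>" with a unique sequence number */
	err = bdi_setup_and_register(&sbi->bdi, "example", BDI_CAP_MAP_COPY);
	if (err) {
		kfree(sbi);
		return err;
	}
	sb->s_fs_info = sbi;
	sb->s_bdi = &sbi->bdi;	/* writeback now targets the private bdi */
	return 0;
}
#endif
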
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (test_and_clear_bit(bit, &bdi->state))
		atomic_dec(&nr_bdi_congested[sync]);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (!test_and_set_bit(bit, &bdi->state))
		atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
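
/*
 * Editorial note: the classic caller pattern in reclaim and allocation
 * paths (sketch; the predicate is hypothetical): back off for up to 100 ms
 * when writeback is overwhelmed instead of busy-looping.
 */
#if 0
	if (too_many_dirty_pages())
		congestion_wait(BLK_RW_ASYNC, HZ / 10);
#endif
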
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev is congested (any backing_dev) and the given @zone has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of zone congestion, cond_resched() is called to yield
 * the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
			!zone_is_reclaim_congested(zone)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);