/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */
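
/*
 * A sketch of the control loop below, as I read it (interpretive, not
 * authoritative): the amount of dirty data is steered toward a target
 * (writeback_percent of the cache, scaled by this backing device's share
 * of all cached sectors) with a proportional-derivative controller.  The
 * proportional term scales the current rate by the relative error, and
 * the smoothed derivative term damps oscillation when the amount of
 * dirty data is rising or falling quickly.
 */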
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int change = 0;
	int64_t error;
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;

	dc->disk.sectors_dirty_last = dirty;

	derivative *= dc->writeback_rate_d_term;
	derivative = clamp(derivative, -dirty, dirty);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      dc->writeback_rate_d_smooth, 0);

	/* Avoid divide by zero */
	if (!target)
		goto out;

	error = div64_s64((dirty + derivative - target) << 8, target);

	change = div_s64((dc->writeback_rate.rate * error) >> 8,
			 dc->writeback_rate_p_term_inverse);

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

out:
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}
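
/*
 * Convert the current rate into a sleep after each chunk of writeback.
 * The 10000000ULL multiplier is, as far as I can tell, a scale factor
 * matching bch_next_delay()'s nanosecond clock to the rate units used
 * above; the result is capped at HZ jiffies so a single batch never
 * sleeps for more than about a second.
 */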
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	uint64_t ret;

	if (atomic_read(&dc->disk.detaching) ||
	    !dc->writeback_percent)
		return 0;

	ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);

	return min_t(uint64_t, ret, HZ);
}
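
/*
 * Writeback of a single key flows through a small closure pipeline (my
 * summary of the code below, not an official description): read_dirty()
 * allocates a dirty_io and reads the extent from the cache device;
 * write_dirty() then rewrites the same bio at the backing device; and
 * write_dirty_finish() clears the dirty bit in the btree and frees the
 * dirty_io.
 */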
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private		= w;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		unsigned i;
		struct btree_op op;
		struct keylist keys;

		bch_btree_op_init(&op, -1);
		bch_keylist_init(&keys);

		op.type = BTREE_REPLACE;
		bkey_copy(&op.replace, &w->key);

		SET_KEY_DIRTY(&w->key, false);
		bch_keylist_add(&keys, &w->key);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		bch_btree_insert(&op, dc->disk.c, &keys, NULL);

		if (op.insert_collision)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(op.insert_collision
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}
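
/*
 * The "dumb" error signalling mentioned above: on any IO error,
 * dirty_endio() clears KEY_DIRTY in the in-memory copy of the key, so
 * write_dirty_finish() skips the btree update.  The on-disk key stays
 * dirty, which (as I understand it) means the extent is simply picked
 * up again on a later writeback scan.
 */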
static void dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw		= WRITE;
	io->bio.bi_sector	= KEY_START(&w->key);
	io->bio.bi_bdev		= io->dc->bdev;
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty_finish, system_wq);
}

static void read_dirty_endio(struct bio *bio, int error)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    error, "reading dirty data from cache");

	dirty_endio(bio, error);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl, &io->dc->disk);

	continue_at(cl, write_dirty, system_wq);
}
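
/*
 * Two throttles are visible in read_dirty() below: the dc->in_flight
 * semaphore (initialized to 64 in bch_cached_dev_writeback_init())
 * bounds how many writeback IOs are in flight at once, and
 * writeback_delay() spaces out batches according to the current rate.
 */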
static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {
		try_to_freeze();

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		io->bio.bi_sector	= PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
						    &w->key, 0)->bdev;
		io->bio.bi_rw		= READ;
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}
/* Scan for dirty data */
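
/*
 * Dirty data is tracked per stripe so that fully dirty stripes can be
 * written back preferentially (see dirty_full_stripe_pred() below).
 * nr_sectors is signed: a negative count decrements the per-stripe
 * totals when dirty data is written back or discarded.
 */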
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset;
	uint64_t stripe = offset;

	if (!d)
		return;

	do_div(stripe, d->stripe_size);

	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		atomic_add(s, d->stripe_sectors_dirty + stripe);
		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}
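
/*
 * Keybuf predicates: dirty_pred() accepts any dirty key, while
 * dirty_full_stripe_pred() accepts a dirty key only if it touches at
 * least one completely dirty stripe.  The latter is used when partial
 * stripe writes are expensive on the backing device (parity RAID being
 * the usual example, I believe).
 */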
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}

static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
{
	uint64_t stripe = KEY_START(k);
	unsigned nr_sectors = KEY_SIZE(k);
	struct cached_dev *dc = container_of(buf, struct cached_dev,
					     writeback_keys);

	if (!KEY_DIRTY(k))
		return false;

	do_div(stripe, dc->disk.stripe_size);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) ==
		    dc->disk.stripe_size)
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}
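
/*
 * Refill the keybuf from where the last scan left off, wrapping back to
 * the start of this device's keyspace once the end is reached.  The
 * return value (true only when the entire keyspace was covered) is what
 * lets bch_writeback_thread() decide the device can be marked clean.
 */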
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	bool searched_from_start = false;
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);

	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
		buf->last_scanned = KEY(dc->disk.id, 0, 0);
		searched_from_start = true;
	}

	if (dc->partial_stripes_expensive) {
		uint64_t i;

		for (i = 0; i < dc->disk.nr_stripes; i++)
			if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
			    dc->disk.stripe_size)
				goto full_stripes;

		goto normal_refill;
full_stripes:
		searched_from_start = false; /* not searching entire btree */
		bch_refill_keybuf(dc->disk.c, buf, &end,
				  dirty_full_stripe_pred);
	} else {
normal_refill:
		bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
	}

	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
}
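
/*
 * The main writeback loop: sleep while there is nothing to do, refill
 * the keybuf, write the keys back via read_dirty(), and, once a full
 * scan turns up no dirty keys, mark the backing device clean in its
 * superblock.
 */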
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!atomic_read(&dc->disk.detaching) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			try_to_freeze();
			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !atomic_read(&dc->disk.detaching))
				delay = schedule_timeout_interruptible(delay);
		}
	}

	return 0;
}
/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
					struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = dc->disk.id;

	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}
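
/*
 * Default tuning set up below, for reference: target 10% of the cache
 * dirty, recompute the rate every 30 seconds, wait 30 seconds after a
 * full scan, and start from a rate of 1024 with a P term of 1/64 and a
 * smoothed D term (d_term 16, d_smooth 8).
 */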
int bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 30;
	dc->writeback_rate_d_term	= 16;
	dc->writeback_rate_p_term_inverse = 64;
	dc->writeback_rate_d_smooth	= 8;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	return 0;
}