super.c

/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

struct uuid_entry_v0 {
	uint8_t		uuid[16];
	uint8_t		label[32];
	uint32_t	first_reg;
	uint32_t	last_reg;
	uint32_t	invalidated;
	uint32_t	pad;
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major, bcache_minor;
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)

static void bio_split_pool_free(struct bio_split_pool *p)
{
	if (p->bio_split_hook)
		mempool_destroy(p->bio_split_hook);

	if (p->bio_split)
		bioset_free(p->bio_split);
}

static int bio_split_pool_init(struct bio_split_pool *p)
{
	p->bio_split = bioset_create(4, 0);
	if (!p->bio_split)
		return -ENOMEM;

	p->bio_split_hook = mempool_create_kmalloc_pool(4,
				sizeof(struct bio_split_hook));
	if (!p->bio_split_hook)
		return -ENOMEM;

	return 0;
}

/* Superblock */
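/*
 * Read the superblock from bdev into sb, converting fields from their
 * on-disk little endian format and sanity checking them.  Returns NULL on
 * success or a human readable error string; on success *res holds a
 * reference to the page containing the raw superblock, which callers keep
 * around for later superblock writes.
 */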
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset = le64_to_cpu(s->offset);
	sb->version = le64_to_cpu(s->version);

	memcpy(sb->magic, s->magic, 16);
	memcpy(sb->uuid, s->uuid, 16);
	memcpy(sb->set_uuid, s->set_uuid, 16);
	memcpy(sb->label, s->label, SB_LABEL_SIZE);

	sb->flags = le64_to_cpu(s->flags);
	sb->seq = le64_to_cpu(s->seq);
	sb->last_mount = le32_to_cpu(s->last_mount);
	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->keys = le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->block_size = le16_to_cpu(s->block_size);
		sb->data_offset = BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->block_size = le16_to_cpu(s->block_size);
		sb->data_offset = le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets = le64_to_cpu(s->nbuckets);
		sb->block_size = le16_to_cpu(s->block_size);
		sb->bucket_size = le16_to_cpu(s->bucket_size);

		sb->nr_in_set = le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

static void write_bdev_super_endio(struct bio *bio, int error)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write.cl);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_sector = SB_SECTOR;
	bio->bi_rw = REQ_SYNC|REQ_META;
	bio->bi_size = SB_SIZE;
	bch_bio_map(bio, NULL);

	out->offset = cpu_to_le64(sb->offset);
	out->version = cpu_to_le64(sb->version);

	memcpy(out->uuid, sb->uuid, 16);
	memcpy(out->set_uuid, sb->set_uuid, 16);
	memcpy(out->label, sb->label, SB_LABEL_SIZE);

	out->flags = cpu_to_le64(sb->flags);
	out->seq = cpu_to_le64(sb->seq);

	out->last_mount = cpu_to_le32(sb->last_mount);
	out->first_bucket = cpu_to_le16(sb->first_bucket);
	out->keys = cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(REQ_WRITE, bio);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write.cl;
	struct bio *bio = &dc->sb_bio;

	closure_lock(&dc->sb_write, parent);

	bio_reset(bio);
	bio->bi_bdev = dc->bdev;
	bio->bi_end_io = write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return(cl);
}

static void write_super_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, error, "writing superblock");
	closure_put(&ca->set->sb_write.cl);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write.cl;
	struct cache *ca;
	unsigned i;

	closure_lock(&c->sb_write, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq = c->sb.seq;
		ca->sb.last_mount = c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio->bi_bdev = ca->bdev;
		bio->bi_end_io = write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return(cl);
}

/* UUID io */

static void uuid_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);

	cache_set_err_on(error, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}
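/*
 * Read or write the packed array of struct uuid_entry (c->uuids) at the
 * bucket(s) the key k points to; for reads only the first pointer is used.
 */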
static void uuid_io(struct cache_set *c, unsigned long rw,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write.cl;
	struct uuid_entry *u;
	unsigned i;

	BUG_ON(!parent);
	closure_lock(&c->uuid_write, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_rw = REQ_SYNC|REQ_META|rw;
		bio->bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io = uuid_endio;
		bio->bi_private = cl;
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (!(rw & WRITE))
			break;
	}

	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
		 pkey(&c->uuid_bucket));

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return(cl);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_ptr_invalid(c, 1, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, READ_SYNC, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0 *u0 = (void *) c->uuids;
		struct uuid_entry *u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid, u0[i].uuid, 16);
			memcpy(u1[i].label, u0[i].label, 32);

			u1[i].first_reg = u0[i].first_reg;
			u1[i].last_reg = u0[i].last_reg;
			u1[i].invalidated = u0[i].invalidated;

			u1[i].flags = 0;
			u1[i].sectors = 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	closure_init_stack(&cl);

	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_WRITE, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	__bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return uuid_find(c, zero_uuid);
}
/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list; the
 * journal header points to the first bucket, the first bucket points to the
 * second bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */
static void prio_endio(struct bio *bio, int error)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(error, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_sector = bucket * ca->sb.bucket_size;
	bio->bi_bdev = ca->bdev;
	bio->bi_rw = REQ_SYNC|REQ_META|rw;
	bio->bi_size = bucket_bytes(ca);

	bio->bi_end_io = prio_endio;
	bio->bi_private = ca;
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio, ca);
	closure_sync(cl);
}

#define buckets_free(c)	"free %zu, free_inc %zu, unused %zu",		\
	fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets; b++)
		b->disk_gen = b->gen;

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
	blktrace_msg(ca, "Starting priorities: " buckets_free(ca));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket = ca->prio_buckets[i + 1];
		p->magic = pset_magic(ca);
		p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_WRITE);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	ca->need_save_prio = 0;

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++)
		ca->prio_last_buckets[i] = ca->prio_buckets[i];
}
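/*
 * Walk the on-disk list of prio buckets, starting from the bucket recorded
 * in the journal header, and restore each bucket's prio and gen, warning on
 * bad checksums or magic along the way.
 */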
static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, READ_SYNC);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(ca))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
	}
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (atomic_read(&d->closing))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static int release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
	return 0;
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;
	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!atomic_xchg(&d->closing, 1))
		closure_queue(&d->cl);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (atomic_read(&d->detaching)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);

		atomic_set(&d->detaching, 0);
	}

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));

	d->id = id;
	d->c = c;
	c->devices[id] = d;

	closure_get(&c->caching);
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");
}

static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);

	if (d->disk)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk)
		put_disk(d->disk);

	bio_split_pool_free(&d->bio_split_hook);
	if (d->unaligned_bvec)
		mempool_destroy(d->unaligned_bvec);
	if (d->bio_split)
		bioset_free(d->bio_split);

	closure_debug_destroy(&d->cl);
}
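/*
 * Set up the generic parts of a bcache device: bio split pools, the gendisk
 * and its request queue, and the queue limits derived from block_size.
 */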
static int bcache_device_init(struct bcache_device *d, unsigned block_size)
{
	struct request_queue *q;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
	    bio_split_pool_init(&d->bio_split_hook))
		return -ENOMEM;

	d->disk = alloc_disk(1);
	if (!d->disk)
		return -ENOMEM;

	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);

	d->disk->major = bcache_major;
	d->disk->first_minor = bcache_minor++;
	d->disk->fops = &bcache_ops;
	d->disk->private_data = d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue = q;
	q->queuedata = d;
	q->backing_dev_info.congested_data = d;
	q->limits.max_hw_sectors = UINT_MAX;
	q->limits.max_sectors = UINT_MAX;
	q->limits.max_segment_size = UINT_MAX;
	q->limits.max_segments = BIO_MAX_PAGES;
	q->limits.max_discard_sectors = UINT_MAX;
	q->limits.io_min = block_size;
	q->limits.logical_block_size = block_size;
	q->limits.physical_block_size = block_size;
	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

	return 0;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;

	if (atomic_xchg(&dc->running, 1))
		return;

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
#if 0
	char *env[] = { "SYMLINK=label" , NULL };
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
#endif
	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!atomic_read(&dc->disk.detaching));
	BUG_ON(atomic_read(&dc->count));

	sysfs_remove_link(&dc->disk.c->kobj, dc->disk.name);
	sysfs_remove_link(&dc->disk.kobj, "cache");

	mutex_lock(&bch_register_lock);

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (atomic_read(&dc->disk.closing))
		return;

	if (atomic_xchg(&dc->disk.detaching, 1))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}
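/*
 * Attach a backing device to a cache set: look up (or allocate) its
 * uuid_entry, record the set uuid in the backing device's superblock when
 * this is a fresh attachment, and kick off writeback if the device was
 * dirty.
 */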
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];

	bdevname(dc->bdev, buf);

	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	 */

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	bcache_device_link(&dc->disk, c, "bdev");
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	atomic_set(&dc->count, 1);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		atomic_set(&dc->has_dirty, 1);
		atomic_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);

	mutex_lock(&bch_register_lock);

	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev)) {
		blk_sync_queue(bdev_get_queue(dc->bdev));
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int err;
	struct io *io;

	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);

	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	err = bcache_device_init(&dc->disk, block_size);
	if (err)
		goto err;

	spin_lock_init(&dc->io_lock);
	closure_init_unlocked(&dc->sb_write);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);

	dc->sequential_merge = true;
	dc->sequential_cutoff = 4 << 20;

	INIT_LIST_HEAD(&dc->io_lru);
	dc->sb_bio.bi_max_vecs = 1;
	dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	bch_writeback_init_cached_dev(dc);
	return 0;
err:
	bcache_device_stop(&dc->disk);
	return err;
}

/* Cached device - bcache superblock */

static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
				 struct block_device *bdev,
				 struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct gendisk *g;
	struct cache_set *c;

	if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
		return err;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	g = dc->disk.disk;

	set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

	g->queue->backing_dev_info.ra_pages =
		max(g->queue->backing_dev_info.ra_pages,
		    bdev->bd_queue->backing_dev_info.ra_pages);

	bch_cached_dev_request_init(dc);

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return NULL;
err:
	kobject_put(&dc->disk.kobj);
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	/*
	 * Return NULL instead of an error because kobject_put() cleans
	 * everything up
	 */
	return NULL;
}
/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	bcache_device_free(d);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	sysfs_remove_link(&d->c->kobj, d->name);
	sysfs_remove_link(&d->kobj, "cache");
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c)))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	set_capacity(d->disk, u->sectors);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca)
			kobject_put(&ca->kobj);

	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));

	kfree(c->fill_iter);
	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct btree *b;

	/* Shut down allocator threads */
	set_bit(CACHE_SET_STOPPING_2, &c->flags);
	wake_up(&c->alloc_wait);

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list)
		if (btree_node_dirty(b))
			bch_btree_write(b, true, NULL);

	closure_return(cl);
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc, *t;
	size_t i;

	mutex_lock(&bch_register_lock);

	if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			bch_cached_dev_detach(dc);

	for (i = 0; i < c->nr_uuids; i++)
		if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
			bcache_device_stop(c->devices[i]);

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
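/*
 * Allocate an in-memory cache_set and initialize it from an on-disk
 * superblock; the set is only built here, the caller registers and starts
 * it.
 */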
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size = sb->block_size;
	c->sb.bucket_size = sb->bucket_size;
	c->sb.nr_in_set = sb->nr_in_set;
	c->sb.last_mount = sb->last_mount;
	c->bucket_bits = ilog2(sb->bucket_size);
	c->block_bits = ilog2(sb->block_size);
	c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);

	c->btree_pages = c->sb.bucket_size / PAGE_SECTORS;
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	init_waitqueue_head(&c->alloc_wait);
	mutex_init(&c->bucket_lock);
	mutex_init(&c->fill_lock);
	mutex_init(&c->sort_lock);
	spin_lock_init(&c->sort_time_lock);
	closure_init_unlocked(&c->sb_write);
	closure_init_unlocked(&c->uuid_write);
	spin_lock_init(&c->btree_read_time_lock);
	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
	    !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c))
		goto err;

	c->fill_iter->size = sb->bucket_size / sb->block_size;

	c->congested_read_threshold_us = 2000;
	c->congested_write_threshold_us = 20000;
	c->error_limit = 8 << IO_ERROR_SHIFT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}
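/*
 * Bring a fully assembled cache set online.  If the set was in use
 * (CACHE_SYNC), read the journal, priorities and uuids and replay the
 * journal; otherwise initialize fresh metadata, allocate a new btree root
 * and write everything out before marking the set as synced.
 */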
static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	unsigned i;

	struct btree_op op;
	bch_btree_op_init_stack(&op);
	op.lock = SHRT_MAX;

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal, &op))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_ptr_invalid(c, j->btree_level + 1, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, k, j->btree_level, &op);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &op.cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c, &op))
			goto err;

		bch_journal_mark(c, &journal);
		bch_btree_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		for_each_cache(ca, c, i)
			closure_call(&ca->alloc, bch_allocator_thread,
				     system_wq, &c->cl);

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal, &op);
	} else {
		pr_notice("invalidating existing data");
		/* Don't want invalidate_buckets() to queue a gc yet */
		closure_lock(&c->gc, NULL);

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_btree_gc_finish(c);

		for_each_cache(ca, c, i)
			closure_call(&ca->alloc, bch_allocator_thread,
				     ca->alloc_workqueue, &c->cl);

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		wake_up(&c->alloc_wait);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err_unlock_gc;

		err = "cannot allocate new btree root";
		c->root = bch_btree_node_alloc(c, 0, &op.cl);
		if (IS_ERR_OR_NULL(c->root))
			goto err_unlock_gc;

		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_write(c->root, true, &op);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &op.cl);

		/* Unlock */
		closure_set_stopped(&c->gc.cl);
		closure_put(&c->gc.cl);
	}

	closure_sync(&op.cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c);

	flash_devs_run(c);

	return;
err_unlock_gc:
	closure_set_stopped(&c->gc.cl);
	closure_put(&c->gc.cl);
err:
	closure_sync(&op.cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, err);
}
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size == c->sb.block_size &&
	       ca->sb.bucket_size == c->sb.bucket_size &&
	       ca->sb.nr_in_set == c->sb.nr_in_set;
}
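/*
 * Find the cache_set this cache belongs to (matching set_uuid), or allocate
 * a new one; once every member of the set has been registered, run it.
 */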
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version = ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags = ca->sb.flags;
		c->sb.seq = ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (ca->set)
		ca->set->cache[ca->sb.nr_this_dev] = NULL;

	bch_cache_allocator_exit(ca);

	bio_split_pool_free(&ca->bio_split_hook);

	if (ca->alloc_workqueue)
		destroy_workqueue(ca->alloc_workqueue);

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->unused);
	free_fifo(&ca->free_inc);
	free_fifo(&ca->free);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev)) {
		blk_sync_queue(bdev_get_queue(ca->bdev));
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	kfree(ca);
	module_put(THIS_MODULE);
}
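/*
 * Allocate the in-memory structures for a cache device, sized from its
 * superblock: free lists, bucket heap, bucket array, prio buffers and the
 * allocator workqueue.
 */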
static int cache_alloc(struct cache_sb *sb, struct cache *ca)
{
	size_t free;
	struct bucket *b;

	if (!ca)
		return -ENOMEM;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));

	INIT_LIST_HEAD(&ca->discards);

	bio_init(&ca->sb_bio);
	ca->sb_bio.bi_max_vecs = 1;
	ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;

	bio_init(&ca->journal.bio);
	ca->journal.bio.bi_max_vecs = 8;
	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;

	free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
	free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);

	if (!init_fifo(&ca->free, free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
	    !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
	    !(ca->buckets = vmalloc(sizeof(struct bucket) *
				    ca->sb.nbuckets)) ||
	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					 2, GFP_KERNEL)) ||
	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
	    !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
	    bio_split_pool_init(&ca->bio_split_hook))
		goto err;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	if (bch_cache_allocator_init(ca))
		goto err;

	return 0;
err:
	kobject_put(&ca->kobj);
	return -ENOMEM;
}

static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
				  struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";

	if (cache_alloc(sb, ca) != 0)
		return err;

	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	err = "error creating kobject";
	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
		goto err;

	err = register_cache_set(ca);
	if (err)
		goto err;

	pr_info("registered cache device %s", bdevname(bdev, name));
	return NULL;
err:
	kobject_put(&ca->kobj);
	pr_info("error opening %s: %s", bdevname(bdev, name), err);
	/*
	 * Return NULL instead of an error because kobject_put() cleans
	 * everything up
	 */
	return NULL;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);
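/*
 * sysfs store handler for the register/register_quiet attributes: open the
 * block device named in the buffer, read its superblock, and register it as
 * either a backing device or a cache device.
 */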
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	mutex_lock(&bch_register_lock);

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (bdev == ERR_PTR(-EBUSY))
		err = "device busy";

	if (IS_ERR(bdev) ||
	    set_blocksize(bdev, 4096))
		goto err;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		err = register_bdev(sb, sb_page, bdev, dc);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

		err = register_cache(sb, sb_page, bdev, ca);
	}

	if (err) {
		/* register_(bdev|cache) will only return an error if they
		 * didn't get far enough to create the kobject - if they did,
		 * the kobject destructor will do this cleanup.
		 */
		put_page(sb_page);
err_close:
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
		if (attr != &ksysfs_register_quiet)
			pr_info("error opening %s: %s", path, err);
		ret = -EINVAL;
	}

	kfree(sb);
	kfree(path);
	mutex_unlock(&bch_register_lock);
	module_put(THIS_MODULE);
	return ret;
}
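/*
 * Reboot notifier: on shutdown, stop every cache set and backing device and
 * wait (up to about two seconds) for them to finish closing.
 */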
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_writeback_exit();
	bch_request_exit();
	bch_btree_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0)
		return bcache_major;

	if (!(bcache_wq = create_workqueue("bcache")) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    sysfs_create_files(bcache_kobj, files) ||
	    bch_btree_init() ||
	    bch_request_init() ||
	    bch_writeback_init() ||
	    bch_debug_init(bcache_kobj))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);