super.c

  1. /*
  2. * bcache setup/teardown code, and some metadata io - read a superblock and
  3. * figure out what to do with it.
  4. *
  5. * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
  6. * Copyright 2012 Google, Inc.
  7. */
  8. #include "bcache.h"
  9. #include "btree.h"
  10. #include "debug.h"
  11. #include "request.h"
  12. #include <linux/buffer_head.h>
  13. #include <linux/debugfs.h>
  14. #include <linux/genhd.h>
  15. #include <linux/module.h>
  16. #include <linux/random.h>
  17. #include <linux/reboot.h>
  18. #include <linux/sysfs.h>
  19. MODULE_LICENSE("GPL");
  20. MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
  21. static const char bcache_magic[] = {
  22. 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
  23. 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
  24. };
  25. static const char invalid_uuid[] = {
  26. 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
  27. 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
  28. };
  29. /* Default is -1; we skip past it for struct cached_dev's cache mode */
  30. const char * const bch_cache_modes[] = {
  31. "default",
  32. "writethrough",
  33. "writeback",
  34. "writearound",
  35. "none",
  36. NULL
  37. };
  38. struct uuid_entry_v0 {
  39. uint8_t uuid[16];
  40. uint8_t label[32];
  41. uint32_t first_reg;
  42. uint32_t last_reg;
  43. uint32_t invalidated;
  44. uint32_t pad;
  45. };
  46. static struct kobject *bcache_kobj;
  47. struct mutex bch_register_lock;
  48. LIST_HEAD(bch_cache_sets);
  49. static LIST_HEAD(uncached_devices);
  50. static int bcache_major, bcache_minor;
  51. static wait_queue_head_t unregister_wait;
  52. struct workqueue_struct *bcache_wq;
  53. #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
  54. static void bio_split_pool_free(struct bio_split_pool *p)
  55. {
  56. if (p->bio_split_hook)
  57. mempool_destroy(p->bio_split_hook);
  58. if (p->bio_split)
  59. bioset_free(p->bio_split);
  60. }
  61. static int bio_split_pool_init(struct bio_split_pool *p)
  62. {
  63. p->bio_split = bioset_create(4, 0);
  64. if (!p->bio_split)
  65. return -ENOMEM;
  66. p->bio_split_hook = mempool_create_kmalloc_pool(4,
  67. sizeof(struct bio_split_hook));
  68. if (!p->bio_split_hook)
  69. return -ENOMEM;
  70. return 0;
  71. }
  72. /* Superblock */
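/*
 * Read and validate the superblock: __bread() pulls in the block holding the
 * on-disk superblock, the fields are converted from little endian into the
 * in-memory struct cache_sb, and the offset, magic, checksum and per-version
 * geometry are sanity checked. On success NULL is returned and the page
 * containing the raw superblock is handed back via *res (with an extra
 * reference) so it can be reused when the superblock is rewritten; on failure
 * an error string describing the problem is returned.
 */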
  73. static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
  74. struct page **res)
  75. {
  76. const char *err;
  77. struct cache_sb *s;
  78. struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
  79. unsigned i;
  80. if (!bh)
  81. return "IO error";
  82. s = (struct cache_sb *) bh->b_data;
  83. sb->offset = le64_to_cpu(s->offset);
  84. sb->version = le64_to_cpu(s->version);
  85. memcpy(sb->magic, s->magic, 16);
  86. memcpy(sb->uuid, s->uuid, 16);
  87. memcpy(sb->set_uuid, s->set_uuid, 16);
  88. memcpy(sb->label, s->label, SB_LABEL_SIZE);
  89. sb->flags = le64_to_cpu(s->flags);
  90. sb->seq = le64_to_cpu(s->seq);
  91. sb->last_mount = le32_to_cpu(s->last_mount);
  92. sb->first_bucket = le16_to_cpu(s->first_bucket);
  93. sb->keys = le16_to_cpu(s->keys);
  94. for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
  95. sb->d[i] = le64_to_cpu(s->d[i]);
  96. pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
  97. sb->version, sb->flags, sb->seq, sb->keys);
  98. err = "Not a bcache superblock";
  99. if (sb->offset != SB_SECTOR)
  100. goto err;
  101. if (memcmp(sb->magic, bcache_magic, 16))
  102. goto err;
  103. err = "Too many journal buckets";
  104. if (sb->keys > SB_JOURNAL_BUCKETS)
  105. goto err;
  106. err = "Bad checksum";
  107. if (s->csum != csum_set(s))
  108. goto err;
  109. err = "Bad UUID";
  110. if (bch_is_zero(sb->uuid, 16))
  111. goto err;
  112. sb->block_size = le16_to_cpu(s->block_size);
  113. err = "Superblock block size smaller than device block size";
  114. if (sb->block_size << 9 < bdev_logical_block_size(bdev))
  115. goto err;
  116. switch (sb->version) {
  117. case BCACHE_SB_VERSION_BDEV:
  118. sb->data_offset = BDEV_DATA_START_DEFAULT;
  119. break;
  120. case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
  121. sb->data_offset = le64_to_cpu(s->data_offset);
  122. err = "Bad data offset";
  123. if (sb->data_offset < BDEV_DATA_START_DEFAULT)
  124. goto err;
  125. break;
  126. case BCACHE_SB_VERSION_CDEV:
  127. case BCACHE_SB_VERSION_CDEV_WITH_UUID:
  128. sb->nbuckets = le64_to_cpu(s->nbuckets);
  129. sb->block_size = le16_to_cpu(s->block_size);
  130. sb->bucket_size = le16_to_cpu(s->bucket_size);
  131. sb->nr_in_set = le16_to_cpu(s->nr_in_set);
  132. sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
  133. err = "Too many buckets";
  134. if (sb->nbuckets > LONG_MAX)
  135. goto err;
  136. err = "Not enough buckets";
  137. if (sb->nbuckets < 1 << 7)
  138. goto err;
  139. err = "Bad block/bucket size";
  140. if (!is_power_of_2(sb->block_size) ||
  141. sb->block_size > PAGE_SECTORS ||
  142. !is_power_of_2(sb->bucket_size) ||
  143. sb->bucket_size < PAGE_SECTORS)
  144. goto err;
  145. err = "Invalid superblock: device too small";
  146. if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
  147. goto err;
  148. err = "Bad UUID";
  149. if (bch_is_zero(sb->set_uuid, 16))
  150. goto err;
  151. err = "Bad cache device number in set";
  152. if (!sb->nr_in_set ||
  153. sb->nr_in_set <= sb->nr_this_dev ||
  154. sb->nr_in_set > MAX_CACHES_PER_SET)
  155. goto err;
  156. err = "Journal buckets not sequential";
  157. for (i = 0; i < sb->keys; i++)
  158. if (sb->d[i] != sb->first_bucket + i)
  159. goto err;
  160. err = "Too many journal buckets";
  161. if (sb->first_bucket + sb->keys > sb->nbuckets)
  162. goto err;
  163. err = "Invalid superblock: first bucket comes before end of super";
  164. if (sb->first_bucket * sb->bucket_size < 16)
  165. goto err;
  166. break;
  167. default:
  168. err = "Unsupported superblock version";
  169. goto err;
  170. }
  171. sb->last_mount = get_seconds();
  172. err = NULL;
  173. get_page(bh->b_page);
  174. *res = bh->b_page;
  175. err:
  176. put_bh(bh);
  177. return err;
  178. }
  179. static void write_bdev_super_endio(struct bio *bio, int error)
  180. {
  181. struct cached_dev *dc = bio->bi_private;
  182. /* XXX: error checking */
  183. closure_put(&dc->sb_write.cl);
  184. }
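/*
 * __write_super() is the low level half shared by the backing device and
 * cache device superblock writers: it copies the mutable fields of the
 * in-memory struct cache_sb back into the page already attached to the bio,
 * converting to little endian and recomputing the checksum, then submits a
 * REQ_SYNC|REQ_META write to SB_SECTOR. The caller holds the per-device
 * sb_write closure until the endio function drops it.
 */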
  185. static void __write_super(struct cache_sb *sb, struct bio *bio)
  186. {
  187. struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
  188. unsigned i;
  189. bio->bi_sector = SB_SECTOR;
  190. bio->bi_rw = REQ_SYNC|REQ_META;
  191. bio->bi_size = SB_SIZE;
  192. bch_bio_map(bio, NULL);
  193. out->offset = cpu_to_le64(sb->offset);
  194. out->version = cpu_to_le64(sb->version);
  195. memcpy(out->uuid, sb->uuid, 16);
  196. memcpy(out->set_uuid, sb->set_uuid, 16);
  197. memcpy(out->label, sb->label, SB_LABEL_SIZE);
  198. out->flags = cpu_to_le64(sb->flags);
  199. out->seq = cpu_to_le64(sb->seq);
  200. out->last_mount = cpu_to_le32(sb->last_mount);
  201. out->first_bucket = cpu_to_le16(sb->first_bucket);
  202. out->keys = cpu_to_le16(sb->keys);
  203. for (i = 0; i < sb->keys; i++)
  204. out->d[i] = cpu_to_le64(sb->d[i]);
  205. out->csum = csum_set(out);
  206. pr_debug("ver %llu, flags %llu, seq %llu",
  207. sb->version, sb->flags, sb->seq);
  208. submit_bio(REQ_WRITE, bio);
  209. }
  210. void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
  211. {
  212. struct closure *cl = &dc->sb_write.cl;
  213. struct bio *bio = &dc->sb_bio;
  214. closure_lock(&dc->sb_write, parent);
  215. bio_reset(bio);
  216. bio->bi_bdev = dc->bdev;
  217. bio->bi_end_io = write_bdev_super_endio;
  218. bio->bi_private = dc;
  219. closure_get(cl);
  220. __write_super(&dc->sb, bio);
  221. closure_return(cl);
  222. }
  223. static void write_super_endio(struct bio *bio, int error)
  224. {
  225. struct cache *ca = bio->bi_private;
  226. bch_count_io_errors(ca, error, "writing superblock");
  227. closure_put(&ca->set->sb_write.cl);
  228. }
  229. void bcache_write_super(struct cache_set *c)
  230. {
  231. struct closure *cl = &c->sb_write.cl;
  232. struct cache *ca;
  233. unsigned i;
  234. closure_lock(&c->sb_write, &c->cl);
  235. c->sb.seq++;
  236. for_each_cache(ca, c, i) {
  237. struct bio *bio = &ca->sb_bio;
  238. ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
  239. ca->sb.seq = c->sb.seq;
  240. ca->sb.last_mount = c->sb.last_mount;
  241. SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
  242. bio_reset(bio);
  243. bio->bi_bdev = ca->bdev;
  244. bio->bi_end_io = write_super_endio;
  245. bio->bi_private = ca;
  246. closure_get(cl);
  247. __write_super(&ca->sb, bio);
  248. }
  249. closure_return(cl);
  250. }
  251. /* UUID io */
  252. static void uuid_endio(struct bio *bio, int error)
  253. {
  254. struct closure *cl = bio->bi_private;
  255. struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
  256. cache_set_err_on(error, c, "accessing uuids");
  257. bch_bbio_free(bio, c);
  258. closure_put(cl);
  259. }
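/*
 * uuid_io() reads or writes the packed array of struct uuid_entry that lives
 * in its own bucket: for a write one bio is sent to every replica pointed to
 * by @k, for a read only the first pointer is used. The parent closure is
 * pinned through c->uuid_write until the I/O completes.
 */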
  260. static void uuid_io(struct cache_set *c, unsigned long rw,
  261. struct bkey *k, struct closure *parent)
  262. {
  263. struct closure *cl = &c->uuid_write.cl;
  264. struct uuid_entry *u;
  265. unsigned i;
  266. BUG_ON(!parent);
  267. closure_lock(&c->uuid_write, parent);
  268. for (i = 0; i < KEY_PTRS(k); i++) {
  269. struct bio *bio = bch_bbio_alloc(c);
  270. bio->bi_rw = REQ_SYNC|REQ_META|rw;
  271. bio->bi_size = KEY_SIZE(k) << 9;
  272. bio->bi_end_io = uuid_endio;
  273. bio->bi_private = cl;
  274. bch_bio_map(bio, c->uuids);
  275. bch_submit_bbio(bio, c, k, i);
  276. if (!(rw & WRITE))
  277. break;
  278. }
  279. pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
  280. pkey(&c->uuid_bucket));
  281. for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
  282. if (!bch_is_zero(u->uuid, 16))
  283. pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
  284. u - c->uuids, u->uuid, u->label,
  285. u->first_reg, u->last_reg, u->invalidated);
  286. closure_return(cl);
  287. }
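/*
 * uuid_read() follows the uuid bucket pointer stored in the journal header
 * and, if the entries were written in the older, smaller uuid_entry_v0
 * format, expands them in place starting from the last entry and working
 * down.
 */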
  288. static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
  289. {
  290. struct bkey *k = &j->uuid_bucket;
  291. if (__bch_ptr_invalid(c, 1, k))
  292. return "bad uuid pointer";
  293. bkey_copy(&c->uuid_bucket, k);
  294. uuid_io(c, READ_SYNC, k, cl);
  295. if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
  296. struct uuid_entry_v0 *u0 = (void *) c->uuids;
  297. struct uuid_entry *u1 = (void *) c->uuids;
  298. int i;
  299. closure_sync(cl);
  300. /*
  301. * Since the new uuid entry is bigger than the old, we have to
  302. * convert starting at the highest memory address and work down
  303. * in order to do it in place
  304. */
  305. for (i = c->nr_uuids - 1;
  306. i >= 0;
  307. --i) {
  308. memcpy(u1[i].uuid, u0[i].uuid, 16);
  309. memcpy(u1[i].label, u0[i].label, 32);
  310. u1[i].first_reg = u0[i].first_reg;
  311. u1[i].last_reg = u0[i].last_reg;
  312. u1[i].invalidated = u0[i].invalidated;
  313. u1[i].flags = 0;
  314. u1[i].sectors = 0;
  315. }
  316. }
  317. return NULL;
  318. }
  319. static int __uuid_write(struct cache_set *c)
  320. {
  321. BKEY_PADDED(key) k;
  322. struct closure cl;
  323. closure_init_stack(&cl);
  324. lockdep_assert_held(&bch_register_lock);
  325. if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
  326. return 1;
  327. SET_KEY_SIZE(&k.key, c->sb.bucket_size);
  328. uuid_io(c, REQ_WRITE, &k.key, &cl);
  329. closure_sync(&cl);
  330. bkey_copy(&c->uuid_bucket, &k.key);
  331. __bkey_put(c, &k.key);
  332. return 0;
  333. }
  334. int bch_uuid_write(struct cache_set *c)
  335. {
  336. int ret = __uuid_write(c);
  337. if (!ret)
  338. bch_journal_meta(c, NULL);
  339. return ret;
  340. }
  341. static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
  342. {
  343. struct uuid_entry *u;
  344. for (u = c->uuids;
  345. u < c->uuids + c->nr_uuids; u++)
  346. if (!memcmp(u->uuid, uuid, 16))
  347. return u;
  348. return NULL;
  349. }
  350. static struct uuid_entry *uuid_find_empty(struct cache_set *c)
  351. {
  352. static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
  353. return uuid_find(c, zero_uuid);
  354. }
  355. /*
  356. * Bucket priorities/gens:
  357. *
  358. * For each bucket, we store on disk its
  359. * 8 bit gen
  360. * 16 bit priority
  361. *
  362. * See alloc.c for an explanation of the gen. The priority is used to implement
  363. * lru (and in the future other) cache replacement policies; for most purposes
  364. * it's just an opaque integer.
  365. *
  366. * The gens and the priorities don't have a whole lot to do with each other, and
  367. * it's actually the gens that must be written out at specific times - it's no
  368. * big deal if the priorities don't get written, if we lose them we just reuse
  369. * buckets in suboptimal order.
  370. *
371. * On disk they're stored in a packed array, and in as many buckets as are required
  372. * to fit them all. The buckets we use to store them form a list; the journal
  373. * header points to the first bucket, the first bucket points to the second
  374. * bucket, et cetera.
  375. *
  376. * This code is used by the allocation code; periodically (whenever it runs out
  377. * of buckets to allocate from) the allocation code will invalidate some
  378. * buckets, but it can't use those buckets until their new gens are safely on
  379. * disk.
  380. */
  381. static void prio_endio(struct bio *bio, int error)
  382. {
  383. struct cache *ca = bio->bi_private;
  384. cache_set_err_on(error, ca->set, "accessing priorities");
  385. bch_bbio_free(bio, ca->set);
  386. closure_put(&ca->prio);
  387. }
  388. static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
  389. {
  390. struct closure *cl = &ca->prio;
  391. struct bio *bio = bch_bbio_alloc(ca->set);
  392. closure_init_stack(cl);
  393. bio->bi_sector = bucket * ca->sb.bucket_size;
  394. bio->bi_bdev = ca->bdev;
  395. bio->bi_rw = REQ_SYNC|REQ_META|rw;
  396. bio->bi_size = bucket_bytes(ca);
  397. bio->bi_end_io = prio_endio;
  398. bio->bi_private = ca;
  399. bch_bio_map(bio, ca->disk_buckets);
  400. closure_bio_submit(bio, &ca->prio, ca);
  401. closure_sync(cl);
  402. }
  403. #define buckets_free(c) "free %zu, free_inc %zu, unused %zu", \
  404. fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)
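/*
 * bch_prio_write() snapshots the current prio/gen of every bucket into
 * freshly allocated prio buckets. They're written from the last one to the
 * first so that each bucket can record the location of the next one in its
 * header; once they're all written the update is journalled, since
 * invalidated buckets can't be reused until their new gens are safely on
 * disk.
 */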
  405. void bch_prio_write(struct cache *ca)
  406. {
  407. int i;
  408. struct bucket *b;
  409. struct closure cl;
  410. closure_init_stack(&cl);
  411. lockdep_assert_held(&ca->set->bucket_lock);
  412. for (b = ca->buckets;
  413. b < ca->buckets + ca->sb.nbuckets; b++)
  414. b->disk_gen = b->gen;
  415. ca->disk_buckets->seq++;
  416. atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
  417. &ca->meta_sectors_written);
  418. pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
  419. fifo_used(&ca->free_inc), fifo_used(&ca->unused));
  420. blktrace_msg(ca, "Starting priorities: " buckets_free(ca));
  421. for (i = prio_buckets(ca) - 1; i >= 0; --i) {
  422. long bucket;
  423. struct prio_set *p = ca->disk_buckets;
  424. struct bucket_disk *d = p->data;
  425. struct bucket_disk *end = d + prios_per_bucket(ca);
  426. for (b = ca->buckets + i * prios_per_bucket(ca);
  427. b < ca->buckets + ca->sb.nbuckets && d < end;
  428. b++, d++) {
  429. d->prio = cpu_to_le16(b->prio);
  430. d->gen = b->gen;
  431. }
  432. p->next_bucket = ca->prio_buckets[i + 1];
  433. p->magic = pset_magic(ca);
  434. p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
  435. bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
  436. BUG_ON(bucket == -1);
  437. mutex_unlock(&ca->set->bucket_lock);
  438. prio_io(ca, bucket, REQ_WRITE);
  439. mutex_lock(&ca->set->bucket_lock);
  440. ca->prio_buckets[i] = bucket;
  441. atomic_dec_bug(&ca->buckets[bucket].pin);
  442. }
  443. mutex_unlock(&ca->set->bucket_lock);
  444. bch_journal_meta(ca->set, &cl);
  445. closure_sync(&cl);
  446. mutex_lock(&ca->set->bucket_lock);
  447. ca->need_save_prio = 0;
  448. /*
  449. * Don't want the old priorities to get garbage collected until after we
  450. * finish writing the new ones, and they're journalled
  451. */
  452. for (i = 0; i < prio_buckets(ca); i++)
  453. ca->prio_last_buckets[i] = ca->prio_buckets[i];
  454. }
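/*
 * prio_read() is the mirror of bch_prio_write(): starting from the prio
 * bucket recorded in the journal it walks the on-disk list, reading each
 * bucket, checking its checksum and magic, and copying the stored prio and
 * gen back into the in-memory bucket array.
 */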
  455. static void prio_read(struct cache *ca, uint64_t bucket)
  456. {
  457. struct prio_set *p = ca->disk_buckets;
  458. struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
  459. struct bucket *b;
  460. unsigned bucket_nr = 0;
  461. for (b = ca->buckets;
  462. b < ca->buckets + ca->sb.nbuckets;
  463. b++, d++) {
  464. if (d == end) {
  465. ca->prio_buckets[bucket_nr] = bucket;
  466. ca->prio_last_buckets[bucket_nr] = bucket;
  467. bucket_nr++;
  468. prio_io(ca, bucket, READ_SYNC);
  469. if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
  470. pr_warn("bad csum reading priorities");
  471. if (p->magic != pset_magic(ca))
  472. pr_warn("bad magic reading priorities");
  473. bucket = p->next_bucket;
  474. d = p->data;
  475. }
  476. b->prio = le16_to_cpu(d->prio);
  477. b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
  478. }
  479. }
  480. /* Bcache device */
  481. static int open_dev(struct block_device *b, fmode_t mode)
  482. {
  483. struct bcache_device *d = b->bd_disk->private_data;
  484. if (atomic_read(&d->closing))
  485. return -ENXIO;
  486. closure_get(&d->cl);
  487. return 0;
  488. }
  489. static void release_dev(struct gendisk *b, fmode_t mode)
  490. {
  491. struct bcache_device *d = b->private_data;
  492. closure_put(&d->cl);
  493. }
  494. static int ioctl_dev(struct block_device *b, fmode_t mode,
  495. unsigned int cmd, unsigned long arg)
  496. {
  497. struct bcache_device *d = b->bd_disk->private_data;
  498. return d->ioctl(d, mode, cmd, arg);
  499. }
  500. static const struct block_device_operations bcache_ops = {
  501. .open = open_dev,
  502. .release = release_dev,
  503. .ioctl = ioctl_dev,
  504. .owner = THIS_MODULE,
  505. };
  506. void bcache_device_stop(struct bcache_device *d)
  507. {
  508. if (!atomic_xchg(&d->closing, 1))
  509. closure_queue(&d->cl);
  510. }
  511. static void bcache_device_unlink(struct bcache_device *d)
  512. {
  513. unsigned i;
  514. struct cache *ca;
  515. sysfs_remove_link(&d->c->kobj, d->name);
  516. sysfs_remove_link(&d->kobj, "cache");
  517. for_each_cache(ca, d->c, i)
  518. bd_unlink_disk_holder(ca->bdev, d->disk);
  519. }
  520. static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
  521. const char *name)
  522. {
  523. unsigned i;
  524. struct cache *ca;
  525. for_each_cache(ca, d->c, i)
  526. bd_link_disk_holder(ca->bdev, d->disk);
  527. snprintf(d->name, BCACHEDEVNAME_SIZE,
  528. "%s%u", name, d->id);
  529. WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
  530. sysfs_create_link(&c->kobj, &d->kobj, d->name),
  531. "Couldn't create device <-> cache set symlinks");
  532. }
  533. static void bcache_device_detach(struct bcache_device *d)
  534. {
  535. lockdep_assert_held(&bch_register_lock);
  536. if (atomic_read(&d->detaching)) {
  537. struct uuid_entry *u = d->c->uuids + d->id;
  538. SET_UUID_FLASH_ONLY(u, 0);
  539. memcpy(u->uuid, invalid_uuid, 16);
  540. u->invalidated = cpu_to_le32(get_seconds());
  541. bch_uuid_write(d->c);
  542. atomic_set(&d->detaching, 0);
  543. }
  544. bcache_device_unlink(d);
  545. d->c->devices[d->id] = NULL;
  546. closure_put(&d->c->caching);
  547. d->c = NULL;
  548. }
  549. static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
  550. unsigned id)
  551. {
  552. BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));
  553. d->id = id;
  554. d->c = c;
  555. c->devices[id] = d;
  556. closure_get(&c->caching);
  557. }
  558. static void bcache_device_free(struct bcache_device *d)
  559. {
  560. lockdep_assert_held(&bch_register_lock);
  561. pr_info("%s stopped", d->disk->disk_name);
  562. if (d->c)
  563. bcache_device_detach(d);
  564. if (d->disk && d->disk->flags & GENHD_FL_UP)
  565. del_gendisk(d->disk);
  566. if (d->disk && d->disk->queue)
  567. blk_cleanup_queue(d->disk->queue);
  568. if (d->disk)
  569. put_disk(d->disk);
  570. bio_split_pool_free(&d->bio_split_hook);
  571. if (d->unaligned_bvec)
  572. mempool_destroy(d->unaligned_bvec);
  573. if (d->bio_split)
  574. bioset_free(d->bio_split);
  575. closure_debug_destroy(&d->cl);
  576. }
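/*
 * bcache_device_init() allocates the pieces common to cached devices and
 * flash-only volumes: the bio split pools, a gendisk named bcacheN and a
 * request queue whose limits are derived from the device's block size; the
 * actual make_request function is installed later by the request code.
 */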
  577. static int bcache_device_init(struct bcache_device *d, unsigned block_size)
  578. {
  579. struct request_queue *q;
  580. if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
  581. !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
  582. sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
  583. bio_split_pool_init(&d->bio_split_hook) ||
  584. !(d->disk = alloc_disk(1)) ||
  585. !(q = blk_alloc_queue(GFP_KERNEL)))
  586. return -ENOMEM;
  587. snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
  588. d->disk->major = bcache_major;
  589. d->disk->first_minor = bcache_minor++;
  590. d->disk->fops = &bcache_ops;
  591. d->disk->private_data = d;
  592. blk_queue_make_request(q, NULL);
  593. d->disk->queue = q;
  594. q->queuedata = d;
  595. q->backing_dev_info.congested_data = d;
  596. q->limits.max_hw_sectors = UINT_MAX;
  597. q->limits.max_sectors = UINT_MAX;
  598. q->limits.max_segment_size = UINT_MAX;
  599. q->limits.max_segments = BIO_MAX_PAGES;
  600. q->limits.max_discard_sectors = UINT_MAX;
  601. q->limits.io_min = block_size;
  602. q->limits.logical_block_size = block_size;
  603. q->limits.physical_block_size = block_size;
  604. set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
  605. set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
  606. return 0;
  607. }
  608. /* Cached device */
  609. static void calc_cached_dev_sectors(struct cache_set *c)
  610. {
  611. uint64_t sectors = 0;
  612. struct cached_dev *dc;
  613. list_for_each_entry(dc, &c->cached_devs, list)
  614. sectors += bdev_sectors(dc->bdev);
  615. c->cached_dev_sectors = sectors;
  616. }
  617. void bch_cached_dev_run(struct cached_dev *dc)
  618. {
  619. struct bcache_device *d = &dc->disk;
  620. if (atomic_xchg(&dc->running, 1))
  621. return;
  622. if (!d->c &&
  623. BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
  624. struct closure cl;
  625. closure_init_stack(&cl);
  626. SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
  627. bch_write_bdev_super(dc, &cl);
  628. closure_sync(&cl);
  629. }
  630. add_disk(d->disk);
  631. bd_link_disk_holder(dc->bdev, dc->disk.disk);
  632. #if 0
  633. char *env[] = { "SYMLINK=label" , NULL };
  634. kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
  635. #endif
  636. if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
  637. sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
  638. pr_debug("error creating sysfs link");
  639. }
  640. static void cached_dev_detach_finish(struct work_struct *w)
  641. {
  642. struct cached_dev *dc = container_of(w, struct cached_dev, detach);
  643. char buf[BDEVNAME_SIZE];
  644. struct closure cl;
  645. closure_init_stack(&cl);
  646. BUG_ON(!atomic_read(&dc->disk.detaching));
  647. BUG_ON(atomic_read(&dc->count));
  648. mutex_lock(&bch_register_lock);
  649. memset(&dc->sb.set_uuid, 0, 16);
  650. SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
  651. bch_write_bdev_super(dc, &cl);
  652. closure_sync(&cl);
  653. bcache_device_detach(&dc->disk);
  654. list_move(&dc->list, &uncached_devices);
  655. mutex_unlock(&bch_register_lock);
  656. pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
  657. /* Drop ref we took in cached_dev_detach() */
  658. closure_put(&dc->disk.cl);
  659. }
  660. void bch_cached_dev_detach(struct cached_dev *dc)
  661. {
  662. lockdep_assert_held(&bch_register_lock);
  663. if (atomic_read(&dc->disk.closing))
  664. return;
  665. if (atomic_xchg(&dc->disk.detaching, 1))
  666. return;
  667. /*
  668. * Block the device from being closed and freed until we're finished
  669. * detaching
  670. */
  671. closure_get(&dc->disk.cl);
  672. bch_writeback_queue(dc);
  673. cached_dev_put(dc);
  674. }
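/*
 * bch_cached_dev_attach() binds a backing device to a cache set with a
 * matching set uuid: it finds (or allocates) the device's uuid_entry,
 * rewrites the uuid array and the backing device superblock as needed, wires
 * the device into the set, and kicks off writeback immediately if the device
 * was registered dirty.
 */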
  675. int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
  676. {
  677. uint32_t rtime = cpu_to_le32(get_seconds());
  678. struct uuid_entry *u;
  679. char buf[BDEVNAME_SIZE];
  680. bdevname(dc->bdev, buf);
  681. if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
  682. return -ENOENT;
  683. if (dc->disk.c) {
  684. pr_err("Can't attach %s: already attached", buf);
  685. return -EINVAL;
  686. }
  687. if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
  688. pr_err("Can't attach %s: shutting down", buf);
  689. return -EINVAL;
  690. }
  691. if (dc->sb.block_size < c->sb.block_size) {
  692. /* Will die */
  693. pr_err("Couldn't attach %s: block size less than set's block size",
  694. buf);
  695. return -EINVAL;
  696. }
  697. u = uuid_find(c, dc->sb.uuid);
  698. if (u &&
  699. (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
  700. BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
  701. memcpy(u->uuid, invalid_uuid, 16);
  702. u->invalidated = cpu_to_le32(get_seconds());
  703. u = NULL;
  704. }
  705. if (!u) {
  706. if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
  707. pr_err("Couldn't find uuid for %s in set", buf);
  708. return -ENOENT;
  709. }
  710. u = uuid_find_empty(c);
  711. if (!u) {
  712. pr_err("Not caching %s, no room for UUID", buf);
  713. return -EINVAL;
  714. }
  715. }
  716. /* Deadlocks since we're called via sysfs...
  717. sysfs_remove_file(&dc->kobj, &sysfs_attach);
  718. */
  719. if (bch_is_zero(u->uuid, 16)) {
  720. struct closure cl;
  721. closure_init_stack(&cl);
  722. memcpy(u->uuid, dc->sb.uuid, 16);
  723. memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
  724. u->first_reg = u->last_reg = rtime;
  725. bch_uuid_write(c);
  726. memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
  727. SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
  728. bch_write_bdev_super(dc, &cl);
  729. closure_sync(&cl);
  730. } else {
  731. u->last_reg = rtime;
  732. bch_uuid_write(c);
  733. }
  734. bcache_device_attach(&dc->disk, c, u - c->uuids);
  735. list_move(&dc->list, &c->cached_devs);
  736. calc_cached_dev_sectors(c);
  737. smp_wmb();
  738. /*
  739. * dc->c must be set before dc->count != 0 - paired with the mb in
  740. * cached_dev_get()
  741. */
  742. atomic_set(&dc->count, 1);
  743. if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
  744. atomic_set(&dc->has_dirty, 1);
  745. atomic_inc(&dc->count);
  746. bch_writeback_queue(dc);
  747. }
  748. bch_cached_dev_run(dc);
  749. bcache_device_link(&dc->disk, c, "bdev");
  750. pr_info("Caching %s as %s on set %pU",
  751. bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
  752. dc->disk.c->sb.set_uuid);
  753. return 0;
  754. }
  755. void bch_cached_dev_release(struct kobject *kobj)
  756. {
  757. struct cached_dev *dc = container_of(kobj, struct cached_dev,
  758. disk.kobj);
  759. kfree(dc);
  760. module_put(THIS_MODULE);
  761. }
  762. static void cached_dev_free(struct closure *cl)
  763. {
  764. struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
  765. cancel_delayed_work_sync(&dc->writeback_rate_update);
  766. mutex_lock(&bch_register_lock);
  767. if (atomic_read(&dc->running))
  768. bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
  769. bcache_device_free(&dc->disk);
  770. list_del(&dc->list);
  771. mutex_unlock(&bch_register_lock);
  772. if (!IS_ERR_OR_NULL(dc->bdev)) {
  773. if (dc->bdev->bd_disk)
  774. blk_sync_queue(bdev_get_queue(dc->bdev));
  775. blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
  776. }
  777. wake_up(&unregister_wait);
  778. kobject_put(&dc->disk.kobj);
  779. }
  780. static void cached_dev_flush(struct closure *cl)
  781. {
  782. struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
  783. struct bcache_device *d = &dc->disk;
  784. bch_cache_accounting_destroy(&dc->accounting);
  785. kobject_del(&d->kobj);
  786. continue_at(cl, cached_dev_free, system_wq);
  787. }
  788. static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
  789. {
  790. int ret;
  791. struct io *io;
  792. struct request_queue *q = bdev_get_queue(dc->bdev);
  793. __module_get(THIS_MODULE);
  794. INIT_LIST_HEAD(&dc->list);
  795. closure_init(&dc->disk.cl, NULL);
  796. set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
  797. kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
  798. INIT_WORK(&dc->detach, cached_dev_detach_finish);
  799. closure_init_unlocked(&dc->sb_write);
  800. INIT_LIST_HEAD(&dc->io_lru);
  801. spin_lock_init(&dc->io_lock);
  802. bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
  803. dc->sequential_merge = true;
  804. dc->sequential_cutoff = 4 << 20;
  805. for (io = dc->io; io < dc->io + RECENT_IO; io++) {
  806. list_add(&io->lru, &dc->io_lru);
  807. hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
  808. }
  809. ret = bcache_device_init(&dc->disk, block_size);
  810. if (ret)
  811. return ret;
  812. set_capacity(dc->disk.disk,
  813. dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
  814. dc->disk.disk->queue->backing_dev_info.ra_pages =
  815. max(dc->disk.disk->queue->backing_dev_info.ra_pages,
  816. q->backing_dev_info.ra_pages);
  817. bch_cached_dev_request_init(dc);
  818. bch_cached_dev_writeback_init(dc);
  819. return 0;
  820. }
  821. /* Cached device - bcache superblock */
  822. static void register_bdev(struct cache_sb *sb, struct page *sb_page,
  823. struct block_device *bdev,
  824. struct cached_dev *dc)
  825. {
  826. char name[BDEVNAME_SIZE];
  827. const char *err = "cannot allocate memory";
  828. struct cache_set *c;
  829. memcpy(&dc->sb, sb, sizeof(struct cache_sb));
  830. dc->bdev = bdev;
  831. dc->bdev->bd_holder = dc;
  832. bio_init(&dc->sb_bio);
  833. dc->sb_bio.bi_max_vecs = 1;
  834. dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
  835. dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
  836. get_page(sb_page);
  837. if (cached_dev_init(dc, sb->block_size << 9))
  838. goto err;
  839. err = "error creating kobject";
  840. if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
  841. "bcache"))
  842. goto err;
  843. if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
  844. goto err;
  845. pr_info("registered backing device %s", bdevname(bdev, name));
  846. list_add(&dc->list, &uncached_devices);
  847. list_for_each_entry(c, &bch_cache_sets, list)
  848. bch_cached_dev_attach(dc, c);
  849. if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
  850. BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
  851. bch_cached_dev_run(dc);
  852. return;
  853. err:
  854. pr_notice("error opening %s: %s", bdevname(bdev, name), err);
  855. bcache_device_stop(&dc->disk);
  856. }
  857. /* Flash only volumes */
  858. void bch_flash_dev_release(struct kobject *kobj)
  859. {
  860. struct bcache_device *d = container_of(kobj, struct bcache_device,
  861. kobj);
  862. kfree(d);
  863. }
  864. static void flash_dev_free(struct closure *cl)
  865. {
  866. struct bcache_device *d = container_of(cl, struct bcache_device, cl);
  867. bcache_device_free(d);
  868. kobject_put(&d->kobj);
  869. }
  870. static void flash_dev_flush(struct closure *cl)
  871. {
  872. struct bcache_device *d = container_of(cl, struct bcache_device, cl);
  873. bcache_device_unlink(d);
  874. kobject_del(&d->kobj);
  875. continue_at(cl, flash_dev_free, system_wq);
  876. }
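/*
 * flash_dev_run() creates the block device for a flash-only volume described
 * by a uuid_entry: a struct bcache_device is allocated, attached to the cache
 * set at the entry's slot, sized from u->sectors and published through sysfs.
 */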
  877. static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
  878. {
  879. struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
  880. GFP_KERNEL);
  881. if (!d)
  882. return -ENOMEM;
  883. closure_init(&d->cl, NULL);
  884. set_closure_fn(&d->cl, flash_dev_flush, system_wq);
  885. kobject_init(&d->kobj, &bch_flash_dev_ktype);
  886. if (bcache_device_init(d, block_bytes(c)))
  887. goto err;
  888. bcache_device_attach(d, c, u - c->uuids);
  889. set_capacity(d->disk, u->sectors);
  890. bch_flash_dev_request_init(d);
  891. add_disk(d->disk);
  892. if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
  893. goto err;
  894. bcache_device_link(d, c, "volume");
  895. return 0;
  896. err:
  897. kobject_put(&d->kobj);
  898. return -ENOMEM;
  899. }
  900. static int flash_devs_run(struct cache_set *c)
  901. {
  902. int ret = 0;
  903. struct uuid_entry *u;
  904. for (u = c->uuids;
  905. u < c->uuids + c->nr_uuids && !ret;
  906. u++)
  907. if (UUID_FLASH_ONLY(u))
  908. ret = flash_dev_run(c, u);
  909. return ret;
  910. }
  911. int bch_flash_dev_create(struct cache_set *c, uint64_t size)
  912. {
  913. struct uuid_entry *u;
  914. if (test_bit(CACHE_SET_STOPPING, &c->flags))
  915. return -EINTR;
  916. u = uuid_find_empty(c);
  917. if (!u) {
  918. pr_err("Can't create volume, no room for UUID");
  919. return -EINVAL;
  920. }
  921. get_random_bytes(u->uuid, 16);
  922. memset(u->label, 0, 32);
  923. u->first_reg = u->last_reg = cpu_to_le32(get_seconds());
  924. SET_UUID_FLASH_ONLY(u, 1);
  925. u->sectors = size >> 9;
  926. bch_uuid_write(c);
  927. return flash_dev_run(c, u);
  928. }
  929. /* Cache set */
  930. __printf(2, 3)
  931. bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
  932. {
  933. va_list args;
  934. if (test_bit(CACHE_SET_STOPPING, &c->flags))
  935. return false;
  936. /* XXX: we can be called from atomic context
  937. acquire_console_sem();
  938. */
  939. printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);
  940. va_start(args, fmt);
  941. vprintk(fmt, args);
  942. va_end(args);
  943. printk(", disabling caching\n");
  944. bch_cache_set_unregister(c);
  945. return true;
  946. }
  947. void bch_cache_set_release(struct kobject *kobj)
  948. {
  949. struct cache_set *c = container_of(kobj, struct cache_set, kobj);
  950. kfree(c);
  951. module_put(THIS_MODULE);
  952. }
  953. static void cache_set_free(struct closure *cl)
  954. {
  955. struct cache_set *c = container_of(cl, struct cache_set, cl);
  956. struct cache *ca;
  957. unsigned i;
  958. if (!IS_ERR_OR_NULL(c->debug))
  959. debugfs_remove(c->debug);
  960. bch_open_buckets_free(c);
  961. bch_btree_cache_free(c);
  962. bch_journal_free(c);
  963. for_each_cache(ca, c, i)
  964. if (ca)
  965. kobject_put(&ca->kobj);
  966. free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
  967. free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
  968. if (c->bio_split)
  969. bioset_free(c->bio_split);
  970. if (c->fill_iter)
  971. mempool_destroy(c->fill_iter);
  972. if (c->bio_meta)
  973. mempool_destroy(c->bio_meta);
  974. if (c->search)
  975. mempool_destroy(c->search);
  976. kfree(c->devices);
  977. mutex_lock(&bch_register_lock);
  978. list_del(&c->list);
  979. mutex_unlock(&bch_register_lock);
  980. pr_info("Cache set %pU unregistered", c->sb.set_uuid);
  981. wake_up(&unregister_wait);
  982. closure_debug_destroy(&c->cl);
  983. kobject_put(&c->kobj);
  984. }
  985. static void cache_set_flush(struct closure *cl)
  986. {
  987. struct cache_set *c = container_of(cl, struct cache_set, caching);
  988. struct btree *b;
  989. /* Shut down allocator threads */
  990. set_bit(CACHE_SET_STOPPING_2, &c->flags);
  991. wake_up_allocators(c);
  992. bch_cache_accounting_destroy(&c->accounting);
  993. kobject_put(&c->internal);
  994. kobject_del(&c->kobj);
  995. if (!IS_ERR_OR_NULL(c->root))
  996. list_add(&c->root->list, &c->btree_cache);
  997. /* Should skip this if we're unregistering because of an error */
  998. list_for_each_entry(b, &c->btree_cache, list)
  999. if (btree_node_dirty(b))
  1000. bch_btree_node_write(b, NULL);
  1001. closure_return(cl);
  1002. }
  1003. static void __cache_set_unregister(struct closure *cl)
  1004. {
  1005. struct cache_set *c = container_of(cl, struct cache_set, caching);
  1006. struct cached_dev *dc, *t;
  1007. size_t i;
  1008. mutex_lock(&bch_register_lock);
  1009. if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
  1010. list_for_each_entry_safe(dc, t, &c->cached_devs, list)
  1011. bch_cached_dev_detach(dc);
  1012. for (i = 0; i < c->nr_uuids; i++)
  1013. if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
  1014. bcache_device_stop(c->devices[i]);
  1015. mutex_unlock(&bch_register_lock);
  1016. continue_at(cl, cache_set_flush, system_wq);
  1017. }
  1018. void bch_cache_set_stop(struct cache_set *c)
  1019. {
  1020. if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
  1021. closure_queue(&c->caching);
  1022. }
  1023. void bch_cache_set_unregister(struct cache_set *c)
  1024. {
  1025. set_bit(CACHE_SET_UNREGISTERING, &c->flags);
  1026. bch_cache_set_stop(c);
  1027. }
  1028. #define alloc_bucket_pages(gfp, c) \
  1029. ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
  1030. struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
  1031. {
  1032. int iter_size;
  1033. struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
  1034. if (!c)
  1035. return NULL;
  1036. __module_get(THIS_MODULE);
  1037. closure_init(&c->cl, NULL);
  1038. set_closure_fn(&c->cl, cache_set_free, system_wq);
  1039. closure_init(&c->caching, &c->cl);
  1040. set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
  1041. /* Maybe create continue_at_noreturn() and use it here? */
  1042. closure_set_stopped(&c->cl);
  1043. closure_put(&c->cl);
  1044. kobject_init(&c->kobj, &bch_cache_set_ktype);
  1045. kobject_init(&c->internal, &bch_cache_set_internal_ktype);
  1046. bch_cache_accounting_init(&c->accounting, &c->cl);
  1047. memcpy(c->sb.set_uuid, sb->set_uuid, 16);
  1048. c->sb.block_size = sb->block_size;
  1049. c->sb.bucket_size = sb->bucket_size;
  1050. c->sb.nr_in_set = sb->nr_in_set;
  1051. c->sb.last_mount = sb->last_mount;
  1052. c->bucket_bits = ilog2(sb->bucket_size);
  1053. c->block_bits = ilog2(sb->block_size);
  1054. c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
  1055. c->btree_pages = c->sb.bucket_size / PAGE_SECTORS;
  1056. if (c->btree_pages > BTREE_MAX_PAGES)
  1057. c->btree_pages = max_t(int, c->btree_pages / 4,
  1058. BTREE_MAX_PAGES);
  1059. mutex_init(&c->bucket_lock);
  1060. mutex_init(&c->sort_lock);
  1061. spin_lock_init(&c->sort_time_lock);
  1062. closure_init_unlocked(&c->sb_write);
  1063. closure_init_unlocked(&c->uuid_write);
  1064. spin_lock_init(&c->btree_read_time_lock);
  1065. bch_moving_init_cache_set(c);
  1066. INIT_LIST_HEAD(&c->list);
  1067. INIT_LIST_HEAD(&c->cached_devs);
  1068. INIT_LIST_HEAD(&c->btree_cache);
  1069. INIT_LIST_HEAD(&c->btree_cache_freeable);
  1070. INIT_LIST_HEAD(&c->btree_cache_freed);
  1071. INIT_LIST_HEAD(&c->data_buckets);
  1072. c->search = mempool_create_slab_pool(32, bch_search_cache);
  1073. if (!c->search)
  1074. goto err;
  1075. iter_size = (sb->bucket_size / sb->block_size + 1) *
  1076. sizeof(struct btree_iter_set);
  1077. if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
  1078. !(c->bio_meta = mempool_create_kmalloc_pool(2,
  1079. sizeof(struct bbio) + sizeof(struct bio_vec) *
  1080. bucket_pages(c))) ||
  1081. !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
  1082. !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
  1083. !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
  1084. !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
  1085. bch_journal_alloc(c) ||
  1086. bch_btree_cache_alloc(c) ||
  1087. bch_open_buckets_alloc(c))
  1088. goto err;
  1089. c->congested_read_threshold_us = 2000;
  1090. c->congested_write_threshold_us = 20000;
  1091. c->error_limit = 8 << IO_ERROR_SHIFT;
  1092. return c;
  1093. err:
  1094. bch_cache_set_unregister(c);
  1095. return NULL;
  1096. }
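/*
 * run_cache_set() brings a complete cache set online. If the set was shut
 * down cleanly (CACHE_SYNC) the journal is read, bucket priorities and the
 * uuid array are loaded, the btree is checked and the journal replayed;
 * otherwise the existing contents are invalidated and a fresh btree root,
 * uuid bucket and journal are written out. Finally any backing devices that
 * registered earlier are attached and flash-only volumes are started.
 */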
  1097. static void run_cache_set(struct cache_set *c)
  1098. {
  1099. const char *err = "cannot allocate memory";
  1100. struct cached_dev *dc, *t;
  1101. struct cache *ca;
  1102. unsigned i;
  1103. struct btree_op op;
  1104. bch_btree_op_init_stack(&op);
  1105. op.lock = SHRT_MAX;
  1106. for_each_cache(ca, c, i)
  1107. c->nbuckets += ca->sb.nbuckets;
  1108. if (CACHE_SYNC(&c->sb)) {
  1109. LIST_HEAD(journal);
  1110. struct bkey *k;
  1111. struct jset *j;
  1112. err = "cannot allocate memory for journal";
  1113. if (bch_journal_read(c, &journal, &op))
  1114. goto err;
  1115. pr_debug("btree_journal_read() done");
  1116. err = "no journal entries found";
  1117. if (list_empty(&journal))
  1118. goto err;
  1119. j = &list_entry(journal.prev, struct journal_replay, list)->j;
  1120. err = "IO error reading priorities";
  1121. for_each_cache(ca, c, i)
  1122. prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
  1123. /*
  1124. * If prio_read() fails it'll call cache_set_error and we'll
  1125. * tear everything down right away, but if we perhaps checked
  1126. * sooner we could avoid journal replay.
  1127. */
  1128. k = &j->btree_root;
  1129. err = "bad btree root";
  1130. if (__bch_ptr_invalid(c, j->btree_level + 1, k))
  1131. goto err;
  1132. err = "error reading btree root";
  1133. c->root = bch_btree_node_get(c, k, j->btree_level, &op);
  1134. if (IS_ERR_OR_NULL(c->root))
  1135. goto err;
  1136. list_del_init(&c->root->list);
  1137. rw_unlock(true, c->root);
  1138. err = uuid_read(c, j, &op.cl);
  1139. if (err)
  1140. goto err;
  1141. err = "error in recovery";
  1142. if (bch_btree_check(c, &op))
  1143. goto err;
  1144. bch_journal_mark(c, &journal);
  1145. bch_btree_gc_finish(c);
  1146. pr_debug("btree_check() done");
  1147. /*
  1148. * bcache_journal_next() can't happen sooner, or
  1149. * btree_gc_finish() will give spurious errors about last_gc >
  1150. * gc_gen - this is a hack but oh well.
  1151. */
  1152. bch_journal_next(&c->journal);
  1153. err = "error starting allocator thread";
  1154. for_each_cache(ca, c, i)
  1155. if (bch_cache_allocator_start(ca))
  1156. goto err;
  1157. /*
  1158. * First place it's safe to allocate: btree_check() and
  1159. * btree_gc_finish() have to run before we have buckets to
  1160. * allocate, and bch_bucket_alloc_set() might cause a journal
  1161. * entry to be written so bcache_journal_next() has to be called
  1162. * first.
  1163. *
  1164. * If the uuids were in the old format we have to rewrite them
  1165. * before the next journal entry is written:
  1166. */
  1167. if (j->version < BCACHE_JSET_VERSION_UUID)
  1168. __uuid_write(c);
  1169. bch_journal_replay(c, &journal, &op);
  1170. } else {
  1171. pr_notice("invalidating existing data");
  1172. /* Don't want invalidate_buckets() to queue a gc yet */
  1173. closure_lock(&c->gc, NULL);
  1174. for_each_cache(ca, c, i) {
  1175. unsigned j;
  1176. ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
  1177. 2, SB_JOURNAL_BUCKETS);
  1178. for (j = 0; j < ca->sb.keys; j++)
  1179. ca->sb.d[j] = ca->sb.first_bucket + j;
  1180. }
  1181. bch_btree_gc_finish(c);
  1182. err = "error starting allocator thread";
  1183. for_each_cache(ca, c, i)
  1184. if (bch_cache_allocator_start(ca))
  1185. goto err;
  1186. mutex_lock(&c->bucket_lock);
  1187. for_each_cache(ca, c, i)
  1188. bch_prio_write(ca);
  1189. mutex_unlock(&c->bucket_lock);
  1190. err = "cannot allocate new UUID bucket";
  1191. if (__uuid_write(c))
  1192. goto err_unlock_gc;
  1193. err = "cannot allocate new btree root";
  1194. c->root = bch_btree_node_alloc(c, 0, &op.cl);
  1195. if (IS_ERR_OR_NULL(c->root))
  1196. goto err_unlock_gc;
  1197. bkey_copy_key(&c->root->key, &MAX_KEY);
  1198. bch_btree_node_write(c->root, &op.cl);
  1199. bch_btree_set_root(c->root);
  1200. rw_unlock(true, c->root);
  1201. /*
  1202. * We don't want to write the first journal entry until
  1203. * everything is set up - fortunately journal entries won't be
  1204. * written until the SET_CACHE_SYNC() here:
  1205. */
  1206. SET_CACHE_SYNC(&c->sb, true);
  1207. bch_journal_next(&c->journal);
  1208. bch_journal_meta(c, &op.cl);
  1209. /* Unlock */
  1210. closure_set_stopped(&c->gc.cl);
  1211. closure_put(&c->gc.cl);
  1212. }
  1213. closure_sync(&op.cl);
  1214. c->sb.last_mount = get_seconds();
  1215. bcache_write_super(c);
  1216. list_for_each_entry_safe(dc, t, &uncached_devices, list)
  1217. bch_cached_dev_attach(dc, c);
  1218. flash_devs_run(c);
  1219. return;
  1220. err_unlock_gc:
  1221. closure_set_stopped(&c->gc.cl);
  1222. closure_put(&c->gc.cl);
  1223. err:
  1224. closure_sync(&op.cl);
  1225. /* XXX: test this, it's broken */
  1226. bch_cache_set_error(c, err);
  1227. }
  1228. static bool can_attach_cache(struct cache *ca, struct cache_set *c)
  1229. {
  1230. return ca->sb.block_size == c->sb.block_size &&
1231. ca->sb.bucket_size == c->sb.bucket_size &&
  1232. ca->sb.nr_in_set == c->sb.nr_in_set;
  1233. }
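/*
 * register_cache_set() looks for an existing cache_set with the same set
 * uuid; if none is found one is allocated and registered in sysfs. The cache
 * is linked into the set, the set's superblock is updated from whichever
 * member has the highest sequence number, and once every member described by
 * nr_in_set has appeared the whole set is started with run_cache_set().
 */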
  1234. static const char *register_cache_set(struct cache *ca)
  1235. {
  1236. char buf[12];
  1237. const char *err = "cannot allocate memory";
  1238. struct cache_set *c;
  1239. list_for_each_entry(c, &bch_cache_sets, list)
  1240. if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
  1241. if (c->cache[ca->sb.nr_this_dev])
  1242. return "duplicate cache set member";
  1243. if (!can_attach_cache(ca, c))
  1244. return "cache sb does not match set";
  1245. if (!CACHE_SYNC(&ca->sb))
  1246. SET_CACHE_SYNC(&c->sb, false);
  1247. goto found;
  1248. }
  1249. c = bch_cache_set_alloc(&ca->sb);
  1250. if (!c)
  1251. return err;
  1252. err = "error creating kobject";
  1253. if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
  1254. kobject_add(&c->internal, &c->kobj, "internal"))
  1255. goto err;
  1256. if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
  1257. goto err;
  1258. bch_debug_init_cache_set(c);
  1259. list_add(&c->list, &bch_cache_sets);
  1260. found:
  1261. sprintf(buf, "cache%i", ca->sb.nr_this_dev);
  1262. if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
  1263. sysfs_create_link(&c->kobj, &ca->kobj, buf))
  1264. goto err;
  1265. if (ca->sb.seq > c->sb.seq) {
  1266. c->sb.version = ca->sb.version;
  1267. memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
  1268. c->sb.flags = ca->sb.flags;
  1269. c->sb.seq = ca->sb.seq;
  1270. pr_debug("set version = %llu", c->sb.version);
  1271. }
  1272. ca->set = c;
  1273. ca->set->cache[ca->sb.nr_this_dev] = ca;
  1274. c->cache_by_alloc[c->caches_loaded++] = ca;
  1275. if (c->caches_loaded == c->sb.nr_in_set)
  1276. run_cache_set(c);
  1277. return NULL;
  1278. err:
  1279. bch_cache_set_unregister(c);
  1280. return err;
  1281. }
  1282. /* Cache device */
  1283. void bch_cache_release(struct kobject *kobj)
  1284. {
  1285. struct cache *ca = container_of(kobj, struct cache, kobj);
  1286. if (ca->set)
  1287. ca->set->cache[ca->sb.nr_this_dev] = NULL;
  1288. bch_cache_allocator_exit(ca);
  1289. bio_split_pool_free(&ca->bio_split_hook);
  1290. free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
  1291. kfree(ca->prio_buckets);
  1292. vfree(ca->buckets);
  1293. free_heap(&ca->heap);
  1294. free_fifo(&ca->unused);
  1295. free_fifo(&ca->free_inc);
  1296. free_fifo(&ca->free);
  1297. if (ca->sb_bio.bi_inline_vecs[0].bv_page)
  1298. put_page(ca->sb_bio.bi_io_vec[0].bv_page);
  1299. if (!IS_ERR_OR_NULL(ca->bdev)) {
  1300. blk_sync_queue(bdev_get_queue(ca->bdev));
  1301. blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
  1302. }
  1303. kfree(ca);
  1304. module_put(THIS_MODULE);
  1305. }
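/*
 * cache_alloc() sets up the in-memory state for one cache device: the free,
 * free_inc and unused FIFOs, the bucket heap, the struct bucket array (sized
 * from sb->nbuckets) and the buffers used for reading and writing bucket
 * priorities.
 */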
  1306. static int cache_alloc(struct cache_sb *sb, struct cache *ca)
  1307. {
  1308. size_t free;
  1309. struct bucket *b;
  1310. __module_get(THIS_MODULE);
  1311. kobject_init(&ca->kobj, &bch_cache_ktype);
  1312. INIT_LIST_HEAD(&ca->discards);
  1313. bio_init(&ca->journal.bio);
  1314. ca->journal.bio.bi_max_vecs = 8;
  1315. ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
  1316. free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
  1317. free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
  1318. if (!init_fifo(&ca->free, free, GFP_KERNEL) ||
  1319. !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
  1320. !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
  1321. !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
  1322. !(ca->buckets = vzalloc(sizeof(struct bucket) *
  1323. ca->sb.nbuckets)) ||
  1324. !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
  1325. 2, GFP_KERNEL)) ||
  1326. !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
  1327. bio_split_pool_init(&ca->bio_split_hook))
  1328. return -ENOMEM;
  1329. ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
  1330. for_each_bucket(b, ca)
  1331. atomic_set(&b->pin, 0);
  1332. if (bch_cache_allocator_init(ca))
  1333. goto err;
  1334. return 0;
  1335. err:
  1336. kobject_put(&ca->kobj);
  1337. return -ENOMEM;
  1338. }
  1339. static void register_cache(struct cache_sb *sb, struct page *sb_page,
  1340. struct block_device *bdev, struct cache *ca)
  1341. {
  1342. char name[BDEVNAME_SIZE];
  1343. const char *err = "cannot allocate memory";
  1344. memcpy(&ca->sb, sb, sizeof(struct cache_sb));
  1345. ca->bdev = bdev;
  1346. ca->bdev->bd_holder = ca;
  1347. bio_init(&ca->sb_bio);
  1348. ca->sb_bio.bi_max_vecs = 1;
  1349. ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
  1350. ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
  1351. get_page(sb_page);
  1352. if (blk_queue_discard(bdev_get_queue(ca->bdev)))
  1353. ca->discard = CACHE_DISCARD(&ca->sb);
  1354. if (cache_alloc(sb, ca) != 0)
  1355. goto err;
  1356. err = "error creating kobject";
  1357. if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
  1358. goto err;
  1359. err = register_cache_set(ca);
  1360. if (err)
  1361. goto err;
  1362. pr_info("registered cache device %s", bdevname(bdev, name));
  1363. return;
  1364. err:
  1365. pr_notice("error opening %s: %s", bdevname(bdev, name), err);
  1366. kobject_put(&ca->kobj);
  1367. }
  1368. /* Global interfaces/init */
  1369. static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
  1370. const char *, size_t);
  1371. kobj_attribute_write(register, register_bcache);
  1372. kobj_attribute_write(register_quiet, register_bcache);
  1373. static bool bch_is_open_backing(struct block_device *bdev) {
  1374. struct cache_set *c, *tc;
  1375. struct cached_dev *dc, *t;
  1376. list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
  1377. list_for_each_entry_safe(dc, t, &c->cached_devs, list)
  1378. if (dc->bdev == bdev)
  1379. return true;
  1380. list_for_each_entry_safe(dc, t, &uncached_devices, list)
  1381. if (dc->bdev == bdev)
  1382. return true;
  1383. return false;
  1384. }
  1385. static bool bch_is_open_cache(struct block_device *bdev) {
  1386. struct cache_set *c, *tc;
  1387. struct cache *ca;
  1388. unsigned i;
  1389. list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
  1390. for_each_cache(ca, c, i)
  1391. if (ca->bdev == bdev)
  1392. return true;
  1393. return false;
  1394. }
  1395. static bool bch_is_open(struct block_device *bdev) {
  1396. return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
  1397. }
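/*
 * register_bcache() is the store side of the register/register_quiet sysfs
 * attributes (e.g. echo /dev/sdX > /sys/fs/bcache/register). It opens the
 * named block device exclusively, reads its superblock and, depending on
 * SB_IS_BDEV(), registers it either as a backing device or as a cache device.
 */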
  1398. static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
  1399. const char *buffer, size_t size)
  1400. {
  1401. ssize_t ret = size;
  1402. const char *err = "cannot allocate memory";
  1403. char *path = NULL;
  1404. struct cache_sb *sb = NULL;
  1405. struct block_device *bdev = NULL;
  1406. struct page *sb_page = NULL;
  1407. if (!try_module_get(THIS_MODULE))
  1408. return -EBUSY;
  1409. mutex_lock(&bch_register_lock);
  1410. if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
  1411. !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
  1412. goto err;
  1413. err = "failed to open device";
  1414. bdev = blkdev_get_by_path(strim(path),
  1415. FMODE_READ|FMODE_WRITE|FMODE_EXCL,
  1416. sb);
  1417. if (IS_ERR(bdev)) {
  1418. if (bdev == ERR_PTR(-EBUSY)) {
  1419. bdev = lookup_bdev(strim(path));
  1420. if (!IS_ERR(bdev) && bch_is_open(bdev))
  1421. err = "device already registered";
  1422. else
  1423. err = "device busy";
  1424. }
  1425. goto err;
  1426. }
  1427. err = "failed to set blocksize";
  1428. if (set_blocksize(bdev, 4096))
  1429. goto err_close;
  1430. err = read_super(sb, bdev, &sb_page);
  1431. if (err)
  1432. goto err_close;
  1433. if (SB_IS_BDEV(sb)) {
  1434. struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
  1435. if (!dc)
  1436. goto err_close;
  1437. register_bdev(sb, sb_page, bdev, dc);
  1438. } else {
  1439. struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
  1440. if (!ca)
  1441. goto err_close;
  1442. register_cache(sb, sb_page, bdev, ca);
  1443. }
  1444. out:
  1445. if (sb_page)
  1446. put_page(sb_page);
  1447. kfree(sb);
  1448. kfree(path);
  1449. mutex_unlock(&bch_register_lock);
  1450. module_put(THIS_MODULE);
  1451. return ret;
  1452. err_close:
  1453. blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
  1454. err:
  1455. if (attr != &ksysfs_register_quiet)
  1456. pr_info("error opening %s: %s", path, err);
  1457. ret = -EINVAL;
  1458. goto out;
  1459. }
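/*
 * Reboot notifier: on shutdown, halt or power off every cache set and
 * uncached backing device is asked to stop, and we wait (for at most about
 * two seconds) on unregister_wait for them to finish closing.
 */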
  1460. static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
  1461. {
  1462. if (code == SYS_DOWN ||
  1463. code == SYS_HALT ||
  1464. code == SYS_POWER_OFF) {
  1465. DEFINE_WAIT(wait);
  1466. unsigned long start = jiffies;
  1467. bool stopped = false;
  1468. struct cache_set *c, *tc;
  1469. struct cached_dev *dc, *tdc;
  1470. mutex_lock(&bch_register_lock);
  1471. if (list_empty(&bch_cache_sets) &&
  1472. list_empty(&uncached_devices))
  1473. goto out;
  1474. pr_info("Stopping all devices:");
  1475. list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
  1476. bch_cache_set_stop(c);
  1477. list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
  1478. bcache_device_stop(&dc->disk);
  1479. /* What's a condition variable? */
  1480. while (1) {
  1481. long timeout = start + 2 * HZ - jiffies;
  1482. stopped = list_empty(&bch_cache_sets) &&
  1483. list_empty(&uncached_devices);
  1484. if (timeout < 0 || stopped)
  1485. break;
  1486. prepare_to_wait(&unregister_wait, &wait,
  1487. TASK_UNINTERRUPTIBLE);
  1488. mutex_unlock(&bch_register_lock);
  1489. schedule_timeout(timeout);
  1490. mutex_lock(&bch_register_lock);
  1491. }
  1492. finish_wait(&unregister_wait, &wait);
  1493. if (stopped)
  1494. pr_info("All devices stopped");
  1495. else
  1496. pr_notice("Timeout waiting for devices to be closed");
  1497. out:
  1498. mutex_unlock(&bch_register_lock);
  1499. }
  1500. return NOTIFY_DONE;
  1501. }
  1502. static struct notifier_block reboot = {
  1503. .notifier_call = bcache_reboot,
  1504. .priority = INT_MAX, /* before any real devices */
  1505. };
  1506. static void bcache_exit(void)
  1507. {
  1508. bch_debug_exit();
  1509. bch_writeback_exit();
  1510. bch_request_exit();
  1511. bch_btree_exit();
  1512. if (bcache_kobj)
  1513. kobject_put(bcache_kobj);
  1514. if (bcache_wq)
  1515. destroy_workqueue(bcache_wq);
  1516. unregister_blkdev(bcache_major, "bcache");
  1517. unregister_reboot_notifier(&reboot);
  1518. }
  1519. static int __init bcache_init(void)
  1520. {
  1521. static const struct attribute *files[] = {
  1522. &ksysfs_register.attr,
  1523. &ksysfs_register_quiet.attr,
  1524. NULL
  1525. };
  1526. mutex_init(&bch_register_lock);
  1527. init_waitqueue_head(&unregister_wait);
  1528. register_reboot_notifier(&reboot);
  1529. closure_debug_init();
  1530. bcache_major = register_blkdev(0, "bcache");
  1531. if (bcache_major < 0)
  1532. return bcache_major;
  1533. if (!(bcache_wq = create_workqueue("bcache")) ||
  1534. !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
  1535. sysfs_create_files(bcache_kobj, files) ||
  1536. bch_btree_init() ||
  1537. bch_request_init() ||
  1538. bch_writeback_init() ||
  1539. bch_debug_init(bcache_kobj))
  1540. goto err;
  1541. return 0;
  1542. err:
  1543. bcache_exit();
  1544. return -ENOMEM;
  1545. }
  1546. module_exit(bcache_exit);
  1547. module_init(bcache_init);