dm-cache-target.c

  1. /*
  2. * Copyright (C) 2012 Red Hat. All rights reserved.
  3. *
  4. * This file is released under the GPL.
  5. */
  6. #include "dm.h"
  7. #include "dm-bio-prison.h"
  8. #include "dm-bio-record.h"
  9. #include "dm-cache-metadata.h"
  10. #include <linux/dm-io.h>
  11. #include <linux/dm-kcopyd.h>
  12. #include <linux/init.h>
  13. #include <linux/mempool.h>
  14. #include <linux/module.h>
  15. #include <linux/slab.h>
  16. #include <linux/vmalloc.h>
  17. #define DM_MSG_PREFIX "cache"
  18. DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
  19. "A percentage of time allocated for copying to and/or from cache");
  20. /*----------------------------------------------------------------*/
  21. /*
  22. * Glossary:
  23. *
  24. * oblock: index of an origin block
  25. * cblock: index of a cache block
  26. * promotion: movement of a block from origin to cache
  27. * demotion: movement of a block from cache to origin
  28. * migration: movement of a block between the origin and cache device,
  29. * either direction
  30. */
  31. /*----------------------------------------------------------------*/
  32. static size_t bitset_size_in_bytes(unsigned nr_entries)
  33. {
  34. return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
  35. }
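/*
 * Worked example: on a 64-bit host (BITS_PER_LONG == 64, sizeof(unsigned
 * long) == 8) a 1000-entry bitset needs dm_div_up(1000, 64) = 16 longs,
 * i.e. 128 bytes; the size is always rounded up to whole words.
 */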
  36. static unsigned long *alloc_bitset(unsigned nr_entries)
  37. {
  38. size_t s = bitset_size_in_bytes(nr_entries);
  39. return vzalloc(s);
  40. }
  41. static void clear_bitset(void *bitset, unsigned nr_entries)
  42. {
  43. size_t s = bitset_size_in_bytes(nr_entries);
  44. memset(bitset, 0, s);
  45. }
  46. static void free_bitset(unsigned long *bits)
  47. {
  48. vfree(bits);
  49. }
  50. /*----------------------------------------------------------------*/
  51. #define PRISON_CELLS 1024
  52. #define MIGRATION_POOL_SIZE 128
  53. #define COMMIT_PERIOD HZ
  54. #define MIGRATION_COUNT_WINDOW 10
  55. /*
  56. * The block size of the device holding cache data must be
  57. * between 32KB and 1GB.
  58. */
  59. #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
  60. #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
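/*
 * With SECTOR_SHIFT == 9 (512-byte sectors) these limits work out to
 * 64 sectors (32KB) and 2097152 sectors (1GB) respectively.
 */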
  61. /*
  62. * FIXME: the cache is read/write for the time being.
  63. */
  64. enum cache_mode {
  65. CM_WRITE, /* metadata may be changed */
  66. CM_READ_ONLY, /* metadata may not be changed */
  67. };
  68. struct cache_features {
  69. enum cache_mode mode;
  70. bool write_through:1;
  71. };
  72. struct cache_stats {
  73. atomic_t read_hit;
  74. atomic_t read_miss;
  75. atomic_t write_hit;
  76. atomic_t write_miss;
  77. atomic_t demotion;
  78. atomic_t promotion;
  79. atomic_t copies_avoided;
  80. atomic_t cache_cell_clash;
  81. atomic_t commit_count;
  82. atomic_t discard_count;
  83. };
  84. struct cache {
  85. struct dm_target *ti;
  86. struct dm_target_callbacks callbacks;
  87. struct dm_cache_metadata *cmd;
  88. /*
  89. * Metadata is written to this device.
  90. */
  91. struct dm_dev *metadata_dev;
  92. /*
  93. * The slower of the two data devices. Typically a spindle.
  94. */
  95. struct dm_dev *origin_dev;
  96. /*
  97. * The faster of the two data devices. Typically an SSD.
  98. */
  99. struct dm_dev *cache_dev;
  100. /*
  101. * Size of the origin device in _complete_ blocks and native sectors.
  102. */
  103. dm_oblock_t origin_blocks;
  104. sector_t origin_sectors;
  105. /*
  106. * Size of the cache device in blocks.
  107. */
  108. dm_cblock_t cache_size;
  109. /*
  110. * Fields for converting from sectors to blocks.
  111. */
  112. uint32_t sectors_per_block;
  113. int sectors_per_block_shift;
  114. spinlock_t lock;
  115. struct bio_list deferred_bios;
  116. struct bio_list deferred_flush_bios;
  117. struct bio_list deferred_writethrough_bios;
  118. struct list_head quiesced_migrations;
  119. struct list_head completed_migrations;
  120. struct list_head need_commit_migrations;
  121. sector_t migration_threshold;
  122. wait_queue_head_t migration_wait;
  123. atomic_t nr_migrations;
  124. wait_queue_head_t quiescing_wait;
  125. atomic_t quiescing;
  126. atomic_t quiescing_ack;
  127. /*
  128. * cache_size entries, dirty if set
  129. */
  130. dm_cblock_t nr_dirty;
  131. unsigned long *dirty_bitset;
  132. /*
  133. * origin_blocks entries, discarded if set.
  134. */
  135. dm_dblock_t discard_nr_blocks;
  136. unsigned long *discard_bitset;
  137. uint32_t discard_block_size; /* a power of 2 times sectors per block */
  138. /*
  139. * Rather than reconstructing the table line for the status we just
  140. * save it and regurgitate.
  141. */
  142. unsigned nr_ctr_args;
  143. const char **ctr_args;
  144. struct dm_kcopyd_client *copier;
  145. struct workqueue_struct *wq;
  146. struct work_struct worker;
  147. struct delayed_work waker;
  148. unsigned long last_commit_jiffies;
  149. struct dm_bio_prison *prison;
  150. struct dm_deferred_set *all_io_ds;
  151. mempool_t *migration_pool;
  152. struct dm_cache_migration *next_migration;
  153. struct dm_cache_policy *policy;
  154. unsigned policy_nr_args;
  155. bool need_tick_bio:1;
  156. bool sized:1;
  157. bool commit_requested:1;
  158. bool loaded_mappings:1;
  159. bool loaded_discards:1;
  160. /*
  161. * Cache features such as write-through.
  162. */
  163. struct cache_features features;
  164. struct cache_stats stats;
  165. };
  166. struct per_bio_data {
  167. bool tick:1;
  168. unsigned req_nr:2;
  169. struct dm_deferred_entry *all_io_entry;
  170. /*
  171. * writethrough fields. These MUST remain at the end of this
  172. * structure and the 'cache' member must be the first as it
  173. * is used to determine the offset of the writethrough fields.
  174. */
  175. struct cache *cache;
  176. dm_cblock_t cblock;
  177. bio_end_io_t *saved_bi_end_io;
  178. struct dm_bio_details bio_details;
  179. };
  180. struct dm_cache_migration {
  181. struct list_head list;
  182. struct cache *cache;
  183. unsigned long start_jiffies;
  184. dm_oblock_t old_oblock;
  185. dm_oblock_t new_oblock;
  186. dm_cblock_t cblock;
  187. bool err:1;
  188. bool writeback:1;
  189. bool demote:1;
  190. bool promote:1;
  191. struct dm_bio_prison_cell *old_ocell;
  192. struct dm_bio_prison_cell *new_ocell;
  193. };
  194. /*
  195. * Processing a bio in the worker thread may require these memory
  196. * allocations. We prealloc to avoid deadlocks (the same worker thread
  197. * frees them back to the mempool).
  198. */
  199. struct prealloc {
  200. struct dm_cache_migration *mg;
  201. struct dm_bio_prison_cell *cell1;
  202. struct dm_bio_prison_cell *cell2;
  203. };
  204. static void wake_worker(struct cache *cache)
  205. {
  206. queue_work(cache->wq, &cache->worker);
  207. }
  208. /*----------------------------------------------------------------*/
  209. static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
  210. {
  211. /* FIXME: change to use a local slab. */
  212. return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
  213. }
  214. static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
  215. {
  216. dm_bio_prison_free_cell(cache->prison, cell);
  217. }
  218. static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
  219. {
  220. if (!p->mg) {
  221. p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
  222. if (!p->mg)
  223. return -ENOMEM;
  224. }
  225. if (!p->cell1) {
  226. p->cell1 = alloc_prison_cell(cache);
  227. if (!p->cell1)
  228. return -ENOMEM;
  229. }
  230. if (!p->cell2) {
  231. p->cell2 = alloc_prison_cell(cache);
  232. if (!p->cell2)
  233. return -ENOMEM;
  234. }
  235. return 0;
  236. }
  237. static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
  238. {
  239. if (p->cell2)
  240. free_prison_cell(cache, p->cell2);
  241. if (p->cell1)
  242. free_prison_cell(cache, p->cell1);
  243. if (p->mg)
  244. mempool_free(p->mg, cache->migration_pool);
  245. }
  246. static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
  247. {
  248. struct dm_cache_migration *mg = p->mg;
  249. BUG_ON(!mg);
  250. p->mg = NULL;
  251. return mg;
  252. }
  253. /*
  254. * You must have a cell within the prealloc struct to return. If not, this
  255. * function will BUG() rather than returning NULL.
  256. */
  257. static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
  258. {
  259. struct dm_bio_prison_cell *r = NULL;
  260. if (p->cell1) {
  261. r = p->cell1;
  262. p->cell1 = NULL;
  263. } else if (p->cell2) {
  264. r = p->cell2;
  265. p->cell2 = NULL;
  266. } else
  267. BUG();
  268. return r;
  269. }
  270. /*
  271. * You can't have more than two cells in a prealloc struct. BUG() will be
  272. * called if you try to overfill.
  273. */
  274. static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
  275. {
  276. if (!p->cell2)
  277. p->cell2 = cell;
  278. else if (!p->cell1)
  279. p->cell1 = cell;
  280. else
  281. BUG();
  282. }
  283. /*----------------------------------------------------------------*/
  284. static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
  285. {
  286. key->virtual = 0;
  287. key->dev = 0;
  288. key->block = from_oblock(oblock);
  289. }
  290. /*
  291. * The caller hands in a preallocated cell, and a free function for it.
  292. * The cell will be freed if there's an error, or if it wasn't used because
  293. * a cell with that key already exists.
  294. */
  295. typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
  296. static int bio_detain(struct cache *cache, dm_oblock_t oblock,
  297. struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
  298. cell_free_fn free_fn, void *free_context,
  299. struct dm_bio_prison_cell **cell_result)
  300. {
  301. int r;
  302. struct dm_cell_key key;
  303. build_key(oblock, &key);
  304. r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
  305. if (r)
  306. free_fn(free_context, cell_prealloc);
  307. return r;
  308. }
  309. static int get_cell(struct cache *cache,
  310. dm_oblock_t oblock,
  311. struct prealloc *structs,
  312. struct dm_bio_prison_cell **cell_result)
  313. {
  314. int r;
  315. struct dm_cell_key key;
  316. struct dm_bio_prison_cell *cell_prealloc;
  317. cell_prealloc = prealloc_get_cell(structs);
  318. build_key(oblock, &key);
  319. r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
  320. if (r)
  321. prealloc_put_cell(structs, cell_prealloc);
  322. return r;
  323. }
  324. /*----------------------------------------------------------------*/
  325. static bool is_dirty(struct cache *cache, dm_cblock_t b)
  326. {
  327. return test_bit(from_cblock(b), cache->dirty_bitset);
  328. }
  329. static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
  330. {
  331. if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
  332. cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
  333. policy_set_dirty(cache->policy, oblock);
  334. }
  335. }
  336. static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
  337. {
  338. if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
  339. policy_clear_dirty(cache->policy, oblock);
  340. cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
  341. if (!from_cblock(cache->nr_dirty))
  342. dm_table_event(cache->ti->table);
  343. }
  344. }
  345. /*----------------------------------------------------------------*/
  346. static bool block_size_is_power_of_two(struct cache *cache)
  347. {
  348. return cache->sectors_per_block_shift >= 0;
  349. }
  350. /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
  351. #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
  352. __always_inline
  353. #endif
  354. static dm_block_t block_div(dm_block_t b, uint32_t n)
  355. {
  356. do_div(b, n);
  357. return b;
  358. }
  359. static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
  360. {
  361. uint32_t discard_blocks = cache->discard_block_size;
  362. dm_block_t b = from_oblock(oblock);
  363. if (!block_size_is_power_of_two(cache))
  364. discard_blocks = discard_blocks / cache->sectors_per_block;
  365. else
  366. discard_blocks >>= cache->sectors_per_block_shift;
  367. b = block_div(b, discard_blocks);
  368. return to_dblock(b);
  369. }
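/*
 * Worked example (illustrative numbers): with sectors_per_block = 128 and
 * discard_block_size = 1024 sectors, each discard block covers
 * 1024 / 128 = 8 origin blocks, so oblock 20 maps to dblock 20 / 8 = 2.
 */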
  370. static void set_discard(struct cache *cache, dm_dblock_t b)
  371. {
  372. unsigned long flags;
  373. atomic_inc(&cache->stats.discard_count);
  374. spin_lock_irqsave(&cache->lock, flags);
  375. set_bit(from_dblock(b), cache->discard_bitset);
  376. spin_unlock_irqrestore(&cache->lock, flags);
  377. }
  378. static void clear_discard(struct cache *cache, dm_dblock_t b)
  379. {
  380. unsigned long flags;
  381. spin_lock_irqsave(&cache->lock, flags);
  382. clear_bit(from_dblock(b), cache->discard_bitset);
  383. spin_unlock_irqrestore(&cache->lock, flags);
  384. }
  385. static bool is_discarded(struct cache *cache, dm_dblock_t b)
  386. {
  387. int r;
  388. unsigned long flags;
  389. spin_lock_irqsave(&cache->lock, flags);
  390. r = test_bit(from_dblock(b), cache->discard_bitset);
  391. spin_unlock_irqrestore(&cache->lock, flags);
  392. return r;
  393. }
  394. static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
  395. {
  396. int r;
  397. unsigned long flags;
  398. spin_lock_irqsave(&cache->lock, flags);
  399. r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
  400. cache->discard_bitset);
  401. spin_unlock_irqrestore(&cache->lock, flags);
  402. return r;
  403. }
  404. /*----------------------------------------------------------------*/
  405. static void load_stats(struct cache *cache)
  406. {
  407. struct dm_cache_statistics stats;
  408. dm_cache_metadata_get_stats(cache->cmd, &stats);
  409. atomic_set(&cache->stats.read_hit, stats.read_hits);
  410. atomic_set(&cache->stats.read_miss, stats.read_misses);
  411. atomic_set(&cache->stats.write_hit, stats.write_hits);
  412. atomic_set(&cache->stats.write_miss, stats.write_misses);
  413. }
  414. static void save_stats(struct cache *cache)
  415. {
  416. struct dm_cache_statistics stats;
  417. stats.read_hits = atomic_read(&cache->stats.read_hit);
  418. stats.read_misses = atomic_read(&cache->stats.read_miss);
  419. stats.write_hits = atomic_read(&cache->stats.write_hit);
  420. stats.write_misses = atomic_read(&cache->stats.write_miss);
  421. dm_cache_metadata_set_stats(cache->cmd, &stats);
  422. }
  423. /*----------------------------------------------------------------
  424. * Per bio data
  425. *--------------------------------------------------------------*/
  426. /*
  427. * If using writeback, leave out struct per_bio_data's writethrough fields.
  428. */
  429. #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
  430. #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
  431. static size_t get_per_bio_data_size(struct cache *cache)
  432. {
  433. return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
  434. }
  435. static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
  436. {
  437. struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
  438. BUG_ON(!pb);
  439. return pb;
  440. }
  441. static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
  442. {
  443. struct per_bio_data *pb = get_per_bio_data(bio, data_size);
  444. pb->tick = false;
  445. pb->req_nr = dm_bio_get_target_bio_nr(bio);
  446. pb->all_io_entry = NULL;
  447. return pb;
  448. }
  449. /*----------------------------------------------------------------
  450. * Remapping
  451. *--------------------------------------------------------------*/
  452. static void remap_to_origin(struct cache *cache, struct bio *bio)
  453. {
  454. bio->bi_bdev = cache->origin_dev->bdev;
  455. }
  456. static void remap_to_cache(struct cache *cache, struct bio *bio,
  457. dm_cblock_t cblock)
  458. {
  459. sector_t bi_sector = bio->bi_sector;
  460. bio->bi_bdev = cache->cache_dev->bdev;
  461. if (!block_size_is_power_of_two(cache))
  462. bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
  463. sector_div(bi_sector, cache->sectors_per_block);
  464. else
  465. bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
  466. (bi_sector & (cache->sectors_per_block - 1));
  467. }
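/*
 * Worked example (illustrative numbers): with sectors_per_block = 128
 * (shift 7), a bio at sector 1000 remapped to cblock 5 lands at
 * (5 << 7) | (1000 & 127) = 640 + 104 = sector 744 on the cache device;
 * the offset within the block is preserved.
 */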
  468. static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
  469. {
  470. unsigned long flags;
  471. size_t pb_data_size = get_per_bio_data_size(cache);
  472. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  473. spin_lock_irqsave(&cache->lock, flags);
  474. if (cache->need_tick_bio &&
  475. !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
  476. pb->tick = true;
  477. cache->need_tick_bio = false;
  478. }
  479. spin_unlock_irqrestore(&cache->lock, flags);
  480. }
  481. static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
  482. dm_oblock_t oblock)
  483. {
  484. check_if_tick_bio_needed(cache, bio);
  485. remap_to_origin(cache, bio);
  486. if (bio_data_dir(bio) == WRITE)
  487. clear_discard(cache, oblock_to_dblock(cache, oblock));
  488. }
  489. static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
  490. dm_oblock_t oblock, dm_cblock_t cblock)
  491. {
  492. check_if_tick_bio_needed(cache, bio);
  493. remap_to_cache(cache, bio, cblock);
  494. if (bio_data_dir(bio) == WRITE) {
  495. set_dirty(cache, oblock, cblock);
  496. clear_discard(cache, oblock_to_dblock(cache, oblock));
  497. }
  498. }
  499. static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
  500. {
  501. sector_t block_nr = bio->bi_sector;
  502. if (!block_size_is_power_of_two(cache))
  503. (void) sector_div(block_nr, cache->sectors_per_block);
  504. else
  505. block_nr >>= cache->sectors_per_block_shift;
  506. return to_oblock(block_nr);
  507. }
  508. static int bio_triggers_commit(struct cache *cache, struct bio *bio)
  509. {
  510. return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
  511. }
  512. static void issue(struct cache *cache, struct bio *bio)
  513. {
  514. unsigned long flags;
  515. if (!bio_triggers_commit(cache, bio)) {
  516. generic_make_request(bio);
  517. return;
  518. }
  519. /*
  520. * Batch together any bios that trigger commits and then issue a
  521. * single commit for them in do_worker().
  522. */
  523. spin_lock_irqsave(&cache->lock, flags);
  524. cache->commit_requested = true;
  525. bio_list_add(&cache->deferred_flush_bios, bio);
  526. spin_unlock_irqrestore(&cache->lock, flags);
  527. }
  528. static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
  529. {
  530. unsigned long flags;
  531. spin_lock_irqsave(&cache->lock, flags);
  532. bio_list_add(&cache->deferred_writethrough_bios, bio);
  533. spin_unlock_irqrestore(&cache->lock, flags);
  534. wake_worker(cache);
  535. }
  536. static void writethrough_endio(struct bio *bio, int err)
  537. {
  538. struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
  539. bio->bi_end_io = pb->saved_bi_end_io;
  540. if (err) {
  541. bio_endio(bio, err);
  542. return;
  543. }
  544. dm_bio_restore(&pb->bio_details, bio);
  545. remap_to_cache(pb->cache, bio, pb->cblock);
  546. /*
  547. * We can't issue this bio directly, since we're in interrupt
  548. * context. So it gets put on a bio list for processing by the
  549. * worker thread.
  550. */
  551. defer_writethrough_bio(pb->cache, bio);
  552. }
  553. /*
  554. * When running in writethrough mode we need to send writes to clean blocks
  555. * to both the cache and origin devices. In future we'd like to clone the
  556. * bio and send them in parallel, but for now we're doing them in
  557. * series as this is easier.
  558. */
  559. static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
  560. dm_oblock_t oblock, dm_cblock_t cblock)
  561. {
  562. struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
  563. pb->cache = cache;
  564. pb->cblock = cblock;
  565. pb->saved_bi_end_io = bio->bi_end_io;
  566. dm_bio_record(&pb->bio_details, bio);
  567. bio->bi_end_io = writethrough_endio;
  568. remap_to_origin_clear_discard(pb->cache, bio, oblock);
  569. }
  570. /*----------------------------------------------------------------
  571. * Migration processing
  572. *
  573. * Migration covers moving data from the origin device to the cache, or
  574. * vice versa.
  575. *--------------------------------------------------------------*/
  576. static void free_migration(struct dm_cache_migration *mg)
  577. {
  578. mempool_free(mg, mg->cache->migration_pool);
  579. }
  580. static void inc_nr_migrations(struct cache *cache)
  581. {
  582. atomic_inc(&cache->nr_migrations);
  583. }
  584. static void dec_nr_migrations(struct cache *cache)
  585. {
  586. atomic_dec(&cache->nr_migrations);
  587. /*
  588. * Wake the worker in case we're suspending the target.
  589. */
  590. wake_up(&cache->migration_wait);
  591. }
  592. static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
  593. bool holder)
  594. {
  595. (holder ? dm_cell_release : dm_cell_release_no_holder)
  596. (cache->prison, cell, &cache->deferred_bios);
  597. free_prison_cell(cache, cell);
  598. }
  599. static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
  600. bool holder)
  601. {
  602. unsigned long flags;
  603. spin_lock_irqsave(&cache->lock, flags);
  604. __cell_defer(cache, cell, holder);
  605. spin_unlock_irqrestore(&cache->lock, flags);
  606. wake_worker(cache);
  607. }
  608. static void cleanup_migration(struct dm_cache_migration *mg)
  609. {
  610. struct cache *cache = mg->cache;
  611. free_migration(mg);
  612. dec_nr_migrations(cache);
  613. }
  614. static void migration_failure(struct dm_cache_migration *mg)
  615. {
  616. struct cache *cache = mg->cache;
  617. if (mg->writeback) {
  618. DMWARN_LIMIT("writeback failed; couldn't copy block");
  619. set_dirty(cache, mg->old_oblock, mg->cblock);
  620. cell_defer(cache, mg->old_ocell, false);
  621. } else if (mg->demote) {
  622. DMWARN_LIMIT("demotion failed; couldn't copy block");
  623. policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
  624. cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
  625. if (mg->promote)
  626. cell_defer(cache, mg->new_ocell, true);
  627. } else {
  628. DMWARN_LIMIT("promotion failed; couldn't copy block");
  629. policy_remove_mapping(cache->policy, mg->new_oblock);
  630. cell_defer(cache, mg->new_ocell, true);
  631. }
  632. cleanup_migration(mg);
  633. }
  634. static void migration_success_pre_commit(struct dm_cache_migration *mg)
  635. {
  636. unsigned long flags;
  637. struct cache *cache = mg->cache;
  638. if (mg->writeback) {
  639. cell_defer(cache, mg->old_ocell, false);
  640. clear_dirty(cache, mg->old_oblock, mg->cblock);
  641. cleanup_migration(mg);
  642. return;
  643. } else if (mg->demote) {
  644. if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
  645. DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
  646. policy_force_mapping(cache->policy, mg->new_oblock,
  647. mg->old_oblock);
  648. if (mg->promote)
  649. cell_defer(cache, mg->new_ocell, true);
  650. cleanup_migration(mg);
  651. return;
  652. }
  653. } else {
  654. if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
  655. DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
  656. policy_remove_mapping(cache->policy, mg->new_oblock);
  657. cleanup_migration(mg);
  658. return;
  659. }
  660. }
  661. spin_lock_irqsave(&cache->lock, flags);
  662. list_add_tail(&mg->list, &cache->need_commit_migrations);
  663. cache->commit_requested = true;
  664. spin_unlock_irqrestore(&cache->lock, flags);
  665. }
  666. static void migration_success_post_commit(struct dm_cache_migration *mg)
  667. {
  668. unsigned long flags;
  669. struct cache *cache = mg->cache;
  670. if (mg->writeback) {
  671. DMWARN("writeback unexpectedly triggered commit");
  672. return;
  673. } else if (mg->demote) {
  674. cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
  675. if (mg->promote) {
  676. mg->demote = false;
  677. spin_lock_irqsave(&cache->lock, flags);
  678. list_add_tail(&mg->list, &cache->quiesced_migrations);
  679. spin_unlock_irqrestore(&cache->lock, flags);
  680. } else
  681. cleanup_migration(mg);
  682. } else {
  683. cell_defer(cache, mg->new_ocell, true);
  684. clear_dirty(cache, mg->new_oblock, mg->cblock);
  685. cleanup_migration(mg);
  686. }
  687. }
  688. static void copy_complete(int read_err, unsigned long write_err, void *context)
  689. {
  690. unsigned long flags;
  691. struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
  692. struct cache *cache = mg->cache;
  693. if (read_err || write_err)
  694. mg->err = true;
  695. spin_lock_irqsave(&cache->lock, flags);
  696. list_add_tail(&mg->list, &cache->completed_migrations);
  697. spin_unlock_irqrestore(&cache->lock, flags);
  698. wake_worker(cache);
  699. }
  700. static void issue_copy_real(struct dm_cache_migration *mg)
  701. {
  702. int r;
  703. struct dm_io_region o_region, c_region;
  704. struct cache *cache = mg->cache;
  705. o_region.bdev = cache->origin_dev->bdev;
  706. o_region.count = cache->sectors_per_block;
  707. c_region.bdev = cache->cache_dev->bdev;
  708. c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
  709. c_region.count = cache->sectors_per_block;
  710. if (mg->writeback || mg->demote) {
  711. /* demote */
  712. o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
  713. r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
  714. } else {
  715. /* promote */
  716. o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
  717. r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
  718. }
  719. if (r < 0) {
  720. DMERR_LIMIT("issuing migration failed");
  721. migration_failure(mg);
  722. }
  723. }
  724. static void avoid_copy(struct dm_cache_migration *mg)
  725. {
  726. atomic_inc(&mg->cache->stats.copies_avoided);
  727. migration_success_pre_commit(mg);
  728. }
  729. static void issue_copy(struct dm_cache_migration *mg)
  730. {
  731. bool avoid;
  732. struct cache *cache = mg->cache;
  733. if (mg->writeback || mg->demote)
  734. avoid = !is_dirty(cache, mg->cblock) ||
  735. is_discarded_oblock(cache, mg->old_oblock);
  736. else
  737. avoid = is_discarded_oblock(cache, mg->new_oblock);
  738. avoid ? avoid_copy(mg) : issue_copy_real(mg);
  739. }
  740. static void complete_migration(struct dm_cache_migration *mg)
  741. {
  742. if (mg->err)
  743. migration_failure(mg);
  744. else
  745. migration_success_pre_commit(mg);
  746. }
  747. static void process_migrations(struct cache *cache, struct list_head *head,
  748. void (*fn)(struct dm_cache_migration *))
  749. {
  750. unsigned long flags;
  751. struct list_head list;
  752. struct dm_cache_migration *mg, *tmp;
  753. INIT_LIST_HEAD(&list);
  754. spin_lock_irqsave(&cache->lock, flags);
  755. list_splice_init(head, &list);
  756. spin_unlock_irqrestore(&cache->lock, flags);
  757. list_for_each_entry_safe(mg, tmp, &list, list)
  758. fn(mg);
  759. }
  760. static void __queue_quiesced_migration(struct dm_cache_migration *mg)
  761. {
  762. list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
  763. }
  764. static void queue_quiesced_migration(struct dm_cache_migration *mg)
  765. {
  766. unsigned long flags;
  767. struct cache *cache = mg->cache;
  768. spin_lock_irqsave(&cache->lock, flags);
  769. __queue_quiesced_migration(mg);
  770. spin_unlock_irqrestore(&cache->lock, flags);
  771. wake_worker(cache);
  772. }
  773. static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
  774. {
  775. unsigned long flags;
  776. struct dm_cache_migration *mg, *tmp;
  777. spin_lock_irqsave(&cache->lock, flags);
  778. list_for_each_entry_safe(mg, tmp, work, list)
  779. __queue_quiesced_migration(mg);
  780. spin_unlock_irqrestore(&cache->lock, flags);
  781. wake_worker(cache);
  782. }
  783. static void check_for_quiesced_migrations(struct cache *cache,
  784. struct per_bio_data *pb)
  785. {
  786. struct list_head work;
  787. if (!pb->all_io_entry)
  788. return;
  789. INIT_LIST_HEAD(&work);
  790. if (pb->all_io_entry)
  791. dm_deferred_entry_dec(pb->all_io_entry, &work);
  792. if (!list_empty(&work))
  793. queue_quiesced_migrations(cache, &work);
  794. }
  795. static void quiesce_migration(struct dm_cache_migration *mg)
  796. {
  797. if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
  798. queue_quiesced_migration(mg);
  799. }
  800. static void promote(struct cache *cache, struct prealloc *structs,
  801. dm_oblock_t oblock, dm_cblock_t cblock,
  802. struct dm_bio_prison_cell *cell)
  803. {
  804. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  805. mg->err = false;
  806. mg->writeback = false;
  807. mg->demote = false;
  808. mg->promote = true;
  809. mg->cache = cache;
  810. mg->new_oblock = oblock;
  811. mg->cblock = cblock;
  812. mg->old_ocell = NULL;
  813. mg->new_ocell = cell;
  814. mg->start_jiffies = jiffies;
  815. inc_nr_migrations(cache);
  816. quiesce_migration(mg);
  817. }
  818. static void writeback(struct cache *cache, struct prealloc *structs,
  819. dm_oblock_t oblock, dm_cblock_t cblock,
  820. struct dm_bio_prison_cell *cell)
  821. {
  822. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  823. mg->err = false;
  824. mg->writeback = true;
  825. mg->demote = false;
  826. mg->promote = false;
  827. mg->cache = cache;
  828. mg->old_oblock = oblock;
  829. mg->cblock = cblock;
  830. mg->old_ocell = cell;
  831. mg->new_ocell = NULL;
  832. mg->start_jiffies = jiffies;
  833. inc_nr_migrations(cache);
  834. quiesce_migration(mg);
  835. }
  836. static void demote_then_promote(struct cache *cache, struct prealloc *structs,
  837. dm_oblock_t old_oblock, dm_oblock_t new_oblock,
  838. dm_cblock_t cblock,
  839. struct dm_bio_prison_cell *old_ocell,
  840. struct dm_bio_prison_cell *new_ocell)
  841. {
  842. struct dm_cache_migration *mg = prealloc_get_migration(structs);
  843. mg->err = false;
  844. mg->writeback = false;
  845. mg->demote = true;
  846. mg->promote = true;
  847. mg->cache = cache;
  848. mg->old_oblock = old_oblock;
  849. mg->new_oblock = new_oblock;
  850. mg->cblock = cblock;
  851. mg->old_ocell = old_ocell;
  852. mg->new_ocell = new_ocell;
  853. mg->start_jiffies = jiffies;
  854. inc_nr_migrations(cache);
  855. quiesce_migration(mg);
  856. }
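/*
 * The three constructors above differ only in which flags and cells they
 * set on the migration:
 *
 *                          writeback  demote  promote  old_ocell  new_ocell
 *   promote()                  0         0       1        NULL      cell
 *   writeback()                1         0       0        cell      NULL
 *   demote_then_promote()      0         1       1        old       new
 */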
  857. /*----------------------------------------------------------------
  858. * bio processing
  859. *--------------------------------------------------------------*/
  860. static void defer_bio(struct cache *cache, struct bio *bio)
  861. {
  862. unsigned long flags;
  863. spin_lock_irqsave(&cache->lock, flags);
  864. bio_list_add(&cache->deferred_bios, bio);
  865. spin_unlock_irqrestore(&cache->lock, flags);
  866. wake_worker(cache);
  867. }
  868. static void process_flush_bio(struct cache *cache, struct bio *bio)
  869. {
  870. size_t pb_data_size = get_per_bio_data_size(cache);
  871. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  872. BUG_ON(bio->bi_size);
  873. if (!pb->req_nr)
  874. remap_to_origin(cache, bio);
  875. else
  876. remap_to_cache(cache, bio, 0);
  877. issue(cache, bio);
  878. }
  879. /*
  880. * People generally discard large parts of a device, e.g. the whole device
  881. * when formatting. Splitting these large discards up into cache-block-
  882. * sized I/Os and then quiescing (always necessary for discard) takes too
  883. * long.
  884. *
  885. * We keep it simple, and allow any size of discard to come in, and just
  886. * mark off blocks on the discard bitset. No passdown occurs!
  887. *
  888. * To implement passdown we need to change the bio_prison such that a cell
  889. * can have a key that spans many blocks.
  890. */
  891. static void process_discard_bio(struct cache *cache, struct bio *bio)
  892. {
  893. dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
  894. cache->discard_block_size);
  895. dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
  896. dm_block_t b;
  897. end_block = block_div(end_block, cache->discard_block_size);
  898. for (b = start_block; b < end_block; b++)
  899. set_discard(cache, to_dblock(b));
  900. bio_endio(bio, 0);
  901. }
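/*
 * Note that start_block is rounded up and end_block rounded down, so a
 * discard that only partially covers a discard block at either end does not
 * mark that block; only fully covered discard blocks are set.
 */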
  902. static bool spare_migration_bandwidth(struct cache *cache)
  903. {
  904. sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
  905. cache->sectors_per_block;
  906. return current_volume < cache->migration_threshold;
  907. }
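/*
 * Worked example (illustrative numbers): with migration_threshold = 2048
 * sectors and sectors_per_block = 128, a new migration is allowed while
 * (nr_migrations + 1) * 128 < 2048, i.e. while at most 14 are already in
 * flight (15 including the one about to start).
 */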
  908. static bool is_writethrough_io(struct cache *cache, struct bio *bio,
  909. dm_cblock_t cblock)
  910. {
  911. return bio_data_dir(bio) == WRITE &&
  912. cache->features.write_through && !is_dirty(cache, cblock);
  913. }
  914. static void inc_hit_counter(struct cache *cache, struct bio *bio)
  915. {
  916. atomic_inc(bio_data_dir(bio) == READ ?
  917. &cache->stats.read_hit : &cache->stats.write_hit);
  918. }
  919. static void inc_miss_counter(struct cache *cache, struct bio *bio)
  920. {
  921. atomic_inc(bio_data_dir(bio) == READ ?
  922. &cache->stats.read_miss : &cache->stats.write_miss);
  923. }
  924. static void process_bio(struct cache *cache, struct prealloc *structs,
  925. struct bio *bio)
  926. {
  927. int r;
  928. bool release_cell = true;
  929. dm_oblock_t block = get_bio_block(cache, bio);
  930. struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
  931. struct policy_result lookup_result;
  932. size_t pb_data_size = get_per_bio_data_size(cache);
  933. struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
  934. bool discarded_block = is_discarded_oblock(cache, block);
  935. bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
  936. /*
  937. * Check to see if that block is currently migrating.
  938. */
  939. cell_prealloc = prealloc_get_cell(structs);
  940. r = bio_detain(cache, block, bio, cell_prealloc,
  941. (cell_free_fn) prealloc_put_cell,
  942. structs, &new_ocell);
  943. if (r > 0)
  944. return;
  945. r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
  946. bio, &lookup_result);
  947. if (r == -EWOULDBLOCK)
  948. /* migration has been denied */
  949. lookup_result.op = POLICY_MISS;
  950. switch (lookup_result.op) {
  951. case POLICY_HIT:
  952. inc_hit_counter(cache, bio);
  953. pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
  954. if (is_writethrough_io(cache, bio, lookup_result.cblock))
  955. remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
  956. else
  957. remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
  958. issue(cache, bio);
  959. break;
  960. case POLICY_MISS:
  961. inc_miss_counter(cache, bio);
  962. pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
  963. remap_to_origin_clear_discard(cache, bio, block);
  964. issue(cache, bio);
  965. break;
  966. case POLICY_NEW:
  967. atomic_inc(&cache->stats.promotion);
  968. promote(cache, structs, block, lookup_result.cblock, new_ocell);
  969. release_cell = false;
  970. break;
  971. case POLICY_REPLACE:
  972. cell_prealloc = prealloc_get_cell(structs);
  973. r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
  974. (cell_free_fn) prealloc_put_cell,
  975. structs, &old_ocell);
  976. if (r > 0) {
  977. /*
  978. * We have to be careful to avoid lock inversion of
  979. * the cells. So we back off, and wait for the
  980. * old_ocell to become free.
  981. */
  982. policy_force_mapping(cache->policy, block,
  983. lookup_result.old_oblock);
  984. atomic_inc(&cache->stats.cache_cell_clash);
  985. break;
  986. }
  987. atomic_inc(&cache->stats.demotion);
  988. atomic_inc(&cache->stats.promotion);
  989. demote_then_promote(cache, structs, lookup_result.old_oblock,
  990. block, lookup_result.cblock,
  991. old_ocell, new_ocell);
  992. release_cell = false;
  993. break;
  994. default:
  995. DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
  996. (unsigned) lookup_result.op);
  997. bio_io_error(bio);
  998. }
  999. if (release_cell)
  1000. cell_defer(cache, new_ocell, false);
  1001. }
  1002. static int need_commit_due_to_time(struct cache *cache)
  1003. {
  1004. return jiffies < cache->last_commit_jiffies ||
  1005. jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
  1006. }
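/*
 * The first comparison handles jiffies wrap-around; otherwise a commit is
 * considered due once COMMIT_PERIOD (one second's worth of jiffies) has
 * elapsed since the last commit.
 */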
  1007. static int commit_if_needed(struct cache *cache)
  1008. {
  1009. int r = 0;
  1010. if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
  1011. dm_cache_changed_this_transaction(cache->cmd)) {
  1012. atomic_inc(&cache->stats.commit_count);
  1013. cache->commit_requested = false;
  1014. r = dm_cache_commit(cache->cmd, false);
  1015. cache->last_commit_jiffies = jiffies;
  1016. }
  1017. return r;
  1018. }
  1019. static void process_deferred_bios(struct cache *cache)
  1020. {
  1021. unsigned long flags;
  1022. struct bio_list bios;
  1023. struct bio *bio;
  1024. struct prealloc structs;
  1025. memset(&structs, 0, sizeof(structs));
  1026. bio_list_init(&bios);
  1027. spin_lock_irqsave(&cache->lock, flags);
  1028. bio_list_merge(&bios, &cache->deferred_bios);
  1029. bio_list_init(&cache->deferred_bios);
  1030. spin_unlock_irqrestore(&cache->lock, flags);
  1031. while (!bio_list_empty(&bios)) {
  1032. /*
  1033. * If we've got no free migration structs, and processing
  1034. * this bio might require one, we pause until there are some
  1035. * prepared mappings to process.
  1036. */
  1037. if (prealloc_data_structs(cache, &structs)) {
  1038. spin_lock_irqsave(&cache->lock, flags);
  1039. bio_list_merge(&cache->deferred_bios, &bios);
  1040. spin_unlock_irqrestore(&cache->lock, flags);
  1041. break;
  1042. }
  1043. bio = bio_list_pop(&bios);
  1044. if (bio->bi_rw & REQ_FLUSH)
  1045. process_flush_bio(cache, bio);
  1046. else if (bio->bi_rw & REQ_DISCARD)
  1047. process_discard_bio(cache, bio);
  1048. else
  1049. process_bio(cache, &structs, bio);
  1050. }
  1051. prealloc_free_structs(cache, &structs);
  1052. }
  1053. static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
  1054. {
  1055. unsigned long flags;
  1056. struct bio_list bios;
  1057. struct bio *bio;
  1058. bio_list_init(&bios);
  1059. spin_lock_irqsave(&cache->lock, flags);
  1060. bio_list_merge(&bios, &cache->deferred_flush_bios);
  1061. bio_list_init(&cache->deferred_flush_bios);
  1062. spin_unlock_irqrestore(&cache->lock, flags);
  1063. while ((bio = bio_list_pop(&bios)))
  1064. submit_bios ? generic_make_request(bio) : bio_io_error(bio);
  1065. }
  1066. static void process_deferred_writethrough_bios(struct cache *cache)
  1067. {
  1068. unsigned long flags;
  1069. struct bio_list bios;
  1070. struct bio *bio;
  1071. bio_list_init(&bios);
  1072. spin_lock_irqsave(&cache->lock, flags);
  1073. bio_list_merge(&bios, &cache->deferred_writethrough_bios);
  1074. bio_list_init(&cache->deferred_writethrough_bios);
  1075. spin_unlock_irqrestore(&cache->lock, flags);
  1076. while ((bio = bio_list_pop(&bios)))
  1077. generic_make_request(bio);
  1078. }
  1079. static void writeback_some_dirty_blocks(struct cache *cache)
  1080. {
  1081. int r = 0;
  1082. dm_oblock_t oblock;
  1083. dm_cblock_t cblock;
  1084. struct prealloc structs;
  1085. struct dm_bio_prison_cell *old_ocell;
  1086. memset(&structs, 0, sizeof(structs));
  1087. while (spare_migration_bandwidth(cache)) {
  1088. if (prealloc_data_structs(cache, &structs))
  1089. break;
  1090. r = policy_writeback_work(cache->policy, &oblock, &cblock);
  1091. if (r)
  1092. break;
  1093. r = get_cell(cache, oblock, &structs, &old_ocell);
  1094. if (r) {
  1095. policy_set_dirty(cache->policy, oblock);
  1096. break;
  1097. }
  1098. writeback(cache, &structs, oblock, cblock, old_ocell);
  1099. }
  1100. prealloc_free_structs(cache, &structs);
  1101. }
  1102. /*----------------------------------------------------------------
  1103. * Main worker loop
  1104. *--------------------------------------------------------------*/
  1105. static bool is_quiescing(struct cache *cache)
  1106. {
  1107. return atomic_read(&cache->quiescing);
  1108. }
  1109. static void ack_quiescing(struct cache *cache)
  1110. {
  1111. if (is_quiescing(cache)) {
  1112. atomic_inc(&cache->quiescing_ack);
  1113. wake_up(&cache->quiescing_wait);
  1114. }
  1115. }
  1116. static void wait_for_quiescing_ack(struct cache *cache)
  1117. {
  1118. wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
  1119. }
  1120. static void start_quiescing(struct cache *cache)
  1121. {
  1122. atomic_inc(&cache->quiescing);
  1123. wait_for_quiescing_ack(cache);
  1124. }
  1125. static void stop_quiescing(struct cache *cache)
  1126. {
  1127. atomic_set(&cache->quiescing, 0);
  1128. atomic_set(&cache->quiescing_ack, 0);
  1129. }
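/*
 * Quiescing handshake: start_quiescing() raises the flag and then blocks in
 * wait_for_quiescing_ack() until the worker notices the flag and calls
 * ack_quiescing(); stop_quiescing() simply clears both counters.
 */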
  1130. static void wait_for_migrations(struct cache *cache)
  1131. {
  1132. wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
  1133. }
  1134. static void stop_worker(struct cache *cache)
  1135. {
  1136. cancel_delayed_work(&cache->waker);
  1137. flush_workqueue(cache->wq);
  1138. }
  1139. static void requeue_deferred_io(struct cache *cache)
  1140. {
  1141. struct bio *bio;
  1142. struct bio_list bios;
  1143. bio_list_init(&bios);
  1144. bio_list_merge(&bios, &cache->deferred_bios);
  1145. bio_list_init(&cache->deferred_bios);
  1146. while ((bio = bio_list_pop(&bios)))
  1147. bio_endio(bio, DM_ENDIO_REQUEUE);
  1148. }
  1149. static int more_work(struct cache *cache)
  1150. {
  1151. if (is_quiescing(cache))
  1152. return !list_empty(&cache->quiesced_migrations) ||
  1153. !list_empty(&cache->completed_migrations) ||
  1154. !list_empty(&cache->need_commit_migrations);
  1155. else
  1156. return !bio_list_empty(&cache->deferred_bios) ||
  1157. !bio_list_empty(&cache->deferred_flush_bios) ||
  1158. !bio_list_empty(&cache->deferred_writethrough_bios) ||
  1159. !list_empty(&cache->quiesced_migrations) ||
  1160. !list_empty(&cache->completed_migrations) ||
  1161. !list_empty(&cache->need_commit_migrations);
  1162. }
  1163. static void do_worker(struct work_struct *ws)
  1164. {
  1165. struct cache *cache = container_of(ws, struct cache, worker);
  1166. do {
  1167. if (!is_quiescing(cache)) {
  1168. writeback_some_dirty_blocks(cache);
  1169. process_deferred_writethrough_bios(cache);
  1170. process_deferred_bios(cache);
  1171. }
  1172. process_migrations(cache, &cache->quiesced_migrations, issue_copy);
  1173. process_migrations(cache, &cache->completed_migrations, complete_migration);
  1174. if (commit_if_needed(cache)) {
  1175. process_deferred_flush_bios(cache, false);
  1176. /*
  1177. * FIXME: rollback metadata or just go into a
  1178. * failure mode and error everything
  1179. */
  1180. } else {
  1181. process_deferred_flush_bios(cache, true);
  1182. process_migrations(cache, &cache->need_commit_migrations,
  1183. migration_success_post_commit);
  1184. }
  1185. ack_quiescing(cache);
  1186. } while (more_work(cache));
  1187. }
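/*
 * Each pass of do_worker() above proceeds in order: unless quiescing, write
 * back some dirty blocks and resubmit the deferred writethrough and ordinary
 * bios; then issue quiesced migrations, complete finished ones, commit the
 * metadata if needed, and only then release (or error, if the commit failed)
 * the deferred flush bios and the migrations that were waiting on that
 * commit.
 */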
  1188. /*
  1189. * We want to commit periodically so that not too much
  1190. * unwritten metadata builds up.
  1191. */
  1192. static void do_waker(struct work_struct *ws)
  1193. {
  1194. struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
  1195. policy_tick(cache->policy);
  1196. wake_worker(cache);
  1197. queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
  1198. }
  1199. /*----------------------------------------------------------------*/
  1200. static int is_congested(struct dm_dev *dev, int bdi_bits)
  1201. {
  1202. struct request_queue *q = bdev_get_queue(dev->bdev);
  1203. return bdi_congested(&q->backing_dev_info, bdi_bits);
  1204. }
  1205. static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
  1206. {
  1207. struct cache *cache = container_of(cb, struct cache, callbacks);
  1208. return is_congested(cache->origin_dev, bdi_bits) ||
  1209. is_congested(cache->cache_dev, bdi_bits);
  1210. }
  1211. /*----------------------------------------------------------------
  1212. * Target methods
  1213. *--------------------------------------------------------------*/
  1214. /*
  1215. * This function gets called on the error paths of the constructor, so we
  1216. * have to cope with a partially initialised struct.
  1217. */
  1218. static void destroy(struct cache *cache)
  1219. {
  1220. unsigned i;
  1221. if (cache->next_migration)
  1222. mempool_free(cache->next_migration, cache->migration_pool);
  1223. if (cache->migration_pool)
  1224. mempool_destroy(cache->migration_pool);
  1225. if (cache->all_io_ds)
  1226. dm_deferred_set_destroy(cache->all_io_ds);
  1227. if (cache->prison)
  1228. dm_bio_prison_destroy(cache->prison);
  1229. if (cache->wq)
  1230. destroy_workqueue(cache->wq);
  1231. if (cache->dirty_bitset)
  1232. free_bitset(cache->dirty_bitset);
  1233. if (cache->discard_bitset)
  1234. free_bitset(cache->discard_bitset);
  1235. if (cache->copier)
  1236. dm_kcopyd_client_destroy(cache->copier);
  1237. if (cache->cmd)
  1238. dm_cache_metadata_close(cache->cmd);
  1239. if (cache->metadata_dev)
  1240. dm_put_device(cache->ti, cache->metadata_dev);
  1241. if (cache->origin_dev)
  1242. dm_put_device(cache->ti, cache->origin_dev);
  1243. if (cache->cache_dev)
  1244. dm_put_device(cache->ti, cache->cache_dev);
  1245. if (cache->policy)
  1246. dm_cache_policy_destroy(cache->policy);
  1247. for (i = 0; i < cache->nr_ctr_args ; i++)
  1248. kfree(cache->ctr_args[i]);
  1249. kfree(cache->ctr_args);
  1250. kfree(cache);
  1251. }
  1252. static void cache_dtr(struct dm_target *ti)
  1253. {
  1254. struct cache *cache = ti->private;
  1255. destroy(cache);
  1256. }
  1257. static sector_t get_dev_size(struct dm_dev *dev)
  1258. {
  1259. return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
  1260. }
  1261. /*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev   : fast device holding the persistent metadata
 * cache dev      : fast device holding cached data blocks
 * origin dev     : slow device holding original data blocks
 * block size     : cache unit size in sectors
 *
 * #feature args  : number of feature arguments passed
 * feature args   : writethrough.  (The default is writeback.)
 *
 * policy         : the replacement policy to use
 * #policy args   : an even number of policy arguments corresponding
 *                  to key/value pairs passed to the policy
 * policy args    : key/value pairs passed to the policy
 *                  E.g. 'sequential_threshold 1024'
 *                  See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough : write through caching that prohibits cache block
 *                  content from being different from origin block content.
 *                  Without this argument, the default behaviour is to write
 *                  back cache block contents later for performance reasons,
 *                  so they may differ from the corresponding origin blocks.
 */
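
/*
 * For example, a hypothetical table line (device names and sizes are
 * illustrative only) might look like:
 *
 *   0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast-blocks \
 *	/dev/mapper/slow 512 1 writethrough default 0
 */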
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;

	struct dm_dev *origin_dev;
	sector_t origin_sectors;

	uint32_t block_size;

	const char *policy_name;
	int policy_argc;
	const char **policy_argv;

	struct cache_features features;
};

static void destroy_cache_args(struct cache_args *ca)
{
	if (ca->metadata_dev)
		dm_put_device(ca->ti, ca->metadata_dev);

	if (ca->cache_dev)
		dm_put_device(ca->ti, ca->cache_dev);

	if (ca->origin_dev)
		dm_put_device(ca->ti, ca->origin_dev);

	kfree(ca);
}
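
/*
 * Constructor argument parsing helpers.  Each helper consumes its own
 * arguments from the dm_arg_set, in the order they appear on the table
 * line documented above.
 */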
static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
	if (!as->argc) {
		*error = "Insufficient args";
		return false;
	}

	return true;
}

static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(ca->metadata_dev);
	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);

	return 0;
}

static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->cache_dev);
	if (r) {
		*error = "Error opening cache device";
		return r;
	}
	ca->cache_sectors = get_dev_size(ca->cache_dev);

	return 0;
}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->origin_dev);
	if (r) {
		*error = "Error opening origin device";
		return r;
	}

	ca->origin_sectors = get_dev_size(ca->origin_dev);
	if (ca->ti->len > ca->origin_sectors) {
		*error = "Device size larger than cached device";
		return -EINVAL;
	}

	return 0;
}

static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	unsigned long block_size;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		*error = "Invalid data block size";
		return -EINVAL;
	}

	if (block_size > ca->cache_sectors) {
		*error = "Data block size is larger than the cache device";
		return -EINVAL;
	}

	ca->block_size = block_size;

	return 0;
}

static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->write_through = false;
}

static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
			  char **error)
{
	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of cache feature arguments"},
	};

	int r;
	unsigned argc;
	const char *arg;
	struct cache_features *cf = &ca->features;

	init_features(cf);

	r = dm_read_arg_group(_args, as, &argc, error);
	if (r)
		return -EINVAL;

	while (argc--) {
		arg = dm_shift_arg(as);

		if (!strcasecmp(arg, "writeback"))
			cf->write_through = false;

		else if (!strcasecmp(arg, "writethrough"))
			cf->write_through = true;

		else {
			*error = "Unrecognised cache feature requested";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
			char **error)
{
	static struct dm_arg _args[] = {
		{0, 1024, "Invalid number of policy arguments"},
	};

	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	ca->policy_name = dm_shift_arg(as);

	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
	if (r)
		return -EINVAL;

	ca->policy_argv = (const char **)as->argv;
	dm_consume_args(as, ca->policy_argc);

	return 0;
}
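
/*
 * Parse the whole constructor line by calling the helpers above in table
 * order: metadata dev, cache dev, origin dev, block size, features, policy.
 */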
static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
			    char **error)
{
	int r;
	struct dm_arg_set as;

	as.argc = argc;
	as.argv = argv;

	r = parse_metadata_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_cache_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_origin_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_block_size(ca, &as, error);
	if (r)
		return r;

	r = parse_features(ca, &as, error);
	if (r)
		return r;

	r = parse_policy(ca, &as, error);
	if (r)
		return r;

	return 0;
}

/*----------------------------------------------------------------*/

static struct kmem_cache *migration_cache;

#define NOT_CORE_OPTION 1

static int process_config_option(struct cache *cache, const char *key, const char *value)
{
	unsigned long tmp;

	if (!strcasecmp(key, "migration_threshold")) {
		if (kstrtoul(value, 10, &tmp))
			return -EINVAL;

		cache->migration_threshold = tmp;
		return 0;
	}

	return NOT_CORE_OPTION;
}

static int set_config_value(struct cache *cache, const char *key, const char *value)
{
	int r = process_config_option(cache, key, value);

	if (r == NOT_CORE_OPTION)
		r = policy_set_config_value(cache->policy, key, value);

	if (r)
		DMWARN("bad config value for %s: %s", key, value);

	return r;
}

static int set_config_values(struct cache *cache, int argc, const char **argv)
{
	int r = 0;

	if (argc & 1) {
		DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
		return -EINVAL;
	}

	while (argc) {
		r = set_config_value(cache, argv[0], argv[1]);
		if (r)
			break;

		argc -= 2;
		argv += 2;
	}

	return r;
}

static int create_cache_policy(struct cache *cache, struct cache_args *ca,
			       char **error)
{
	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
							   cache->cache_size,
							   cache->origin_sectors,
							   cache->sectors_per_block);
	if (IS_ERR(p)) {
		*error = "Error creating cache's policy";
		return PTR_ERR(p);
	}
	cache->policy = p;

	return 0;
}

/*
 * We want the discard block size to be a power of two, at least as large
 * as the cache block size, and to give no more than 2^14 discard blocks
 * across the origin.
 */
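/*
 * For example (illustrative numbers only): a 512 sector cache block over a
 * 2^30 sector (512 GiB) origin starts at a 512 sector discard block and is
 * doubled up to 65536 sectors, at which point the origin spans exactly
 * 2^14 discard blocks.
 */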
#define MAX_DISCARD_BLOCKS (1 << 14)

static bool too_many_discard_blocks(sector_t discard_block_size,
				    sector_t origin_size)
{
	(void) sector_div(origin_size, discard_block_size);

	return origin_size > MAX_DISCARD_BLOCKS;
}

static sector_t calculate_discard_block_size(sector_t cache_block_size,
					     sector_t origin_size)
{
	sector_t discard_block_size;

	discard_block_size = roundup_pow_of_two(cache_block_size);

	if (origin_size)
		while (too_many_discard_blocks(discard_block_size, origin_size))
			discard_block_size *= 2;

	return discard_block_size;
}

#define DEFAULT_MIGRATION_THRESHOLD 2048
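
/*
 * Build the struct cache from the parsed arguments: take over the device
 * references, create the policy and metadata objects, and allocate the
 * bitsets, kcopyd client, workqueue, bio prison and migration mempool.
 * Any failure tears down whatever has been set up so far via destroy().
 */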
static int cache_create(struct cache_args *ca, struct cache **result)
{
	int r = 0;
	char **error = &ca->ti->error;
	struct cache *cache;
	struct dm_target *ti = ca->ti;
	dm_block_t origin_blocks;
	struct dm_cache_metadata *cmd;
	bool may_format = ca->features.mode == CM_WRITE;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;

	cache->ti = ca->ti;
	ti->private = cache;
	ti->num_flush_bios = 2;
	ti->flush_supported = true;

	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	ti->discard_zeroes_data_unsupported = true;

	cache->features = ca->features;
	ti->per_bio_data_size = get_per_bio_data_size(cache);

	cache->callbacks.congested_fn = cache_is_congested;
	dm_table_add_target_callbacks(ti->table, &cache->callbacks);

	cache->metadata_dev = ca->metadata_dev;
	cache->origin_dev = ca->origin_dev;
	cache->cache_dev = ca->cache_dev;

	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;

	/* FIXME: factor out this whole section */
	origin_blocks = cache->origin_sectors = ca->origin_sectors;
	origin_blocks = block_div(origin_blocks, ca->block_size);
	cache->origin_blocks = to_oblock(origin_blocks);

	cache->sectors_per_block = ca->block_size;
	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
		r = -EINVAL;
		goto bad;
	}
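
	/*
	 * If the cache block size is not a power of two we cannot convert
	 * sectors to blocks with a shift, so record a shift of -1 and fall
	 * back to division.
	 */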
	if (ca->block_size & (ca->block_size - 1)) {
		dm_block_t cache_size = ca->cache_sectors;

		cache->sectors_per_block_shift = -1;
		cache_size = block_div(cache_size, ca->block_size);
		cache->cache_size = to_cblock(cache_size);
	} else {
		cache->sectors_per_block_shift = __ffs(ca->block_size);
		cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
	}

	r = create_cache_policy(cache, ca, error);
	if (r)
		goto bad;

	cache->policy_nr_args = ca->policy_argc;
	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;

	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
	if (r) {
		*error = "Error setting cache policy's config values";
		goto bad;
	}

	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
				     ca->block_size, may_format,
				     dm_cache_policy_get_hint_size(cache->policy));
	if (IS_ERR(cmd)) {
		*error = "Error creating metadata object";
		r = PTR_ERR(cmd);
		goto bad;
	}
	cache->cmd = cmd;

	spin_lock_init(&cache->lock);
	bio_list_init(&cache->deferred_bios);
	bio_list_init(&cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	atomic_set(&cache->nr_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);

	init_waitqueue_head(&cache->quiescing_wait);
	atomic_set(&cache->quiescing, 0);
	atomic_set(&cache->quiescing_ack, 0);

	r = -ENOMEM;
	cache->nr_dirty = 0;
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	if (!cache->dirty_bitset) {
		*error = "could not allocate dirty bitset";
		goto bad;
	}
	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));

	cache->discard_block_size =
		calculate_discard_block_size(cache->sectors_per_block,
					     cache->origin_sectors);
	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
	if (!cache->discard_bitset) {
		*error = "could not allocate discard bitset";
		goto bad;
	}
	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(cache->copier)) {
		*error = "could not create kcopyd client";
		r = PTR_ERR(cache->copier);
		goto bad;
	}

	cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!cache->wq) {
		*error = "could not create workqueue for metadata object";
		goto bad;
	}
	INIT_WORK(&cache->worker, do_worker);
	INIT_DELAYED_WORK(&cache->waker, do_waker);
	cache->last_commit_jiffies = jiffies;

	cache->prison = dm_bio_prison_create(PRISON_CELLS);
	if (!cache->prison) {
		*error = "could not create bio prison";
		goto bad;
	}

	cache->all_io_ds = dm_deferred_set_create();
	if (!cache->all_io_ds) {
		*error = "could not create all_io deferred set";
		goto bad;
	}

	cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
							 migration_cache);
	if (!cache->migration_pool) {
		*error = "Error creating cache's migration mempool";
		goto bad;
	}

	cache->next_migration = NULL;

	cache->need_tick_bio = true;
	cache->sized = false;
	cache->commit_requested = false;
	cache->loaded_mappings = false;
	cache->loaded_discards = false;

	load_stats(cache);

	atomic_set(&cache->stats.demotion, 0);
	atomic_set(&cache->stats.promotion, 0);
	atomic_set(&cache->stats.copies_avoided, 0);
	atomic_set(&cache->stats.cache_cell_clash, 0);
	atomic_set(&cache->stats.commit_count, 0);
	atomic_set(&cache->stats.discard_count, 0);

	*result = cache;
	return 0;

bad:
	destroy(cache);
	return r;
}

static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
	unsigned i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);
		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			return -ENOMEM;
		}
	}

	cache->nr_ctr_args = argc;
	cache->ctr_args = copy;

	return 0;
}
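
/*
 * Only the arguments after the first three (metadata, cache and origin
 * devices) are copied for later table output; cache_status() reconstructs
 * the device names itself when emitting the table line.
 */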
static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct cache_args *ca;
	struct cache *cache = NULL;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		ti->error = "Error allocating memory for cache";
		return -ENOMEM;
	}
	ca->ti = ti;

	r = parse_cache_args(ca, argc, argv, &ti->error);
	if (r)
		goto out;

	r = cache_create(ca, &cache);
	if (r)
		goto out;

	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
	if (r) {
		destroy(cache);
		goto out;
	}

	ti->private = cache;

out:
	destroy_cache_args(ca);
	return r;
}
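
/*
 * Fast-path mapping.  Flushes, discards and anything that cannot be mapped
 * without blocking (a cell collision, or a policy lookup that would need a
 * migration) are deferred to the worker thread; straightforward hits and
 * misses are remapped here.
 */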
static int cache_map(struct dm_target *ti, struct bio *bio)
{
	struct cache *cache = ti->private;

	int r;
	dm_oblock_t block = get_bio_block(cache, bio);
	size_t pb_data_size = get_per_bio_data_size(cache);
	bool can_migrate = false;
	bool discarded_block;
	struct dm_bio_prison_cell *cell;
	struct policy_result lookup_result;
	struct per_bio_data *pb;

	if (from_oblock(block) >= from_oblock(cache->origin_blocks)) {
		/*
		 * This can only occur if the io goes to a partial block at
		 * the end of the origin device.  We don't cache these.
		 * Just remap to the origin and carry on.
		 */
		remap_to_origin_clear_discard(cache, bio, block);
		return DM_MAPIO_REMAPPED;
	}

	pb = init_per_bio_data(bio, pb_data_size);

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell = alloc_prison_cell(cache);
	if (!cell) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = bio_detain(cache, block, bio, cell,
		       (cell_free_fn) free_prison_cell,
		       cache, &cell);
	if (r) {
		if (r < 0)
			defer_bio(cache, bio);

		return DM_MAPIO_SUBMITTED;
	}
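
	/*
	 * Ask the policy where the block lives.  can_migrate is false on
	 * this path, so a lookup that would require a promotion or demotion
	 * comes back as -EWOULDBLOCK and the bio is deferred to the worker.
	 */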
	discarded_block = is_discarded_oblock(cache, block);

	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
		       bio, &lookup_result);
	if (r == -EWOULDBLOCK) {
		cell_defer(cache, cell, true);
		return DM_MAPIO_SUBMITTED;

	} else if (r) {
		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	switch (lookup_result.op) {
	case POLICY_HIT:
		inc_hit_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

		cell_defer(cache, cell, false);
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (pb->req_nr != 0) {
			/*
			 * This is a duplicate writethrough io that is no
			 * longer needed because the block has been demoted.
			 */
			bio_endio(bio, 0);
			cell_defer(cache, cell, false);
			return DM_MAPIO_SUBMITTED;
		} else {
			remap_to_origin_clear_discard(cache, bio, block);
			cell_defer(cache, cell, false);
		}
		break;

	default:
		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	return DM_MAPIO_REMAPPED;
}

static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct cache *cache = ti->private;
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (pb->tick) {
		policy_tick(cache->policy);

		spin_lock_irqsave(&cache->lock, flags);
		cache->need_tick_bio = true;
		spin_unlock_irqrestore(&cache->lock, flags);
	}

	check_for_quiesced_migrations(cache, pb);

	return 0;
}
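
/*
 * Metadata writeback helpers used at suspend time: the dirty and discard
 * bitsets and the policy hints are written out, then sync_metadata()
 * commits them (with the clean-shutdown flag only if everything succeeded).
 */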
static int write_dirty_bitset(struct cache *cache)
{
	unsigned i, r;

	for (i = 0; i < from_cblock(cache->cache_size); i++) {
		r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
				       is_dirty(cache, to_cblock(i)));
		if (r)
			return r;
	}

	return 0;
}

static int write_discard_bitset(struct cache *cache)
{
	unsigned i, r;

	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
					   cache->discard_nr_blocks);
	if (r) {
		DMERR("could not resize on-disk discard bitset");
		return r;
	}

	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
					 is_discarded(cache, to_dblock(i)));
		if (r)
			return r;
	}

	return 0;
}

static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
		     uint32_t hint)
{
	struct cache *cache = context;

	return dm_cache_save_hint(cache->cmd, cblock, hint);
}

static int write_hints(struct cache *cache)
{
	int r;

	r = dm_cache_begin_hints(cache->cmd, cache->policy);
	if (r) {
		DMERR("dm_cache_begin_hints failed");
		return r;
	}

	r = policy_walk_mappings(cache->policy, save_hint, cache);
	if (r)
		DMERR("policy_walk_mappings failed");

	return r;
}

/*
 * returns true on success
 */
static bool sync_metadata(struct cache *cache)
{
	int r1, r2, r3, r4;

	r1 = write_dirty_bitset(cache);
	if (r1)
		DMERR("could not write dirty bitset");

	r2 = write_discard_bitset(cache);
	if (r2)
		DMERR("could not write discard bitset");

	save_stats(cache);

	r3 = write_hints(cache);
	if (r3)
		DMERR("could not write hints");

	/*
	 * If writing the above metadata failed, we still commit, but don't
	 * set the clean shutdown flag.  This will effectively force every
	 * dirty bit to be set on reload.
	 */
	r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
	if (r4)
		DMERR("could not write cache metadata.  Data loss may occur.");

	return !r1 && !r2 && !r3 && !r4;
}

static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	start_quiescing(cache);
	wait_for_migrations(cache);
	stop_worker(cache);
	requeue_deferred_io(cache);
	stop_quiescing(cache);

	(void) sync_metadata(cache);
}
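
/*
 * Callbacks handed to dm_cache_load_mappings() and dm_cache_load_discards()
 * when the metadata is re-read during cache_preresume().
 */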
static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
			bool dirty, uint32_t hint, bool hint_valid)
{
	int r;
	struct cache *cache = context;

	r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
	if (r)
		return r;

	if (dirty)
		set_dirty(cache, oblock, cblock);
	else
		clear_dirty(cache, oblock, cblock);

	return 0;
}

static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct cache *cache = context;

	/* FIXME: handle mis-matched block size */

	if (discard)
		set_discard(cache, dblock);
	else
		clear_discard(cache, dblock);

	return 0;
}

static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	sector_t actual_cache_size = get_dev_size(cache->cache_dev);
	(void) sector_div(actual_cache_size, cache->sectors_per_block);

	/*
	 * Check to see if the cache has resized.
	 */
	if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
		cache->cache_size = to_cblock(actual_cache_size);

		r = dm_cache_resize(cache->cmd, cache->cache_size);
		if (r) {
			DMERR("could not resize cache metadata");
			return r;
		}

		cache->sized = true;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("could not load cache mappings");
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		r = dm_cache_load_discards(cache->cmd, load_discard, cache);
		if (r) {
			DMERR("could not load origin discards");
			return r;
		}

		cache->loaded_discards = true;
	}

	return r;
}

static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}

/*
 * Status format:
 *
 * <#used metadata blocks>/<#total metadata blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#blocks in cache> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <#policy args> <policy args>*
 */
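
/*
 * A hypothetical STATUSTYPE_INFO line (all values illustrative only), with
 * any policy key/value pairs appended at the end by the policy itself:
 *
 *   23/4096 156 22 387 51 0 46 1023 7 1 writethrough 2 migration_threshold 2048 ...
 */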
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;

	switch (type) {
	case STATUSTYPE_INFO:
		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
			r = dm_cache_commit(cache->cmd, false);
			if (r)
				DMERR("could not commit metadata for accurate status");
		}

		r = dm_cache_get_free_metadata_block_count(cache->cmd,
							   &nr_free_blocks_metadata);
		if (r) {
			DMERR("could not get metadata free block count");
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("could not get metadata device size");
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned) atomic_read(&cache->stats.read_hit),
		       (unsigned) atomic_read(&cache->stats.read_miss),
		       (unsigned) atomic_read(&cache->stats.write_hit),
		       (unsigned) atomic_read(&cache->stats.write_miss),
		       (unsigned) atomic_read(&cache->stats.demotion),
		       (unsigned) atomic_read(&cache->stats.promotion),
		       (unsigned long long) from_cblock(residency),
		       cache->nr_dirty);

		if (cache->features.write_through)
			DMEMIT("1 writethrough ");
		else
			DMEMIT("0 ");

		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
			if (r)
				DMERR("policy_emit_config_values returned %d", r);
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < cache->nr_ctr_args - 1; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
	}

	return;

err:
	DMEMIT("Error");
}

/*
 * Supports <key> <value>.
 *
 * The key migration_threshold is supported by the cache target core.
 */
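/*
 * E.g. (hypothetical device name):
 *   dmsetup message my-cache 0 migration_threshold 4096
 */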
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct cache *cache = ti->private;

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r = 0;
	struct cache *cache = ti->private;

	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}

/*
 * We assume I/O is going to the origin (which is the volume
 * more likely to have restrictions e.g. by being striped).
 * (Looking up the exact location of the data would be expensive
 * and could always be out of date by the time the bio is submitted.)
 */
static int cache_bvec_merge(struct dm_target *ti,
			    struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size)
{
	struct cache *cache = ti->private;
	struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cache->origin_dev->bdev;
	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the cache device
	 */
	limits->max_discard_sectors = cache->discard_block_size * 1024;
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * cache's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < cache->sectors_per_block ||
	    do_div(io_opt_sectors, cache->sectors_per_block)) {
		blk_limits_io_min(limits, 0);
		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	}
	set_discard_limits(cache, limits);
}

/*----------------------------------------------------------------*/

static struct target_type cache_target = {
	.name = "cache",
	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.map = cache_map,
	.end_io = cache_end_io,
	.postsuspend = cache_postsuspend,
	.preresume = cache_preresume,
	.resume = cache_resume,
	.status = cache_status,
	.message = cache_message,
	.iterate_devices = cache_iterate_devices,
	.merge = cache_bvec_merge,
	.io_hints = cache_io_hints,
};

static int __init dm_cache_init(void)
{
	int r;

	r = dm_register_target(&cache_target);
	if (r) {
		DMERR("cache target registration failed: %d", r);
		return r;
	}

	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
	if (!migration_cache) {
		dm_unregister_target(&cache_target);
		return -ENOMEM;
	}

	return 0;
}

static void __exit dm_cache_exit(void)
{
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}

module_init(dm_cache_init);
module_exit(dm_cache_exit);

MODULE_DESCRIPTION(DM_NAME " cache target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");