dm-cache-target.c
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *	      either direction
 */

/*----------------------------------------------------------------*/

static size_t bitset_size_in_bytes(unsigned nr_entries)
{
	return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
}
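
/*
 * For example (assuming 64-bit longs): nr_entries = 100 gives
 * dm_div_up(100, 64) = 2 longs, i.e. 16 bytes.  Rounding up to whole
 * longs lets the bitset be driven with the standard bitops helpers.
 */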
static unsigned long *alloc_bitset(unsigned nr_entries)
{
	size_t s = bitset_size_in_bytes(nr_entries);
	return vzalloc(s);
}

static void clear_bitset(void *bitset, unsigned nr_entries)
{
	size_t s = bitset_size_in_bytes(nr_entries);
	memset(bitset, 0, s);
}

static void free_bitset(unsigned long *bits)
{
	vfree(bits);
}

/*----------------------------------------------------------------*/

#define PRISON_CELLS 1024
#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be >= 32KB
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
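
/*
 * With 512-byte sectors (SECTOR_SHIFT == 9) this works out as
 * 32768 >> 9 = 64 sectors, so the smallest permitted cache block
 * covers 32KB of data.
 */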
/*
 * FIXME: the cache is read/write for the time being.
 */
enum cache_mode {
	CM_WRITE,	   /* metadata may be changed */
	CM_READ_ONLY,	   /* metadata may not be changed */
};

struct cache_features {
	enum cache_mode mode;
	bool write_through:1;
};

struct cache_stats {
	atomic_t read_hit;
	atomic_t read_miss;
	atomic_t write_hit;
	atomic_t write_miss;
	atomic_t demotion;
	atomic_t promotion;
	atomic_t copies_avoided;
	atomic_t cache_cell_clash;
	atomic_t commit_count;
	atomic_t discard_count;
};

struct cache {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;

	/*
	 * Metadata is written to this device.
	 */
	struct dm_dev *metadata_dev;

	/*
	 * The slower of the two data devices.  Typically a spindle.
	 */
	struct dm_dev *origin_dev;

	/*
	 * The faster of the two data devices.  Typically an SSD.
	 */
	struct dm_dev *cache_dev;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	/*
	 * Size of the origin device in _complete_ blocks and native sectors.
	 */
	dm_oblock_t origin_blocks;
	sector_t origin_sectors;

	/*
	 * Size of the cache device in blocks.
	 */
	dm_cblock_t cache_size;

	/*
	 * Fields for converting from sectors to blocks.
	 */
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct dm_cache_metadata *cmd;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_writethrough_bios;
	struct list_head quiesced_migrations;
	struct list_head completed_migrations;
	struct list_head need_commit_migrations;
	sector_t migration_threshold;
	atomic_t nr_migrations;
	wait_queue_head_t migration_wait;

	/*
	 * cache_size entries, dirty if set
	 */
	dm_cblock_t nr_dirty;
	unsigned long *dirty_bitset;

	/*
	 * origin_blocks entries, discarded if set.
	 */
	uint32_t discard_block_size; /* a power of 2 times sectors per block */
	dm_dblock_t discard_nr_blocks;
	unsigned long *discard_bitset;

	struct dm_kcopyd_client *copier;
	struct workqueue_struct *wq;
	struct work_struct worker;

	struct delayed_work waker;
	unsigned long last_commit_jiffies;

	struct dm_bio_prison *prison;
	struct dm_deferred_set *all_io_ds;

	mempool_t *migration_pool;
	struct dm_cache_migration *next_migration;

	struct dm_cache_policy *policy;
	unsigned policy_nr_args;

	bool need_tick_bio:1;
	bool sized:1;
	bool quiescing:1;
	bool commit_requested:1;
	bool loaded_mappings:1;
	bool loaded_discards:1;

	struct cache_stats stats;

	/*
	 * Rather than reconstructing the table line for the status we just
	 * save it and regurgitate.
	 */
	unsigned nr_ctr_args;
	const char **ctr_args;
};
struct per_bio_data {
	bool tick:1;
	unsigned req_nr:2;
	struct dm_deferred_entry *all_io_entry;

	/*
	 * writethrough fields.  These MUST remain at the end of this
	 * structure and the 'cache' member must be the first as it
	 * is used to determine the offset of the writethrough fields.
	 */
	struct cache *cache;
	dm_cblock_t cblock;
	bio_end_io_t *saved_bi_end_io;
	struct dm_bio_details bio_details;
};
struct dm_cache_migration {
	struct list_head list;
	struct cache *cache;

	unsigned long start_jiffies;
	dm_oblock_t old_oblock;
	dm_oblock_t new_oblock;
	dm_cblock_t cblock;

	bool err:1;
	bool writeback:1;
	bool demote:1;
	bool promote:1;

	struct dm_bio_prison_cell *old_ocell;
	struct dm_bio_prison_cell *new_ocell;
};

/*
 * Processing a bio in the worker thread may require these memory
 * allocations.  We prealloc to avoid deadlocks (the same worker thread
 * frees them back to the mempool).
 */
struct prealloc {
	struct dm_cache_migration *mg;
	struct dm_bio_prison_cell *cell1;
	struct dm_bio_prison_cell *cell2;
};

static void wake_worker(struct cache *cache)
{
	queue_work(cache->wq, &cache->worker);
}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
{
	/* FIXME: change to use a local slab. */
	return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	dm_bio_prison_free_cell(cache->prison, cell);
}

static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
	if (!p->mg) {
		p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
		if (!p->mg)
			return -ENOMEM;
	}

	if (!p->cell1) {
		p->cell1 = alloc_prison_cell(cache);
		if (!p->cell1)
			return -ENOMEM;
	}

	if (!p->cell2) {
		p->cell2 = alloc_prison_cell(cache);
		if (!p->cell2)
			return -ENOMEM;
	}

	return 0;
}

static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
{
	if (p->cell2)
		free_prison_cell(cache, p->cell2);

	if (p->cell1)
		free_prison_cell(cache, p->cell1);

	if (p->mg)
		mempool_free(p->mg, cache->migration_pool);
}

static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
{
	struct dm_cache_migration *mg = p->mg;

	BUG_ON(!mg);
	p->mg = NULL;

	return mg;
}

/*
 * You must have a cell within the prealloc struct to return.  If not this
 * function will BUG() rather than returning NULL.
 */
static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
{
	struct dm_bio_prison_cell *r = NULL;

	if (p->cell1) {
		r = p->cell1;
		p->cell1 = NULL;

	} else if (p->cell2) {
		r = p->cell2;
		p->cell2 = NULL;
	} else
		BUG();

	return r;
}

/*
 * You can't have more than two cells in a prealloc struct.  BUG() will be
 * called if you try and overfill.
 */
static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
{
	if (!p->cell2)
		p->cell2 = cell;

	else if (!p->cell1)
		p->cell1 = cell;

	else
		BUG();
}

/*----------------------------------------------------------------*/

static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = 0;
	key->block = from_oblock(oblock);
}

/*
 * The caller hands in a preallocated cell, and a free function for it.
 * The cell will be freed if there's an error, or if it wasn't used because
 * a cell with that key already exists.
 */
typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);

static int bio_detain(struct cache *cache, dm_oblock_t oblock,
		      struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
		      cell_free_fn free_fn, void *free_context,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;

	build_key(oblock, &key);
	r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
	if (r)
		free_fn(free_context, cell_prealloc);

	return r;
}

static int get_cell(struct cache *cache,
		    dm_oblock_t oblock,
		    struct prealloc *structs,
		    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;
	struct dm_bio_prison_cell *cell_prealloc;

	cell_prealloc = prealloc_get_cell(structs);

	build_key(oblock, &key);
	r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
	if (r)
		prealloc_put_cell(structs, cell_prealloc);

	return r;
}

/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
	return test_bit(from_cblock(b), cache->dirty_bitset);
}

static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
		policy_set_dirty(cache->policy, oblock);
	}
}

static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
		policy_clear_dirty(cache->policy, oblock);
		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
		if (!from_cblock(cache->nr_dirty))
			dm_table_event(cache->ti->table);
	}
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
	return cache->sectors_per_block_shift >= 0;
}
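
/*
 * Note: the constructor (not part of this extract) is assumed to set
 * sectors_per_block_shift to a negative value when the block size is
 * not a power of two, which is what makes the test above sufficient.
 */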
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);

	return b;
}

static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
	uint32_t discard_blocks = cache->discard_block_size;
	dm_block_t b = from_oblock(oblock);

	if (!block_size_is_power_of_two(cache))
		discard_blocks = discard_blocks / cache->sectors_per_block;
	else
		discard_blocks >>= cache->sectors_per_block_shift;

	b = block_div(b, discard_blocks);

	return to_dblock(b);
}
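
/*
 * Worked example: discard_block_size is held in sectors and is a power
 * of two times sectors_per_block (see struct cache).  With
 * sectors_per_block = 64 and discard_block_size = 512 sectors, each
 * discard block covers 512 / 64 = 8 origin blocks, so oblock 100 maps
 * to dblock 100 / 8 = 12.
 */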
static void set_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	atomic_inc(&cache->stats.discard_count);

	spin_lock_irqsave(&cache->lock, flags);
	set_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	clear_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
		     cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cache->cmd, &stats);
	atomic_set(&cache->stats.read_hit, stats.read_hits);
	atomic_set(&cache->stats.read_miss, stats.read_misses);
	atomic_set(&cache->stats.write_hit, stats.write_hits);
	atomic_set(&cache->stats.write_miss, stats.write_misses);
}

static void save_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	stats.read_hits = atomic_read(&cache->stats.read_hit);
	stats.read_misses = atomic_read(&cache->stats.read_miss);
	stats.write_hits = atomic_read(&cache->stats.write_hit);
	stats.write_misses = atomic_read(&cache->stats.write_miss);

	dm_cache_metadata_set_stats(cache->cmd, &stats);
}

/*----------------------------------------------------------------
 * Per bio data
 *--------------------------------------------------------------*/

/*
 * If using writeback, leave out struct per_bio_data's writethrough fields.
 */
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))

static size_t get_per_bio_data_size(struct cache *cache)
{
	return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}
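
/*
 * This is why the 'cache' member must stay first among the writethrough
 * fields in struct per_bio_data: offsetof(struct per_bio_data, cache)
 * is exactly the size of the common fields, so writeback mode asks for
 * a smaller per-bio data area and the writethrough tail is simply
 * never allocated.
 */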
static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);

	BUG_ON(!pb);
	return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = get_per_bio_data(bio, data_size);

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	pb->all_io_entry = NULL;

	return pb;
}

/*----------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------*/

static void remap_to_origin(struct cache *cache, struct bio *bio)
{
	bio->bi_bdev = cache->origin_dev->bdev;
}

static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{
	sector_t bi_sector = bio->bi_sector;

	bio->bi_bdev = cache->cache_dev->bdev;
	if (!block_size_is_power_of_two(cache))
		bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
				 sector_div(bi_sector, cache->sectors_per_block);
	else
		bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
				 (bi_sector & (cache->sectors_per_block - 1));
}
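
/*
 * Example of the power-of-two path: with sectors_per_block = 64
 * (shift = 6), a bio at sector 1000 remapped to cblock 5 lands at
 * (5 << 6) | (1000 & 63) = 320 + 40 = sector 360 on the cache device,
 * i.e. the same offset within the block.  sector_div() in the other
 * path computes the equivalent quotient/remainder arithmetic.
 */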
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	spin_lock_irqsave(&cache->lock, flags);
	if (cache->need_tick_bio &&
	    !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
		pb->tick = true;
		cache->need_tick_bio = false;
	}
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					  dm_oblock_t oblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_origin(cache, bio);
	if (bio_data_dir(bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{
	remap_to_cache(cache, bio, cblock);
	if (bio_data_dir(bio) == WRITE) {
		set_dirty(cache, oblock, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}
}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
	sector_t block_nr = bio->bi_sector;

	if (!block_size_is_power_of_two(cache))
		(void) sector_div(block_nr, cache->sectors_per_block);
	else
		block_nr >>= cache->sectors_per_block_shift;

	return to_oblock(block_nr);
}
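
/*
 * E.g. sector 1000 with 64-sector blocks: 1000 >> 6 = oblock 15.
 * Note that sector_div() stores the quotient back into block_nr; the
 * remainder it returns is deliberately discarded here.
 */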
static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
	return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
}

static void issue(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	if (!bio_triggers_commit(cache, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in do_worker().
	 */
	spin_lock_irqsave(&cache->lock, flags);
	cache->commit_requested = true;
	bio_list_add(&cache->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_writethrough_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}
static void writethrough_endio(struct bio *bio, int err)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	bio->bi_end_io = pb->saved_bi_end_io;

	if (err) {
		bio_endio(bio, err);
		return;
	}

	dm_bio_restore(&pb->bio_details, bio);
	remap_to_cache(pb->cache, bio, pb->cblock);

	/*
	 * We can't issue this bio directly, since we're in interrupt
	 * context.  So it gets put on a bio list for processing by the
	 * worker thread.
	 */
	defer_writethrough_bio(pb->cache, bio);
}
/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices.  In future we'd like to clone the
 * bio and send them in parallel, but for now we're doing them in
 * series as this is easier.
 */
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
				       dm_oblock_t oblock, dm_cblock_t cblock)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	pb->cache = cache;
	pb->cblock = cblock;
	pb->saved_bi_end_io = bio->bi_end_io;
	dm_bio_record(&pb->bio_details, bio);
	bio->bi_end_io = writethrough_endio;

	remap_to_origin_clear_discard(pb->cache, bio, oblock);
}
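
/*
 * To summarise the round trip: dm_bio_record() snapshots the bio's
 * device/sector fields, the write goes to the origin first, then
 * writethrough_endio() restores the snapshot with dm_bio_restore(),
 * remaps the same bio to the cache device and defers it to the worker,
 * so the one bio is issued twice in series.
 */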
/*----------------------------------------------------------------
 * Migration processing
 *
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 *--------------------------------------------------------------*/

static void free_migration(struct dm_cache_migration *mg)
{
	mempool_free(mg, mg->cache->migration_pool);
}

static void inc_nr_migrations(struct cache *cache)
{
	atomic_inc(&cache->nr_migrations);
}

static void dec_nr_migrations(struct cache *cache)
{
	atomic_dec(&cache->nr_migrations);

	/*
	 * Wake the worker in case we're suspending the target.
	 */
	wake_up(&cache->migration_wait);
}

static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
			 bool holder)
{
	(holder ? dm_cell_release : dm_cell_release_no_holder)
		(cache->prison, cell, &cache->deferred_bios);
	free_prison_cell(cache, cell);
}

static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
		       bool holder)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	__cell_defer(cache, cell, holder);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void cleanup_migration(struct dm_cache_migration *mg)
{
	dec_nr_migrations(mg->cache);
	free_migration(mg);
}

static void migration_failure(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN_LIMIT("writeback failed; couldn't copy block");
		set_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);

	} else if (mg->demote) {
		DMWARN_LIMIT("demotion failed; couldn't copy block");
		policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);

		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
		if (mg->promote)
			cell_defer(cache, mg->new_ocell, true);
	} else {
		DMWARN_LIMIT("promotion failed; couldn't copy block");
		policy_remove_mapping(cache->policy, mg->new_oblock);
		cell_defer(cache, mg->new_ocell, true);
	}

	cleanup_migration(mg);
}

static void migration_success_pre_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		cell_defer(cache, mg->old_ocell, false);
		clear_dirty(cache, mg->old_oblock, mg->cblock);
		cleanup_migration(mg);
		return;

	} else if (mg->demote) {
		if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
			DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
			policy_force_mapping(cache->policy, mg->new_oblock,
					     mg->old_oblock);
			if (mg->promote)
				cell_defer(cache, mg->new_ocell, true);
			cleanup_migration(mg);
			return;
		}
	} else {
		if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
			DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
			policy_remove_mapping(cache->policy, mg->new_oblock);
			cleanup_migration(mg);
			return;
		}
	}

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->need_commit_migrations);
	cache->commit_requested = true;
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void migration_success_post_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN("writeback unexpectedly triggered commit");
		return;

	} else if (mg->demote) {
		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);

		if (mg->promote) {
			mg->demote = false;

			spin_lock_irqsave(&cache->lock, flags);
			list_add_tail(&mg->list, &cache->quiesced_migrations);
			spin_unlock_irqrestore(&cache->lock, flags);

		} else
			cleanup_migration(mg);

	} else {
		cell_defer(cache, mg->new_ocell, true);
		clear_dirty(cache, mg->new_oblock, mg->cblock);
		cleanup_migration(mg);
	}
}
static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
	struct cache *cache = mg->cache;

	if (read_err || write_err)
		mg->err = true;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void issue_copy_real(struct dm_cache_migration *mg)
{
	int r;
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;

	o_region.bdev = cache->origin_dev->bdev;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (mg->writeback || mg->demote) {
		/* demote */
		o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
	} else {
		/* promote */
		o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
	}

	if (r < 0)
		migration_failure(mg);
}

static void avoid_copy(struct dm_cache_migration *mg)
{
	atomic_inc(&mg->cache->stats.copies_avoided);
	migration_success_pre_commit(mg);
}

static void issue_copy(struct dm_cache_migration *mg)
{
	bool avoid;
	struct cache *cache = mg->cache;

	if (mg->writeback || mg->demote)
		avoid = !is_dirty(cache, mg->cblock) ||
			is_discarded_oblock(cache, mg->old_oblock);
	else
		avoid = is_discarded_oblock(cache, mg->new_oblock);

	avoid ? avoid_copy(mg) : issue_copy_real(mg);
}

static void complete_migration(struct dm_cache_migration *mg)
{
	if (mg->err)
		migration_failure(mg);
	else
		migration_success_pre_commit(mg);
}

static void process_migrations(struct cache *cache, struct list_head *head,
			       void (*fn)(struct dm_cache_migration *))
{
	unsigned long flags;
	struct list_head list;
	struct dm_cache_migration *mg, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&cache->lock, flags);
	list_splice_init(head, &list);
	spin_unlock_irqrestore(&cache->lock, flags);

	list_for_each_entry_safe(mg, tmp, &list, list)
		fn(mg);
}

static void __queue_quiesced_migration(struct dm_cache_migration *mg)
{
	list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
}

static void queue_quiesced_migration(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	spin_lock_irqsave(&cache->lock, flags);
	__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
{
	unsigned long flags;
	struct dm_cache_migration *mg, *tmp;

	spin_lock_irqsave(&cache->lock, flags);
	list_for_each_entry_safe(mg, tmp, work, list)
		__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}
static void check_for_quiesced_migrations(struct cache *cache,
					  struct per_bio_data *pb)
{
	struct list_head work;

	if (!pb->all_io_entry)
		return;

	INIT_LIST_HEAD(&work);
	dm_deferred_entry_dec(pb->all_io_entry, &work);

	if (!list_empty(&work))
		queue_quiesced_migrations(cache, &work);
}
static void quiesce_migration(struct dm_cache_migration *mg)
{
	if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
		queue_quiesced_migration(mg);
}

static void promote(struct cache *cache, struct prealloc *structs,
		    dm_oblock_t oblock, dm_cblock_t cblock,
		    struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = false;
	mg->demote = false;
	mg->promote = true;
	mg->cache = cache;
	mg->new_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = NULL;
	mg->new_ocell = cell;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

static void writeback(struct cache *cache, struct prealloc *structs,
		      dm_oblock_t oblock, dm_cblock_t cblock,
		      struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = true;
	mg->demote = false;
	mg->promote = false;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

static void demote_then_promote(struct cache *cache, struct prealloc *structs,
				dm_oblock_t old_oblock, dm_oblock_t new_oblock,
				dm_cblock_t cblock,
				struct dm_bio_prison_cell *old_ocell,
				struct dm_bio_prison_cell *new_ocell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = true;
	mg->cache = cache;
	mg->old_oblock = old_oblock;
	mg->new_oblock = new_oblock;
	mg->cblock = cblock;
	mg->old_ocell = old_ocell;
	mg->new_ocell = new_ocell;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

/*----------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------*/

static void defer_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void process_flush_bio(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(bio->bi_size);
	if (!pb->req_nr)
		remap_to_origin(cache, bio);
	else
		remap_to_cache(cache, bio, 0);

	issue(cache, bio);
}
/*
 * People generally discard large parts of a device, e.g. the whole device
 * when formatting.  Splitting these large discards up into cache block
 * sized ios and then quiescing (always necessary for discard) takes too
 * long.
 *
 * We keep it simple, and allow any size of discard to come in, and just
 * mark off blocks on the discard bitset.  No passdown occurs!
 *
 * To implement passdown we need to change the bio_prison such that a cell
 * can have a key that spans many blocks.
 */
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
	dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
						  cache->discard_block_size);
	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
	dm_block_t b;

	end_block = block_div(end_block, cache->discard_block_size);

	for (b = start_block; b < end_block; b++)
		set_discard(cache, to_dblock(b));

	bio_endio(bio, 0);
}
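
/*
 * Note the asymmetric rounding: the start is rounded up and the end
 * rounded down, so only discard blocks covered in their entirety are
 * marked.  E.g. with a 512-sector discard block, a discard covering
 * sectors 200-1499 marks only block 1 (sectors 512-1023); the partial
 * blocks either side stay unmarked.
 */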
static bool spare_migration_bandwidth(struct cache *cache)
{
	sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
		cache->sectors_per_block;

	return current_volume < cache->migration_threshold;
}
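
/*
 * i.e. a new migration is only allowed if the data volume already in
 * flight, plus this one, stays under the threshold.  For example with
 * migration_threshold = 2048 sectors and 64-sector blocks, at most 31
 * migrations can be in flight at once.
 */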
static bool is_writethrough_io(struct cache *cache, struct bio *bio,
			       dm_cblock_t cblock)
{
	return bio_data_dir(bio) == WRITE &&
		cache->features.write_through && !is_dirty(cache, cblock);
}

static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_hit : &cache->stats.write_hit);
}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_miss : &cache->stats.write_miss);
}

static void process_bio(struct cache *cache, struct prealloc *structs,
			struct bio *bio)
{
	int r;
	bool release_cell = true;
	dm_oblock_t block = get_bio_block(cache, bio);
	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
	struct policy_result lookup_result;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	bool discarded_block = is_discarded_oblock(cache, block);
	bool can_migrate = discarded_block || spare_migration_bandwidth(cache);

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell_prealloc = prealloc_get_cell(structs);
	r = bio_detain(cache, block, bio, cell_prealloc,
		       (cell_free_fn) prealloc_put_cell,
		       structs, &new_ocell);
	if (r > 0)
		return;

	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
		       bio, &lookup_result);

	if (r == -EWOULDBLOCK)
		/* migration has been denied */
		lookup_result.op = POLICY_MISS;

	switch (lookup_result.op) {
	case POLICY_HIT:
		inc_hit_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

		issue(cache, bio);
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
		remap_to_origin_clear_discard(cache, bio, block);
		issue(cache, bio);
		break;

	case POLICY_NEW:
		atomic_inc(&cache->stats.promotion);
		promote(cache, structs, block, lookup_result.cblock, new_ocell);
		release_cell = false;
		break;

	case POLICY_REPLACE:
		cell_prealloc = prealloc_get_cell(structs);
		r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
			       (cell_free_fn) prealloc_put_cell,
			       structs, &old_ocell);
		if (r > 0) {
			/*
			 * We have to be careful to avoid lock inversion of
			 * the cells.  So we back off, and wait for the
			 * old_ocell to become free.
			 */
			policy_force_mapping(cache->policy, block,
					     lookup_result.old_oblock);
			atomic_inc(&cache->stats.cache_cell_clash);
			break;
		}
		atomic_inc(&cache->stats.demotion);
		atomic_inc(&cache->stats.promotion);

		demote_then_promote(cache, structs, lookup_result.old_oblock,
				    block, lookup_result.cblock,
				    old_ocell, new_ocell);
		release_cell = false;
		break;

	default:
		DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
	}

	if (release_cell)
		cell_defer(cache, new_ocell, false);
}

static int need_commit_due_to_time(struct cache *cache)
{
	return jiffies < cache->last_commit_jiffies ||
	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
}
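
/*
 * The first comparison also forces a commit if jiffies has wrapped
 * (or last_commit_jiffies is otherwise in the future); the second is
 * the normal "more than COMMIT_PERIOD has elapsed" check.
 */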
static int commit_if_needed(struct cache *cache)
{
	if (dm_cache_changed_this_transaction(cache->cmd) &&
	    (cache->commit_requested || need_commit_due_to_time(cache))) {
		atomic_inc(&cache->stats.commit_count);
		cache->last_commit_jiffies = jiffies;
		cache->commit_requested = false;
		return dm_cache_commit(cache->cmd, false);
	}

	return 0;
}

static void process_deferred_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;
	struct prealloc structs;

	memset(&structs, 0, sizeof(structs));
	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while (!bio_list_empty(&bios)) {
		/*
		 * If we've got no free migration structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (prealloc_data_structs(cache, &structs)) {
			spin_lock_irqsave(&cache->lock, flags);
			bio_list_merge(&cache->deferred_bios, &bios);
			spin_unlock_irqrestore(&cache->lock, flags);
			break;
		}

		bio = bio_list_pop(&bios);

		if (bio->bi_rw & REQ_FLUSH)
			process_flush_bio(cache, bio);
		else if (bio->bi_rw & REQ_DISCARD)
			process_discard_bio(cache, bio);
		else
			process_bio(cache, &structs, bio);
	}

	prealloc_free_structs(cache, &structs);
}

static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_flush_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}

static void process_deferred_writethrough_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void writeback_some_dirty_blocks(struct cache *cache)
{
	int r = 0;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
	struct prealloc structs;
	struct dm_bio_prison_cell *old_ocell;

	memset(&structs, 0, sizeof(structs));

	while (spare_migration_bandwidth(cache)) {
		if (prealloc_data_structs(cache, &structs))
			break;

		r = policy_writeback_work(cache->policy, &oblock, &cblock);
		if (r)
			break;

		r = get_cell(cache, oblock, &structs, &old_ocell);
		if (r) {
			policy_set_dirty(cache->policy, oblock);
			break;
		}

		writeback(cache, &structs, oblock, cblock, old_ocell);
	}

	prealloc_free_structs(cache, &structs);
}

/*----------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------*/

static void start_quiescing(struct cache *cache)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	cache->quiescing = true;
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void stop_quiescing(struct cache *cache)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	cache->quiescing = false;
	spin_unlock_irqrestore(&cache->lock, flags);
}

static bool is_quiescing(struct cache *cache)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = cache->quiescing;
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

static void wait_for_migrations(struct cache *cache)
{
	wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
}

static void stop_worker(struct cache *cache)
{
	cancel_delayed_work(&cache->waker);
	flush_workqueue(cache->wq);
}

static void requeue_deferred_io(struct cache *cache)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static int more_work(struct cache *cache)
{
	if (is_quiescing(cache))
		return !list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
	else
		return !bio_list_empty(&cache->deferred_bios) ||
			!bio_list_empty(&cache->deferred_flush_bios) ||
			!bio_list_empty(&cache->deferred_writethrough_bios) ||
			!list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
}

static void do_worker(struct work_struct *ws)
{
	struct cache *cache = container_of(ws, struct cache, worker);

	do {
		if (!is_quiescing(cache))
			process_deferred_bios(cache);

		process_migrations(cache, &cache->quiesced_migrations, issue_copy);
		process_migrations(cache, &cache->completed_migrations, complete_migration);

		writeback_some_dirty_blocks(cache);

		process_deferred_writethrough_bios(cache);

		if (commit_if_needed(cache)) {
			process_deferred_flush_bios(cache, false);

			/*
			 * FIXME: rollback metadata or just go into a
			 * failure mode and error everything
			 */
		} else {
			process_deferred_flush_bios(cache, true);
			process_migrations(cache, &cache->need_commit_migrations,
					   migration_success_post_commit);
		}
	} while (more_work(cache));
}
  1171. /*
  1172. * We want to commit periodically so that not too much
  1173. * unwritten metadata builds up.
  1174. */
  1175. static void do_waker(struct work_struct *ws)
  1176. {
  1177. struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
  1178. policy_tick(cache->policy);
  1179. wake_worker(cache);
  1180. queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
  1181. }
  1182. /*----------------------------------------------------------------*/
  1183. static int is_congested(struct dm_dev *dev, int bdi_bits)
  1184. {
  1185. struct request_queue *q = bdev_get_queue(dev->bdev);
  1186. return bdi_congested(&q->backing_dev_info, bdi_bits);
  1187. }
  1188. static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
  1189. {
  1190. struct cache *cache = container_of(cb, struct cache, callbacks);
  1191. return is_congested(cache->origin_dev, bdi_bits) ||
  1192. is_congested(cache->cache_dev, bdi_bits);
  1193. }
  1194. /*----------------------------------------------------------------
  1195. * Target methods
  1196. *--------------------------------------------------------------*/
  1197. /*
  1198. * This function gets called on the error paths of the constructor, so we
  1199. * have to cope with a partially initialised struct.
  1200. */
  1201. static void destroy(struct cache *cache)
  1202. {
  1203. unsigned i;
  1204. if (cache->next_migration)
  1205. mempool_free(cache->next_migration, cache->migration_pool);
  1206. if (cache->migration_pool)
  1207. mempool_destroy(cache->migration_pool);
  1208. if (cache->all_io_ds)
  1209. dm_deferred_set_destroy(cache->all_io_ds);
  1210. if (cache->prison)
  1211. dm_bio_prison_destroy(cache->prison);
  1212. if (cache->wq)
  1213. destroy_workqueue(cache->wq);
  1214. if (cache->dirty_bitset)
  1215. free_bitset(cache->dirty_bitset);
  1216. if (cache->discard_bitset)
  1217. free_bitset(cache->discard_bitset);
  1218. if (cache->copier)
  1219. dm_kcopyd_client_destroy(cache->copier);
  1220. if (cache->cmd)
  1221. dm_cache_metadata_close(cache->cmd);
  1222. if (cache->metadata_dev)
  1223. dm_put_device(cache->ti, cache->metadata_dev);
  1224. if (cache->origin_dev)
  1225. dm_put_device(cache->ti, cache->origin_dev);
  1226. if (cache->cache_dev)
  1227. dm_put_device(cache->ti, cache->cache_dev);
  1228. if (cache->policy)
  1229. dm_cache_policy_destroy(cache->policy);
  1230. for (i = 0; i < cache->nr_ctr_args ; i++)
  1231. kfree(cache->ctr_args[i]);
  1232. kfree(cache->ctr_args);
  1233. kfree(cache);
  1234. }
  1235. static void cache_dtr(struct dm_target *ti)
  1236. {
  1237. struct cache *cache = ti->private;
  1238. destroy(cache);
  1239. }
  1240. static sector_t get_dev_size(struct dm_dev *dev)
  1241. {
  1242. return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
  1243. }
  1244. /*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev    : fast device holding the persistent metadata
 * cache dev       : fast device holding cached data blocks
 * origin dev      : slow device holding original data blocks
 * block size      : cache unit size in sectors
 *
 * #feature args   : number of feature arguments passed
 * feature args    : writethrough.  (The default is writeback.)
 *
 * policy          : the replacement policy to use
 * #policy args    : an even number of policy arguments corresponding
 *                   to key/value pairs passed to the policy
 * policy args     : key/value pairs passed to the policy
 *                   E.g. 'sequential_threshold 1024'
 *                   See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough  : write through caching that prohibits cache block
 *                   content from being different from origin block content.
 *                   Without this argument, the default behaviour is to write
 *                   back cache block contents later for performance reasons,
 *                   so they may differ from the corresponding origin blocks.
 */
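
/*
 * Illustrative table line (not from the original source; device paths,
 * sizes and the "mq" policy name are assumed purely for the example):
 *
 *   dmsetup create my-cache --table \
 *     '0 41943040 cache /dev/fast/meta /dev/fast/blocks /dev/slow/origin \
 *      512 1 writethrough mq 2 sequential_threshold 1024'
 *
 * Here 41943040 is the origin length in sectors, 512 is a 256 KiB cache
 * block size, "1 writethrough" is the feature argument list, and
 * "2 sequential_threshold 1024" passes one key/value pair to the policy.
 */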

struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;

	struct dm_dev *origin_dev;
	sector_t origin_sectors;

	uint32_t block_size;

	const char *policy_name;
	int policy_argc;
	const char **policy_argv;

	struct cache_features features;
};

static void destroy_cache_args(struct cache_args *ca)
{
	if (ca->metadata_dev)
		dm_put_device(ca->ti, ca->metadata_dev);

	if (ca->cache_dev)
		dm_put_device(ca->ti, ca->cache_dev);

	if (ca->origin_dev)
		dm_put_device(ca->ti, ca->origin_dev);

	kfree(ca);
}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
	if (!as->argc) {
		*error = "Insufficient args";
		return false;
	}

	return true;
}

static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(ca->metadata_dev);
	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b),
		       DM_CACHE_METADATA_MAX_SECTORS_WARNING);

	return 0;
}

static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->cache_dev);
	if (r) {
		*error = "Error opening cache device";
		return r;
	}
	ca->cache_sectors = get_dev_size(ca->cache_dev);

	return 0;
}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->origin_dev);
	if (r) {
		*error = "Error opening origin device";
		return r;
	}

	ca->origin_sectors = get_dev_size(ca->origin_dev);
	if (ca->ti->len > ca->origin_sectors) {
		*error = "Device size larger than cached device";
		return -EINVAL;
	}

	return 0;
}

static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	unsigned long tmp;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
	    tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		*error = "Invalid data block size";
		return -EINVAL;
	}

	if (tmp > ca->cache_sectors) {
		*error = "Data block size is larger than the cache device";
		return -EINVAL;
	}

	ca->block_size = tmp;

	return 0;
}

static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->write_through = false;
}

static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
			  char **error)
{
	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of cache feature arguments"},
	};

	int r;
	unsigned argc;
	const char *arg;
	struct cache_features *cf = &ca->features;

	init_features(cf);

	r = dm_read_arg_group(_args, as, &argc, error);
	if (r)
		return -EINVAL;

	while (argc--) {
		arg = dm_shift_arg(as);

		if (!strcasecmp(arg, "writeback"))
			cf->write_through = false;
		else if (!strcasecmp(arg, "writethrough"))
			cf->write_through = true;
		else {
			*error = "Unrecognised cache feature requested";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
			char **error)
{
	static struct dm_arg _args[] = {
		{0, 1024, "Invalid number of policy arguments"},
	};

	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	ca->policy_name = dm_shift_arg(as);

	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
	if (r)
		return -EINVAL;

	ca->policy_argv = (const char **)as->argv;
	dm_consume_args(as, ca->policy_argc);

	return 0;
}

static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
			    char **error)
{
	int r;
	struct dm_arg_set as;

	as.argc = argc;
	as.argv = argv;

	r = parse_metadata_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_cache_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_origin_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_block_size(ca, &as, error);
	if (r)
		return r;

	r = parse_features(ca, &as, error);
	if (r)
		return r;

	r = parse_policy(ca, &as, error);
	if (r)
		return r;

	return 0;
}

/*----------------------------------------------------------------*/

static struct kmem_cache *migration_cache;

static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
{
	int r = 0;

	if (argc & 1) {
		DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
		return -EINVAL;
	}

	while (argc) {
		r = policy_set_config_value(p, argv[0], argv[1]);
		if (r) {
			DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
			       argv[0], argv[1]);
			return r;
		}

		argc -= 2;
		argv += 2;
	}

	return r;
}

static int create_cache_policy(struct cache *cache, struct cache_args *ca,
			       char **error)
{
	int r;

	cache->policy = dm_cache_policy_create(ca->policy_name,
					       cache->cache_size,
					       cache->origin_sectors,
					       cache->sectors_per_block);
	if (!cache->policy) {
		*error = "Error creating cache's policy";
		return -ENOMEM;
	}

	r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
	if (r) {
		*error = "Error setting cache policy's config values";
		dm_cache_policy_destroy(cache->policy);
		cache->policy = NULL;
	}

	return r;
}

/*
 * We want the discard block size to be a power of two, at least the size
 * of the cache block size, and have no more than 2^14 discard blocks
 * across the origin.
 */
#define MAX_DISCARD_BLOCKS (1 << 14)

static bool too_many_discard_blocks(sector_t discard_block_size,
				    sector_t origin_size)
{
	(void) sector_div(origin_size, discard_block_size);

	return origin_size > MAX_DISCARD_BLOCKS;
}

static sector_t calculate_discard_block_size(sector_t cache_block_size,
					     sector_t origin_size)
{
	sector_t discard_block_size;

	discard_block_size = roundup_pow_of_two(cache_block_size);

	if (origin_size)
		while (too_many_discard_blocks(discard_block_size, origin_size))
			discard_block_size *= 2;

	return discard_block_size;
}
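
/*
 * Worked example (illustrative, not from the original source): with a
 * 512-sector cache block and a 1 TiB origin (2^31 sectors), the loop starts
 * at 512 sectors, which would give 2^31 / 2^9 = 2^22 discard blocks.  It
 * keeps doubling until 2^31 / discard_block_size <= 2^14, i.e. it settles
 * on a discard block of 2^17 sectors (64 MiB).
 */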

#define DEFAULT_MIGRATION_THRESHOLD 2048

static int cache_create(struct cache_args *ca, struct cache **result)
{
	int r = 0;
	char **error = &ca->ti->error;
	struct cache *cache;
	struct dm_target *ti = ca->ti;
	dm_block_t origin_blocks;
	struct dm_cache_metadata *cmd;
	bool may_format = ca->features.mode == CM_WRITE;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;

	cache->ti = ca->ti;
	ti->private = cache;
	ti->num_flush_bios = 2;
	ti->flush_supported = true;

	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	ti->discard_zeroes_data_unsupported = true;

	memcpy(&cache->features, &ca->features, sizeof(cache->features));
	ti->per_bio_data_size = get_per_bio_data_size(cache);

	cache->callbacks.congested_fn = cache_is_congested;
	dm_table_add_target_callbacks(ti->table, &cache->callbacks);

	cache->metadata_dev = ca->metadata_dev;
	cache->origin_dev = ca->origin_dev;
	cache->cache_dev = ca->cache_dev;

	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;

	/* FIXME: factor out this whole section */
	origin_blocks = cache->origin_sectors = ca->origin_sectors;
	origin_blocks = block_div(origin_blocks, ca->block_size);
	cache->origin_blocks = to_oblock(origin_blocks);

	cache->sectors_per_block = ca->block_size;
	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
		r = -EINVAL;
		goto bad;
	}

	if (ca->block_size & (ca->block_size - 1)) {
		dm_block_t cache_size = ca->cache_sectors;

		cache->sectors_per_block_shift = -1;
		cache_size = block_div(cache_size, ca->block_size);
		cache->cache_size = to_cblock(cache_size);
	} else {
		cache->sectors_per_block_shift = __ffs(ca->block_size);
		cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
	}

	r = create_cache_policy(cache, ca, error);
	if (r)
		goto bad;
	cache->policy_nr_args = ca->policy_argc;

	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
				     ca->block_size, may_format,
				     dm_cache_policy_get_hint_size(cache->policy));
	if (IS_ERR(cmd)) {
		*error = "Error creating metadata object";
		r = PTR_ERR(cmd);
		goto bad;
	}
	cache->cmd = cmd;

	spin_lock_init(&cache->lock);
	bio_list_init(&cache->deferred_bios);
	bio_list_init(&cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
	atomic_set(&cache->nr_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);

	r = -ENOMEM;
	cache->nr_dirty = 0;
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	if (!cache->dirty_bitset) {
		*error = "could not allocate dirty bitset";
		goto bad;
	}
	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));

	cache->discard_block_size =
		calculate_discard_block_size(cache->sectors_per_block,
					     cache->origin_sectors);
	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
	if (!cache->discard_bitset) {
		*error = "could not allocate discard bitset";
		goto bad;
	}
	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(cache->copier)) {
		*error = "could not create kcopyd client";
		r = PTR_ERR(cache->copier);
		goto bad;
	}

	cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!cache->wq) {
		*error = "could not create workqueue for metadata object";
		goto bad;
	}
	INIT_WORK(&cache->worker, do_worker);
	INIT_DELAYED_WORK(&cache->waker, do_waker);
	cache->last_commit_jiffies = jiffies;

	cache->prison = dm_bio_prison_create(PRISON_CELLS);
	if (!cache->prison) {
		*error = "could not create bio prison";
		goto bad;
	}

	cache->all_io_ds = dm_deferred_set_create();
	if (!cache->all_io_ds) {
		*error = "could not create all_io deferred set";
		goto bad;
	}

	cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
							 migration_cache);
	if (!cache->migration_pool) {
		*error = "Error creating cache's migration mempool";
		goto bad;
	}

	cache->next_migration = NULL;

	cache->need_tick_bio = true;
	cache->sized = false;
	cache->quiescing = false;
	cache->commit_requested = false;
	cache->loaded_mappings = false;
	cache->loaded_discards = false;

	load_stats(cache);

	atomic_set(&cache->stats.demotion, 0);
	atomic_set(&cache->stats.promotion, 0);
	atomic_set(&cache->stats.copies_avoided, 0);
	atomic_set(&cache->stats.cache_cell_clash, 0);
	atomic_set(&cache->stats.commit_count, 0);
	atomic_set(&cache->stats.discard_count, 0);

	*result = cache;
	return 0;

bad:
	destroy(cache);
	return r;
}

static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
	unsigned i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);
		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			return -ENOMEM;
		}
	}

	cache->nr_ctr_args = argc;
	cache->ctr_args = copy;

	return 0;
}

static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct cache_args *ca;
	struct cache *cache = NULL;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		ti->error = "Error allocating memory for cache";
		return -ENOMEM;
	}
	ca->ti = ti;

	r = parse_cache_args(ca, argc, argv, &ti->error);
	if (r)
		goto out;

	r = cache_create(ca, &cache);
	if (r)
		goto out;

	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
	if (r) {
		destroy(cache);
		goto out;
	}

	ti->private = cache;

out:
	destroy_cache_args(ca);
	return r;
}

static int cache_map(struct dm_target *ti, struct bio *bio)
{
	struct cache *cache = ti->private;

	int r;
	dm_oblock_t block = get_bio_block(cache, bio);
	size_t pb_data_size = get_per_bio_data_size(cache);
	bool can_migrate = false;
	bool discarded_block;
	struct dm_bio_prison_cell *cell;
	struct policy_result lookup_result;
	struct per_bio_data *pb;

	if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
		/*
		 * This can only occur if the io goes to a partial block at
		 * the end of the origin device.  We don't cache these.
		 * Just remap to the origin and carry on.
		 */
		remap_to_origin_clear_discard(cache, bio, block);
		return DM_MAPIO_REMAPPED;
	}

	pb = init_per_bio_data(bio, pb_data_size);

	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell = alloc_prison_cell(cache);
	if (!cell) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = bio_detain(cache, block, bio, cell,
		       (cell_free_fn) free_prison_cell,
		       cache, &cell);
	if (r) {
		if (r < 0)
			defer_bio(cache, bio);

		return DM_MAPIO_SUBMITTED;
	}

	discarded_block = is_discarded_oblock(cache, block);

	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
		       bio, &lookup_result);
	if (r == -EWOULDBLOCK) {
		cell_defer(cache, cell, true);
		return DM_MAPIO_SUBMITTED;

	} else if (r) {
		DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	switch (lookup_result.op) {
	case POLICY_HIT:
		inc_hit_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

		cell_defer(cache, cell, false);
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (pb->req_nr != 0) {
			/*
			 * This is a duplicate writethrough io that is no
			 * longer needed because the block has been demoted.
			 */
			bio_endio(bio, 0);
			cell_defer(cache, cell, false);
			return DM_MAPIO_SUBMITTED;
		} else {
			remap_to_origin_clear_discard(cache, bio, block);
			cell_defer(cache, cell, false);
		}
		break;

	default:
		DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	return DM_MAPIO_REMAPPED;
}

static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct cache *cache = ti->private;
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (pb->tick) {
		policy_tick(cache->policy);

		spin_lock_irqsave(&cache->lock, flags);
		cache->need_tick_bio = true;
		spin_unlock_irqrestore(&cache->lock, flags);
	}

	check_for_quiesced_migrations(cache, pb);

	return 0;
}

static int write_dirty_bitset(struct cache *cache)
{
	unsigned i, r;

	for (i = 0; i < from_cblock(cache->cache_size); i++) {
		r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
				       is_dirty(cache, to_cblock(i)));
		if (r)
			return r;
	}

	return 0;
}

static int write_discard_bitset(struct cache *cache)
{
	unsigned i, r;

	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
					   cache->discard_nr_blocks);
	if (r) {
		DMERR("could not resize on-disk discard bitset");
		return r;
	}

	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
					 is_discarded(cache, to_dblock(i)));
		if (r)
			return r;
	}

	return 0;
}

static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
		     uint32_t hint)
{
	struct cache *cache = context;

	return dm_cache_save_hint(cache->cmd, cblock, hint);
}

static int write_hints(struct cache *cache)
{
	int r;

	r = dm_cache_begin_hints(cache->cmd, cache->policy);
	if (r) {
		DMERR("dm_cache_begin_hints failed");
		return r;
	}

	r = policy_walk_mappings(cache->policy, save_hint, cache);
	if (r)
		DMERR("policy_walk_mappings failed");

	return r;
}

/*
 * returns true on success
 */
static bool sync_metadata(struct cache *cache)
{
	int r1, r2, r3, r4;

	r1 = write_dirty_bitset(cache);
	if (r1)
		DMERR("could not write dirty bitset");

	r2 = write_discard_bitset(cache);
	if (r2)
		DMERR("could not write discard bitset");

	save_stats(cache);

	r3 = write_hints(cache);
	if (r3)
		DMERR("could not write hints");

	/*
	 * If writing the above metadata failed, we still commit, but don't
	 * set the clean shutdown flag.  This will effectively force every
	 * dirty bit to be set on reload.
	 */
	r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
	if (r4)
		DMERR("could not write cache metadata.  Data loss may occur.");

	return !r1 && !r2 && !r3 && !r4;
}

static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	start_quiescing(cache);
	wait_for_migrations(cache);
	stop_worker(cache);
	requeue_deferred_io(cache);
	stop_quiescing(cache);

	(void) sync_metadata(cache);
}

static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
			bool dirty, uint32_t hint, bool hint_valid)
{
	int r;
	struct cache *cache = context;

	r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
	if (r)
		return r;

	if (dirty)
		set_dirty(cache, oblock, cblock);
	else
		clear_dirty(cache, oblock, cblock);

	return 0;
}

static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct cache *cache = context;

	/* FIXME: handle mis-matched block size */

	if (discard)
		set_discard(cache, dblock);
	else
		clear_discard(cache, dblock);

	return 0;
}

static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	sector_t actual_cache_size = get_dev_size(cache->cache_dev);
	(void) sector_div(actual_cache_size, cache->sectors_per_block);

	/*
	 * Check to see if the cache has resized.
	 */
	if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
		cache->cache_size = to_cblock(actual_cache_size);

		r = dm_cache_resize(cache->cmd, cache->cache_size);
		if (r) {
			DMERR("could not resize cache metadata");
			return r;
		}

		cache->sized = true;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("could not load cache mappings");
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		r = dm_cache_load_discards(cache->cmd, load_discard, cache);
		if (r) {
			DMERR("could not load origin discards");
			return r;
		}

		cache->loaded_discards = true;
	}

	return r;
}

static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}

/*
 * Status format:
 *
 * <#used metadata blocks>/<#total metadata blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#blocks in cache> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <#policy args> <policy args>*
 */
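
/*
 * Illustrative STATUSTYPE_INFO output (numbers made up for the example),
 * following the format above and the DMEMIT calls below:
 *
 *   23/1024 4096 17 512 89 3 3 2048 12 1 writethrough 2 migration_threshold 2048 ...
 *
 * i.e. 23 of 1024 metadata blocks used, read hits/misses 4096/17, write
 * hits/misses 512/89, 3 demotions, 3 promotions, 2048 blocks resident,
 * 12 dirty, one feature arg ("writethrough"), the core key/value pair
 * "migration_threshold 2048", then whatever the policy emits.
 */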

static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;

	switch (type) {
	case STATUSTYPE_INFO:
		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
			r = dm_cache_commit(cache->cmd, false);
			if (r)
				DMERR("could not commit metadata for accurate status");
		}

		r = dm_cache_get_free_metadata_block_count(cache->cmd,
							   &nr_free_blocks_metadata);
		if (r) {
			DMERR("could not get metadata free block count");
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("could not get metadata device size");
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned) atomic_read(&cache->stats.read_hit),
		       (unsigned) atomic_read(&cache->stats.read_miss),
		       (unsigned) atomic_read(&cache->stats.write_hit),
		       (unsigned) atomic_read(&cache->stats.write_miss),
		       (unsigned) atomic_read(&cache->stats.demotion),
		       (unsigned) atomic_read(&cache->stats.promotion),
		       (unsigned long long) from_cblock(residency),
		       cache->nr_dirty);

		if (cache->features.write_through)
			DMEMIT("1 writethrough ");
		else
			DMEMIT("0 ");

		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
			if (r)
				DMERR("policy_emit_config_values returned %d", r);
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < cache->nr_ctr_args - 1; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
	}

	return;

err:
	DMEMIT("Error");
}

#define NOT_CORE_OPTION 1

static int process_config_option(struct cache *cache, char **argv)
{
	unsigned long tmp;

	if (!strcasecmp(argv[0], "migration_threshold")) {
		if (kstrtoul(argv[1], 10, &tmp))
			return -EINVAL;

		cache->migration_threshold = tmp;
		return 0;
	}

	return NOT_CORE_OPTION;
}

/*
 * Supports <key> <value>.
 *
 * The key migration_threshold is supported by the cache target core.
 */
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct cache *cache = ti->private;

	if (argc != 2)
		return -EINVAL;

	r = process_config_option(cache, argv);
	if (r == NOT_CORE_OPTION)
		return policy_set_config_value(cache->policy, argv[0], argv[1]);

	return r;
}
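
/*
 * Usage sketch (illustrative; the device name is made up): the message
 * interface is driven from userspace with dmsetup, e.g.
 *
 *   dmsetup message my-cache 0 migration_threshold 4096
 *
 * Keys other than migration_threshold fall through to the policy's
 * set_config_value hook, e.g. 'sequential_threshold 1024'.
 */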

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r = 0;
	struct cache *cache = ti->private;

	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}

/*
 * We assume I/O is going to the origin (which is the volume
 * more likely to have restrictions e.g. by being striped).
 * (Looking up the exact location of the data would be expensive
 * and could always be out of date by the time the bio is submitted.)
 */
static int cache_bvec_merge(struct dm_target *ti,
			    struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size)
{
	struct cache *cache = ti->private;
	struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cache->origin_dev->bdev;
	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the cache device
	 */
	limits->max_discard_sectors = cache->discard_block_size * 1024;
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;

	blk_limits_io_min(limits, 0);
	blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	set_discard_limits(cache, limits);
}

/*----------------------------------------------------------------*/

static struct target_type cache_target = {
	.name = "cache",
	.version = {1, 1, 0},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.map = cache_map,
	.end_io = cache_end_io,
	.postsuspend = cache_postsuspend,
	.preresume = cache_preresume,
	.resume = cache_resume,
	.status = cache_status,
	.message = cache_message,
	.iterate_devices = cache_iterate_devices,
	.merge = cache_bvec_merge,
	.io_hints = cache_io_hints,
};

static int __init dm_cache_init(void)
{
	int r;

	r = dm_register_target(&cache_target);
	if (r) {
		DMERR("cache target registration failed: %d", r);
		return r;
	}

	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
	if (!migration_cache) {
		dm_unregister_target(&cache_target);
		return -ENOMEM;
	}

	return 0;
}

static void __exit dm_cache_exit(void)
{
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}

module_init(dm_cache_init);
module_exit(dm_cache_exit);

MODULE_DESCRIPTION(DM_NAME " cache target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");