dm.c 61 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771
  1. /*
  2. * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  4. *
  5. * This file is released under the GPL.
  6. */
  7. #include "dm.h"
  8. #include "dm-uevent.h"
  9. #include <linux/init.h>
  10. #include <linux/module.h>
  11. #include <linux/mutex.h>
  12. #include <linux/moduleparam.h>
  13. #include <linux/blkpg.h>
  14. #include <linux/bio.h>
  15. #include <linux/buffer_head.h>
  16. #include <linux/mempool.h>
  17. #include <linux/slab.h>
  18. #include <linux/idr.h>
  19. #include <linux/hdreg.h>
  20. #include <trace/events/block.h>
  21. #define DM_MSG_PREFIX "core"
  22. /*
  23. * Cookies are numeric values sent with CHANGE and REMOVE
  24. * uevents while resuming, removing or renaming the device.
  25. */
  26. #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
  27. #define DM_COOKIE_LENGTH 24
  28. static const char *_name = DM_NAME;
  29. static unsigned int major = 0;
  30. static unsigned int _major = 0;
  31. static DEFINE_SPINLOCK(_minor_lock);
  32. /*
  33. * For bio-based dm.
  34. * One of these is allocated per bio.
  35. */
  36. struct dm_io {
  37. struct mapped_device *md;
  38. int error;
  39. atomic_t io_count;
  40. struct bio *bio;
  41. unsigned long start_time;
  42. spinlock_t endio_lock;
  43. };
  44. /*
  45. * For bio-based dm.
  46. * One of these is allocated per target within a bio. Hopefully
  47. * this will be simplified out one day.
  48. */
  49. struct dm_target_io {
  50. struct dm_io *io;
  51. struct dm_target *ti;
  52. union map_info info;
  53. };
  54. /*
  55. * For request-based dm.
  56. * One of these is allocated per request.
  57. */
  58. struct dm_rq_target_io {
  59. struct mapped_device *md;
  60. struct dm_target *ti;
  61. struct request *orig, clone;
  62. int error;
  63. union map_info info;
  64. };
  65. /*
  66. * For request-based dm.
  67. * One of these is allocated per bio.
  68. */
  69. struct dm_rq_clone_bio_info {
  70. struct bio *orig;
  71. struct dm_rq_target_io *tio;
  72. };
  73. union map_info *dm_get_mapinfo(struct bio *bio)
  74. {
  75. if (bio && bio->bi_private)
  76. return &((struct dm_target_io *)bio->bi_private)->info;
  77. return NULL;
  78. }
  79. union map_info *dm_get_rq_mapinfo(struct request *rq)
  80. {
  81. if (rq && rq->end_io_data)
  82. return &((struct dm_rq_target_io *)rq->end_io_data)->info;
  83. return NULL;
  84. }
  85. EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
  86. #define MINOR_ALLOCED ((void *)-1)
  87. /*
  88. * Bits for the md->flags field.
  89. */
  90. #define DMF_BLOCK_IO_FOR_SUSPEND 0
  91. #define DMF_SUSPENDED 1
  92. #define DMF_FROZEN 2
  93. #define DMF_FREEING 3
  94. #define DMF_DELETING 4
  95. #define DMF_NOFLUSH_SUSPENDING 5
  96. #define DMF_QUEUE_IO_TO_THREAD 6
  97. /*
  98. * Work processed by per-device workqueue.
  99. */
  100. struct mapped_device {
  101. struct rw_semaphore io_lock;
  102. struct mutex suspend_lock;
  103. rwlock_t map_lock;
  104. atomic_t holders;
  105. atomic_t open_count;
  106. unsigned long flags;
  107. struct request_queue *queue;
  108. struct gendisk *disk;
  109. char name[16];
  110. void *interface_ptr;
  111. /*
  112. * A list of ios that arrived while we were suspended.
  113. */
  114. atomic_t pending[2];
  115. wait_queue_head_t wait;
  116. struct work_struct work;
  117. struct bio_list deferred;
  118. spinlock_t deferred_lock;
  119. /*
  120. * An error from the barrier request currently being processed.
  121. */
  122. int barrier_error;
  123. /*
  124. * Protect barrier_error from concurrent endio processing
  125. * in request-based dm.
  126. */
  127. spinlock_t barrier_error_lock;
  128. /*
  129. * Processing queue (flush/barriers)
  130. */
  131. struct workqueue_struct *wq;
  132. struct work_struct barrier_work;
  133. /* A pointer to the currently processing pre/post flush request */
  134. struct request *flush_request;
  135. /*
  136. * The current mapping.
  137. */
  138. struct dm_table *map;
  139. /*
  140. * io objects are allocated from here.
  141. */
  142. mempool_t *io_pool;
  143. mempool_t *tio_pool;
  144. struct bio_set *bs;
  145. /*
  146. * Event handling.
  147. */
  148. atomic_t event_nr;
  149. wait_queue_head_t eventq;
  150. atomic_t uevent_seq;
  151. struct list_head uevent_list;
  152. spinlock_t uevent_lock; /* Protect access to uevent_list */
  153. /*
  154. * freeze/thaw support require holding onto a super block
  155. */
  156. struct super_block *frozen_sb;
  157. struct block_device *bdev;
  158. /* forced geometry settings */
  159. struct hd_geometry geometry;
  160. /* For saving the address of __make_request for request based dm */
  161. make_request_fn *saved_make_request_fn;
  162. /* sysfs handle */
  163. struct kobject kobj;
  164. /* zero-length barrier that will be cloned and submitted to targets */
  165. struct bio barrier_bio;
  166. };
  167. /*
  168. * For mempools pre-allocation at the table loading time.
  169. */
  170. struct dm_md_mempools {
  171. mempool_t *io_pool;
  172. mempool_t *tio_pool;
  173. struct bio_set *bs;
  174. };
  175. #define MIN_IOS 256
  176. static struct kmem_cache *_io_cache;
  177. static struct kmem_cache *_tio_cache;
  178. static struct kmem_cache *_rq_tio_cache;
  179. static struct kmem_cache *_rq_bio_info_cache;
  180. static int __init local_init(void)
  181. {
  182. int r = -ENOMEM;
  183. /* allocate a slab for the dm_ios */
  184. _io_cache = KMEM_CACHE(dm_io, 0);
  185. if (!_io_cache)
  186. return r;
  187. /* allocate a slab for the target ios */
  188. _tio_cache = KMEM_CACHE(dm_target_io, 0);
  189. if (!_tio_cache)
  190. goto out_free_io_cache;
  191. _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
  192. if (!_rq_tio_cache)
  193. goto out_free_tio_cache;
  194. _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
  195. if (!_rq_bio_info_cache)
  196. goto out_free_rq_tio_cache;
  197. r = dm_uevent_init();
  198. if (r)
  199. goto out_free_rq_bio_info_cache;
  200. _major = major;
  201. r = register_blkdev(_major, _name);
  202. if (r < 0)
  203. goto out_uevent_exit;
  204. if (!_major)
  205. _major = r;
  206. return 0;
  207. out_uevent_exit:
  208. dm_uevent_exit();
  209. out_free_rq_bio_info_cache:
  210. kmem_cache_destroy(_rq_bio_info_cache);
  211. out_free_rq_tio_cache:
  212. kmem_cache_destroy(_rq_tio_cache);
  213. out_free_tio_cache:
  214. kmem_cache_destroy(_tio_cache);
  215. out_free_io_cache:
  216. kmem_cache_destroy(_io_cache);
  217. return r;
  218. }
  219. static void local_exit(void)
  220. {
  221. kmem_cache_destroy(_rq_bio_info_cache);
  222. kmem_cache_destroy(_rq_tio_cache);
  223. kmem_cache_destroy(_tio_cache);
  224. kmem_cache_destroy(_io_cache);
  225. unregister_blkdev(_major, _name);
  226. dm_uevent_exit();
  227. _major = 0;
  228. DMINFO("cleaned up");
  229. }
  230. static int (*_inits[])(void) __initdata = {
  231. local_init,
  232. dm_target_init,
  233. dm_linear_init,
  234. dm_stripe_init,
  235. dm_io_init,
  236. dm_kcopyd_init,
  237. dm_interface_init,
  238. };
  239. static void (*_exits[])(void) = {
  240. local_exit,
  241. dm_target_exit,
  242. dm_linear_exit,
  243. dm_stripe_exit,
  244. dm_io_exit,
  245. dm_kcopyd_exit,
  246. dm_interface_exit,
  247. };
  248. static int __init dm_init(void)
  249. {
  250. const int count = ARRAY_SIZE(_inits);
  251. int r, i;
  252. for (i = 0; i < count; i++) {
  253. r = _inits[i]();
  254. if (r)
  255. goto bad;
  256. }
  257. return 0;
  258. bad:
  259. while (i--)
  260. _exits[i]();
  261. return r;
  262. }
  263. static void __exit dm_exit(void)
  264. {
  265. int i = ARRAY_SIZE(_exits);
  266. while (i--)
  267. _exits[i]();
  268. }
  269. /*
  270. * Block device functions
  271. */
  272. int dm_deleting_md(struct mapped_device *md)
  273. {
  274. return test_bit(DMF_DELETING, &md->flags);
  275. }
  276. static int dm_blk_open(struct block_device *bdev, fmode_t mode)
  277. {
  278. struct mapped_device *md;
  279. spin_lock(&_minor_lock);
  280. md = bdev->bd_disk->private_data;
  281. if (!md)
  282. goto out;
  283. if (test_bit(DMF_FREEING, &md->flags) ||
  284. dm_deleting_md(md)) {
  285. md = NULL;
  286. goto out;
  287. }
  288. dm_get(md);
  289. atomic_inc(&md->open_count);
  290. out:
  291. spin_unlock(&_minor_lock);
  292. return md ? 0 : -ENXIO;
  293. }
  294. static int dm_blk_close(struct gendisk *disk, fmode_t mode)
  295. {
  296. struct mapped_device *md = disk->private_data;
  297. atomic_dec(&md->open_count);
  298. dm_put(md);
  299. return 0;
  300. }
  301. int dm_open_count(struct mapped_device *md)
  302. {
  303. return atomic_read(&md->open_count);
  304. }
  305. /*
  306. * Guarantees nothing is using the device before it's deleted.
  307. */
  308. int dm_lock_for_deletion(struct mapped_device *md)
  309. {
  310. int r = 0;
  311. spin_lock(&_minor_lock);
  312. if (dm_open_count(md))
  313. r = -EBUSY;
  314. else
  315. set_bit(DMF_DELETING, &md->flags);
  316. spin_unlock(&_minor_lock);
  317. return r;
  318. }
  319. static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  320. {
  321. struct mapped_device *md = bdev->bd_disk->private_data;
  322. return dm_get_geometry(md, geo);
  323. }
  324. static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
  325. unsigned int cmd, unsigned long arg)
  326. {
  327. struct mapped_device *md = bdev->bd_disk->private_data;
  328. struct dm_table *map = dm_get_live_table(md);
  329. struct dm_target *tgt;
  330. int r = -ENOTTY;
  331. if (!map || !dm_table_get_size(map))
  332. goto out;
  333. /* We only support devices that have a single target */
  334. if (dm_table_get_num_targets(map) != 1)
  335. goto out;
  336. tgt = dm_table_get_target(map, 0);
  337. if (dm_suspended(md)) {
  338. r = -EAGAIN;
  339. goto out;
  340. }
  341. if (tgt->type->ioctl)
  342. r = tgt->type->ioctl(tgt, cmd, arg);
  343. out:
  344. dm_table_put(map);
  345. return r;
  346. }
  347. static struct dm_io *alloc_io(struct mapped_device *md)
  348. {
  349. return mempool_alloc(md->io_pool, GFP_NOIO);
  350. }
  351. static void free_io(struct mapped_device *md, struct dm_io *io)
  352. {
  353. mempool_free(io, md->io_pool);
  354. }
  355. static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
  356. {
  357. mempool_free(tio, md->tio_pool);
  358. }
  359. static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
  360. gfp_t gfp_mask)
  361. {
  362. return mempool_alloc(md->tio_pool, gfp_mask);
  363. }
  364. static void free_rq_tio(struct dm_rq_target_io *tio)
  365. {
  366. mempool_free(tio, tio->md->tio_pool);
  367. }
  368. static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
  369. {
  370. return mempool_alloc(md->io_pool, GFP_ATOMIC);
  371. }
  372. static void free_bio_info(struct dm_rq_clone_bio_info *info)
  373. {
  374. mempool_free(info, info->tio->md->io_pool);
  375. }
  376. static int md_in_flight(struct mapped_device *md)
  377. {
  378. return atomic_read(&md->pending[READ]) +
  379. atomic_read(&md->pending[WRITE]);
  380. }
  381. static void start_io_acct(struct dm_io *io)
  382. {
  383. struct mapped_device *md = io->md;
  384. int cpu;
  385. int rw = bio_data_dir(io->bio);
  386. io->start_time = jiffies;
  387. cpu = part_stat_lock();
  388. part_round_stats(cpu, &dm_disk(md)->part0);
  389. part_stat_unlock();
  390. dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
  391. }
  392. static void end_io_acct(struct dm_io *io)
  393. {
  394. struct mapped_device *md = io->md;
  395. struct bio *bio = io->bio;
  396. unsigned long duration = jiffies - io->start_time;
  397. int pending, cpu;
  398. int rw = bio_data_dir(bio);
  399. cpu = part_stat_lock();
  400. part_round_stats(cpu, &dm_disk(md)->part0);
  401. part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
  402. part_stat_unlock();
  403. /*
  404. * After this is decremented the bio must not be touched if it is
  405. * a barrier.
  406. */
  407. dm_disk(md)->part0.in_flight[rw] = pending =
  408. atomic_dec_return(&md->pending[rw]);
  409. pending += atomic_read(&md->pending[rw^0x1]);
  410. /* nudge anyone waiting on suspend queue */
  411. if (!pending)
  412. wake_up(&md->wait);
  413. }
  414. /*
  415. * Add the bio to the list of deferred io.
  416. */
  417. static void queue_io(struct mapped_device *md, struct bio *bio)
  418. {
  419. down_write(&md->io_lock);
  420. spin_lock_irq(&md->deferred_lock);
  421. bio_list_add(&md->deferred, bio);
  422. spin_unlock_irq(&md->deferred_lock);
  423. if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
  424. queue_work(md->wq, &md->work);
  425. up_write(&md->io_lock);
  426. }
  427. /*
  428. * Everyone (including functions in this file), should use this
  429. * function to access the md->map field, and make sure they call
  430. * dm_table_put() when finished.
  431. */
  432. struct dm_table *dm_get_live_table(struct mapped_device *md)
  433. {
  434. struct dm_table *t;
  435. unsigned long flags;
  436. read_lock_irqsave(&md->map_lock, flags);
  437. t = md->map;
  438. if (t)
  439. dm_table_get(t);
  440. read_unlock_irqrestore(&md->map_lock, flags);
  441. return t;
  442. }
  443. /*
  444. * Get the geometry associated with a dm device
  445. */
  446. int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
  447. {
  448. *geo = md->geometry;
  449. return 0;
  450. }
  451. /*
  452. * Set the geometry of a device.
  453. */
  454. int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
  455. {
  456. sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
  457. if (geo->start > sz) {
  458. DMWARN("Start sector is beyond the geometry limits.");
  459. return -EINVAL;
  460. }
  461. md->geometry = *geo;
  462. return 0;
  463. }
  464. /*-----------------------------------------------------------------
  465. * CRUD START:
  466. * A more elegant soln is in the works that uses the queue
  467. * merge fn, unfortunately there are a couple of changes to
  468. * the block layer that I want to make for this. So in the
  469. * interests of getting something for people to use I give
  470. * you this clearly demarcated crap.
  471. *---------------------------------------------------------------*/
  472. static int __noflush_suspending(struct mapped_device *md)
  473. {
  474. return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
  475. }
  476. /*
  477. * Decrements the number of outstanding ios that a bio has been
  478. * cloned into, completing the original io if necc.
  479. */
  480. static void dec_pending(struct dm_io *io, int error)
  481. {
  482. unsigned long flags;
  483. int io_error;
  484. struct bio *bio;
  485. struct mapped_device *md = io->md;
  486. /* Push-back supersedes any I/O errors */
  487. if (unlikely(error)) {
  488. spin_lock_irqsave(&io->endio_lock, flags);
  489. if (!(io->error > 0 && __noflush_suspending(md)))
  490. io->error = error;
  491. spin_unlock_irqrestore(&io->endio_lock, flags);
  492. }
  493. if (atomic_dec_and_test(&io->io_count)) {
  494. if (io->error == DM_ENDIO_REQUEUE) {
  495. /*
  496. * Target requested pushing back the I/O.
  497. */
  498. spin_lock_irqsave(&md->deferred_lock, flags);
  499. if (__noflush_suspending(md)) {
  500. if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
  501. bio_list_add_head(&md->deferred,
  502. io->bio);
  503. } else
  504. /* noflush suspend was interrupted. */
  505. io->error = -EIO;
  506. spin_unlock_irqrestore(&md->deferred_lock, flags);
  507. }
  508. io_error = io->error;
  509. bio = io->bio;
  510. if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
  511. /*
  512. * There can be just one barrier request so we use
  513. * a per-device variable for error reporting.
  514. * Note that you can't touch the bio after end_io_acct
  515. */
  516. if (!md->barrier_error && io_error != -EOPNOTSUPP)
  517. md->barrier_error = io_error;
  518. end_io_acct(io);
  519. } else {
  520. end_io_acct(io);
  521. if (io_error != DM_ENDIO_REQUEUE) {
  522. trace_block_bio_complete(md->queue, bio);
  523. bio_endio(bio, io_error);
  524. }
  525. }
  526. free_io(md, io);
  527. }
  528. }
  529. static void clone_endio(struct bio *bio, int error)
  530. {
  531. int r = 0;
  532. struct dm_target_io *tio = bio->bi_private;
  533. struct dm_io *io = tio->io;
  534. struct mapped_device *md = tio->io->md;
  535. dm_endio_fn endio = tio->ti->type->end_io;
  536. if (!bio_flagged(bio, BIO_UPTODATE) && !error)
  537. error = -EIO;
  538. if (endio) {
  539. r = endio(tio->ti, bio, error, &tio->info);
  540. if (r < 0 || r == DM_ENDIO_REQUEUE)
  541. /*
  542. * error and requeue request are handled
  543. * in dec_pending().
  544. */
  545. error = r;
  546. else if (r == DM_ENDIO_INCOMPLETE)
  547. /* The target will handle the io */
  548. return;
  549. else if (r) {
  550. DMWARN("unimplemented target endio return value: %d", r);
  551. BUG();
  552. }
  553. }
  554. /*
  555. * Store md for cleanup instead of tio which is about to get freed.
  556. */
  557. bio->bi_private = md->bs;
  558. free_tio(md, tio);
  559. bio_put(bio);
  560. dec_pending(io, error);
  561. }
  562. /*
  563. * Partial completion handling for request-based dm
  564. */
  565. static void end_clone_bio(struct bio *clone, int error)
  566. {
  567. struct dm_rq_clone_bio_info *info = clone->bi_private;
  568. struct dm_rq_target_io *tio = info->tio;
  569. struct bio *bio = info->orig;
  570. unsigned int nr_bytes = info->orig->bi_size;
  571. bio_put(clone);
  572. if (tio->error)
  573. /*
  574. * An error has already been detected on the request.
  575. * Once error occurred, just let clone->end_io() handle
  576. * the remainder.
  577. */
  578. return;
  579. else if (error) {
  580. /*
  581. * Don't notice the error to the upper layer yet.
  582. * The error handling decision is made by the target driver,
  583. * when the request is completed.
  584. */
  585. tio->error = error;
  586. return;
  587. }
  588. /*
  589. * I/O for the bio successfully completed.
  590. * Notice the data completion to the upper layer.
  591. */
  592. /*
  593. * bios are processed from the head of the list.
  594. * So the completing bio should always be rq->bio.
  595. * If it's not, something wrong is happening.
  596. */
  597. if (tio->orig->bio != bio)
  598. DMERR("bio completion is going in the middle of the request");
  599. /*
  600. * Update the original request.
  601. * Do not use blk_end_request() here, because it may complete
  602. * the original request before the clone, and break the ordering.
  603. */
  604. blk_update_request(tio->orig, 0, nr_bytes);
  605. }
  606. static void store_barrier_error(struct mapped_device *md, int error)
  607. {
  608. unsigned long flags;
  609. spin_lock_irqsave(&md->barrier_error_lock, flags);
  610. /*
  611. * Basically, the first error is taken, but:
  612. * -EOPNOTSUPP supersedes any I/O error.
  613. * Requeue request supersedes any I/O error but -EOPNOTSUPP.
  614. */
  615. if (!md->barrier_error || error == -EOPNOTSUPP ||
  616. (md->barrier_error != -EOPNOTSUPP &&
  617. error == DM_ENDIO_REQUEUE))
  618. md->barrier_error = error;
  619. spin_unlock_irqrestore(&md->barrier_error_lock, flags);
  620. }
  621. /*
  622. * Don't touch any member of the md after calling this function because
  623. * the md may be freed in dm_put() at the end of this function.
  624. * Or do dm_get() before calling this function and dm_put() later.
  625. */
  626. static void rq_completed(struct mapped_device *md, int rw, int run_queue)
  627. {
  628. atomic_dec(&md->pending[rw]);
  629. /* nudge anyone waiting on suspend queue */
  630. if (!md_in_flight(md))
  631. wake_up(&md->wait);
  632. if (run_queue)
  633. blk_run_queue(md->queue);
  634. /*
  635. * dm_put() must be at the end of this function. See the comment above
  636. */
  637. dm_put(md);
  638. }
  639. static void free_rq_clone(struct request *clone)
  640. {
  641. struct dm_rq_target_io *tio = clone->end_io_data;
  642. blk_rq_unprep_clone(clone);
  643. free_rq_tio(tio);
  644. }
  645. /*
  646. * Complete the clone and the original request.
  647. * Must be called without queue lock.
  648. */
  649. static void dm_end_request(struct request *clone, int error)
  650. {
  651. int rw = rq_data_dir(clone);
  652. int run_queue = 1;
  653. bool is_barrier = blk_barrier_rq(clone);
  654. struct dm_rq_target_io *tio = clone->end_io_data;
  655. struct mapped_device *md = tio->md;
  656. struct request *rq = tio->orig;
  657. if (blk_pc_request(rq) && !is_barrier) {
  658. rq->errors = clone->errors;
  659. rq->resid_len = clone->resid_len;
  660. if (rq->sense)
  661. /*
  662. * We are using the sense buffer of the original
  663. * request.
  664. * So setting the length of the sense data is enough.
  665. */
  666. rq->sense_len = clone->sense_len;
  667. }
  668. free_rq_clone(clone);
  669. if (unlikely(is_barrier)) {
  670. if (unlikely(error))
  671. store_barrier_error(md, error);
  672. run_queue = 0;
  673. } else
  674. blk_end_request_all(rq, error);
  675. rq_completed(md, rw, run_queue);
  676. }
  677. static void dm_unprep_request(struct request *rq)
  678. {
  679. struct request *clone = rq->special;
  680. rq->special = NULL;
  681. rq->cmd_flags &= ~REQ_DONTPREP;
  682. free_rq_clone(clone);
  683. }
  684. /*
  685. * Requeue the original request of a clone.
  686. */
  687. void dm_requeue_unmapped_request(struct request *clone)
  688. {
  689. int rw = rq_data_dir(clone);
  690. struct dm_rq_target_io *tio = clone->end_io_data;
  691. struct mapped_device *md = tio->md;
  692. struct request *rq = tio->orig;
  693. struct request_queue *q = rq->q;
  694. unsigned long flags;
  695. if (unlikely(blk_barrier_rq(clone))) {
  696. /*
  697. * Barrier clones share an original request.
  698. * Leave it to dm_end_request(), which handles this special
  699. * case.
  700. */
  701. dm_end_request(clone, DM_ENDIO_REQUEUE);
  702. return;
  703. }
  704. dm_unprep_request(rq);
  705. spin_lock_irqsave(q->queue_lock, flags);
  706. if (elv_queue_empty(q))
  707. blk_plug_device(q);
  708. blk_requeue_request(q, rq);
  709. spin_unlock_irqrestore(q->queue_lock, flags);
  710. rq_completed(md, rw, 0);
  711. }
  712. EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
  713. static void __stop_queue(struct request_queue *q)
  714. {
  715. blk_stop_queue(q);
  716. }
  717. static void stop_queue(struct request_queue *q)
  718. {
  719. unsigned long flags;
  720. spin_lock_irqsave(q->queue_lock, flags);
  721. __stop_queue(q);
  722. spin_unlock_irqrestore(q->queue_lock, flags);
  723. }
  724. static void __start_queue(struct request_queue *q)
  725. {
  726. if (blk_queue_stopped(q))
  727. blk_start_queue(q);
  728. }
  729. static void start_queue(struct request_queue *q)
  730. {
  731. unsigned long flags;
  732. spin_lock_irqsave(q->queue_lock, flags);
  733. __start_queue(q);
  734. spin_unlock_irqrestore(q->queue_lock, flags);
  735. }
  736. static void dm_done(struct request *clone, int error, bool mapped)
  737. {
  738. int r = error;
  739. struct dm_rq_target_io *tio = clone->end_io_data;
  740. dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
  741. if (mapped && rq_end_io)
  742. r = rq_end_io(tio->ti, clone, error, &tio->info);
  743. if (r <= 0)
  744. /* The target wants to complete the I/O */
  745. dm_end_request(clone, r);
  746. else if (r == DM_ENDIO_INCOMPLETE)
  747. /* The target will handle the I/O */
  748. return;
  749. else if (r == DM_ENDIO_REQUEUE)
  750. /* The target wants to requeue the I/O */
  751. dm_requeue_unmapped_request(clone);
  752. else {
  753. DMWARN("unimplemented target endio return value: %d", r);
  754. BUG();
  755. }
  756. }
  757. /*
  758. * Request completion handler for request-based dm
  759. */
  760. static void dm_softirq_done(struct request *rq)
  761. {
  762. bool mapped = true;
  763. struct request *clone = rq->completion_data;
  764. struct dm_rq_target_io *tio = clone->end_io_data;
  765. if (rq->cmd_flags & REQ_FAILED)
  766. mapped = false;
  767. dm_done(clone, tio->error, mapped);
  768. }
  769. /*
  770. * Complete the clone and the original request with the error status
  771. * through softirq context.
  772. */
  773. static void dm_complete_request(struct request *clone, int error)
  774. {
  775. struct dm_rq_target_io *tio = clone->end_io_data;
  776. struct request *rq = tio->orig;
  777. if (unlikely(blk_barrier_rq(clone))) {
  778. /*
  779. * Barrier clones share an original request. So can't use
  780. * softirq_done with the original.
  781. * Pass the clone to dm_done() directly in this special case.
  782. * It is safe (even if clone->q->queue_lock is held here)
  783. * because there is no I/O dispatching during the completion
  784. * of barrier clone.
  785. */
  786. dm_done(clone, error, true);
  787. return;
  788. }
  789. tio->error = error;
  790. rq->completion_data = clone;
  791. blk_complete_request(rq);
  792. }
  793. /*
  794. * Complete the not-mapped clone and the original request with the error status
  795. * through softirq context.
  796. * Target's rq_end_io() function isn't called.
  797. * This may be used when the target's map_rq() function fails.
  798. */
  799. void dm_kill_unmapped_request(struct request *clone, int error)
  800. {
  801. struct dm_rq_target_io *tio = clone->end_io_data;
  802. struct request *rq = tio->orig;
  803. if (unlikely(blk_barrier_rq(clone))) {
  804. /*
  805. * Barrier clones share an original request.
  806. * Leave it to dm_end_request(), which handles this special
  807. * case.
  808. */
  809. BUG_ON(error > 0);
  810. dm_end_request(clone, error);
  811. return;
  812. }
  813. rq->cmd_flags |= REQ_FAILED;
  814. dm_complete_request(clone, error);
  815. }
  816. EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
  817. /*
  818. * Called with the queue lock held
  819. */
  820. static void end_clone_request(struct request *clone, int error)
  821. {
  822. /*
  823. * For just cleaning up the information of the queue in which
  824. * the clone was dispatched.
  825. * The clone is *NOT* freed actually here because it is alloced from
  826. * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
  827. */
  828. __blk_put_request(clone->q, clone);
  829. /*
  830. * Actual request completion is done in a softirq context which doesn't
  831. * hold the queue lock. Otherwise, deadlock could occur because:
  832. * - another request may be submitted by the upper level driver
  833. * of the stacking during the completion
  834. * - the submission which requires queue lock may be done
  835. * against this queue
  836. */
  837. dm_complete_request(clone, error);
  838. }
  839. static sector_t max_io_len(struct mapped_device *md,
  840. sector_t sector, struct dm_target *ti)
  841. {
  842. sector_t offset = sector - ti->begin;
  843. sector_t len = ti->len - offset;
  844. /*
  845. * Does the target need to split even further ?
  846. */
  847. if (ti->split_io) {
  848. sector_t boundary;
  849. boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
  850. - offset;
  851. if (len > boundary)
  852. len = boundary;
  853. }
  854. return len;
  855. }
  856. static void __map_bio(struct dm_target *ti, struct bio *clone,
  857. struct dm_target_io *tio)
  858. {
  859. int r;
  860. sector_t sector;
  861. struct mapped_device *md;
  862. clone->bi_end_io = clone_endio;
  863. clone->bi_private = tio;
  864. /*
  865. * Map the clone. If r == 0 we don't need to do
  866. * anything, the target has assumed ownership of
  867. * this io.
  868. */
  869. atomic_inc(&tio->io->io_count);
  870. sector = clone->bi_sector;
  871. r = ti->type->map(ti, clone, &tio->info);
  872. if (r == DM_MAPIO_REMAPPED) {
  873. /* the bio has been remapped so dispatch it */
  874. trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
  875. tio->io->bio->bi_bdev->bd_dev, sector);
  876. generic_make_request(clone);
  877. } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
  878. /* error the io and bail out, or requeue it if needed */
  879. md = tio->io->md;
  880. dec_pending(tio->io, r);
  881. /*
  882. * Store bio_set for cleanup.
  883. */
  884. clone->bi_private = md->bs;
  885. bio_put(clone);
  886. free_tio(md, tio);
  887. } else if (r) {
  888. DMWARN("unimplemented target map return value: %d", r);
  889. BUG();
  890. }
  891. }
  892. struct clone_info {
  893. struct mapped_device *md;
  894. struct dm_table *map;
  895. struct bio *bio;
  896. struct dm_io *io;
  897. sector_t sector;
  898. sector_t sector_count;
  899. unsigned short idx;
  900. };
  901. static void dm_bio_destructor(struct bio *bio)
  902. {
  903. struct bio_set *bs = bio->bi_private;
  904. bio_free(bio, bs);
  905. }
  906. /*
  907. * Creates a little bio that is just does part of a bvec.
  908. */
  909. static struct bio *split_bvec(struct bio *bio, sector_t sector,
  910. unsigned short idx, unsigned int offset,
  911. unsigned int len, struct bio_set *bs)
  912. {
  913. struct bio *clone;
  914. struct bio_vec *bv = bio->bi_io_vec + idx;
  915. clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
  916. clone->bi_destructor = dm_bio_destructor;
  917. *clone->bi_io_vec = *bv;
  918. clone->bi_sector = sector;
  919. clone->bi_bdev = bio->bi_bdev;
  920. clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
  921. clone->bi_vcnt = 1;
  922. clone->bi_size = to_bytes(len);
  923. clone->bi_io_vec->bv_offset = offset;
  924. clone->bi_io_vec->bv_len = clone->bi_size;
  925. clone->bi_flags |= 1 << BIO_CLONED;
  926. if (bio_integrity(bio)) {
  927. bio_integrity_clone(clone, bio, GFP_NOIO, bs);
  928. bio_integrity_trim(clone,
  929. bio_sector_offset(bio, idx, offset), len);
  930. }
  931. return clone;
  932. }
  933. /*
  934. * Creates a bio that consists of range of complete bvecs.
  935. */
  936. static struct bio *clone_bio(struct bio *bio, sector_t sector,
  937. unsigned short idx, unsigned short bv_count,
  938. unsigned int len, struct bio_set *bs)
  939. {
  940. struct bio *clone;
  941. clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
  942. __bio_clone(clone, bio);
  943. clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
  944. clone->bi_destructor = dm_bio_destructor;
  945. clone->bi_sector = sector;
  946. clone->bi_idx = idx;
  947. clone->bi_vcnt = idx + bv_count;
  948. clone->bi_size = to_bytes(len);
  949. clone->bi_flags &= ~(1 << BIO_SEG_VALID);
  950. if (bio_integrity(bio)) {
  951. bio_integrity_clone(clone, bio, GFP_NOIO, bs);
  952. if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
  953. bio_integrity_trim(clone,
  954. bio_sector_offset(bio, idx, 0), len);
  955. }
  956. return clone;
  957. }
  958. static struct dm_target_io *alloc_tio(struct clone_info *ci,
  959. struct dm_target *ti)
  960. {
  961. struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
  962. tio->io = ci->io;
  963. tio->ti = ti;
  964. memset(&tio->info, 0, sizeof(tio->info));
  965. return tio;
  966. }
  967. static void __flush_target(struct clone_info *ci, struct dm_target *ti,
  968. unsigned flush_nr)
  969. {
  970. struct dm_target_io *tio = alloc_tio(ci, ti);
  971. struct bio *clone;
  972. tio->info.flush_request = flush_nr;
  973. clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
  974. __bio_clone(clone, ci->bio);
  975. clone->bi_destructor = dm_bio_destructor;
  976. __map_bio(ti, clone, tio);
  977. }
  978. static int __clone_and_map_empty_barrier(struct clone_info *ci)
  979. {
  980. unsigned target_nr = 0, flush_nr;
  981. struct dm_target *ti;
  982. while ((ti = dm_table_get_target(ci->map, target_nr++)))
  983. for (flush_nr = 0; flush_nr < ti->num_flush_requests;
  984. flush_nr++)
  985. __flush_target(ci, ti, flush_nr);
  986. ci->sector_count = 0;
  987. return 0;
  988. }
  989. static int __clone_and_map(struct clone_info *ci)
  990. {
  991. struct bio *clone, *bio = ci->bio;
  992. struct dm_target *ti;
  993. sector_t len = 0, max;
  994. struct dm_target_io *tio;
  995. if (unlikely(bio_empty_barrier(bio)))
  996. return __clone_and_map_empty_barrier(ci);
  997. ti = dm_table_find_target(ci->map, ci->sector);
  998. if (!dm_target_is_valid(ti))
  999. return -EIO;
  1000. max = max_io_len(ci->md, ci->sector, ti);
  1001. /*
  1002. * Allocate a target io object.
  1003. */
  1004. tio = alloc_tio(ci, ti);
  1005. if (ci->sector_count <= max) {
  1006. /*
  1007. * Optimise for the simple case where we can do all of
  1008. * the remaining io with a single clone.
  1009. */
  1010. clone = clone_bio(bio, ci->sector, ci->idx,
  1011. bio->bi_vcnt - ci->idx, ci->sector_count,
  1012. ci->md->bs);
  1013. __map_bio(ti, clone, tio);
  1014. ci->sector_count = 0;
  1015. } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
  1016. /*
  1017. * There are some bvecs that don't span targets.
  1018. * Do as many of these as possible.
  1019. */
  1020. int i;
  1021. sector_t remaining = max;
  1022. sector_t bv_len;
  1023. for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
  1024. bv_len = to_sector(bio->bi_io_vec[i].bv_len);
  1025. if (bv_len > remaining)
  1026. break;
  1027. remaining -= bv_len;
  1028. len += bv_len;
  1029. }
  1030. clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
  1031. ci->md->bs);
  1032. __map_bio(ti, clone, tio);
  1033. ci->sector += len;
  1034. ci->sector_count -= len;
  1035. ci->idx = i;
  1036. } else {
  1037. /*
  1038. * Handle a bvec that must be split between two or more targets.
  1039. */
  1040. struct bio_vec *bv = bio->bi_io_vec + ci->idx;
  1041. sector_t remaining = to_sector(bv->bv_len);
  1042. unsigned int offset = 0;
  1043. do {
  1044. if (offset) {
  1045. ti = dm_table_find_target(ci->map, ci->sector);
  1046. if (!dm_target_is_valid(ti))
  1047. return -EIO;
  1048. max = max_io_len(ci->md, ci->sector, ti);
  1049. tio = alloc_tio(ci, ti);
  1050. }
  1051. len = min(remaining, max);
  1052. clone = split_bvec(bio, ci->sector, ci->idx,
  1053. bv->bv_offset + offset, len,
  1054. ci->md->bs);
  1055. __map_bio(ti, clone, tio);
  1056. ci->sector += len;
  1057. ci->sector_count -= len;
  1058. offset += to_bytes(len);
  1059. } while (remaining -= len);
  1060. ci->idx++;
  1061. }
  1062. return 0;
  1063. }
  1064. /*
  1065. * Split the bio into several clones and submit it to targets.
  1066. */
  1067. static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
  1068. {
  1069. struct clone_info ci;
  1070. int error = 0;
  1071. ci.map = dm_get_live_table(md);
  1072. if (unlikely(!ci.map)) {
  1073. if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
  1074. bio_io_error(bio);
  1075. else
  1076. if (!md->barrier_error)
  1077. md->barrier_error = -EIO;
  1078. return;
  1079. }
  1080. ci.md = md;
  1081. ci.bio = bio;
  1082. ci.io = alloc_io(md);
  1083. ci.io->error = 0;
  1084. atomic_set(&ci.io->io_count, 1);
  1085. ci.io->bio = bio;
  1086. ci.io->md = md;
  1087. spin_lock_init(&ci.io->endio_lock);
  1088. ci.sector = bio->bi_sector;
  1089. ci.sector_count = bio_sectors(bio);
  1090. if (unlikely(bio_empty_barrier(bio)))
  1091. ci.sector_count = 1;
  1092. ci.idx = bio->bi_idx;
  1093. start_io_acct(ci.io);
  1094. while (ci.sector_count && !error)
  1095. error = __clone_and_map(&ci);
  1096. /* drop the extra reference count */
  1097. dec_pending(ci.io, error);
  1098. dm_table_put(ci.map);
  1099. }
  1100. /*-----------------------------------------------------------------
  1101. * CRUD END
  1102. *---------------------------------------------------------------*/
  1103. static int dm_merge_bvec(struct request_queue *q,
  1104. struct bvec_merge_data *bvm,
  1105. struct bio_vec *biovec)
  1106. {
  1107. struct mapped_device *md = q->queuedata;
  1108. struct dm_table *map = dm_get_live_table(md);
  1109. struct dm_target *ti;
  1110. sector_t max_sectors;
  1111. int max_size = 0;
  1112. if (unlikely(!map))
  1113. goto out;
  1114. ti = dm_table_find_target(map, bvm->bi_sector);
  1115. if (!dm_target_is_valid(ti))
  1116. goto out_table;
  1117. /*
  1118. * Find maximum amount of I/O that won't need splitting
  1119. */
  1120. max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
  1121. (sector_t) BIO_MAX_SECTORS);
  1122. max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
  1123. if (max_size < 0)
  1124. max_size = 0;
  1125. /*
  1126. * merge_bvec_fn() returns number of bytes
  1127. * it can accept at this offset
  1128. * max is precomputed maximal io size
  1129. */
  1130. if (max_size && ti->type->merge)
  1131. max_size = ti->type->merge(ti, bvm, biovec, max_size);
  1132. /*
  1133. * If the target doesn't support merge method and some of the devices
  1134. * provided their merge_bvec method (we know this by looking at
  1135. * queue_max_hw_sectors), then we can't allow bios with multiple vector
  1136. * entries. So always set max_size to 0, and the code below allows
  1137. * just one page.
  1138. */
  1139. else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
  1140. max_size = 0;
  1141. out_table:
  1142. dm_table_put(map);
  1143. out:
  1144. /*
  1145. * Always allow an entire first page
  1146. */
  1147. if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
  1148. max_size = biovec->bv_len;
  1149. return max_size;
  1150. }
  1151. /*
  1152. * The request function that just remaps the bio built up by
  1153. * dm_merge_bvec.
  1154. */
  1155. static int _dm_request(struct request_queue *q, struct bio *bio)
  1156. {
  1157. int rw = bio_data_dir(bio);
  1158. struct mapped_device *md = q->queuedata;
  1159. int cpu;
  1160. down_read(&md->io_lock);
  1161. cpu = part_stat_lock();
  1162. part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
  1163. part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
  1164. part_stat_unlock();
  1165. /*
  1166. * If we're suspended or the thread is processing barriers
  1167. * we have to queue this io for later.
  1168. */
  1169. if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
  1170. unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
  1171. up_read(&md->io_lock);
  1172. if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
  1173. bio_rw(bio) == READA) {
  1174. bio_io_error(bio);
  1175. return 0;
  1176. }
  1177. queue_io(md, bio);
  1178. return 0;
  1179. }
  1180. __split_and_process_bio(md, bio);
  1181. up_read(&md->io_lock);
  1182. return 0;
  1183. }
  1184. static int dm_make_request(struct request_queue *q, struct bio *bio)
  1185. {
  1186. struct mapped_device *md = q->queuedata;
  1187. return md->saved_make_request_fn(q, bio); /* call __make_request() */
  1188. }
  1189. static int dm_request_based(struct mapped_device *md)
  1190. {
  1191. return blk_queue_stackable(md->queue);
  1192. }
  1193. static int dm_request(struct request_queue *q, struct bio *bio)
  1194. {
  1195. struct mapped_device *md = q->queuedata;
  1196. if (dm_request_based(md))
  1197. return dm_make_request(q, bio);
  1198. return _dm_request(q, bio);
  1199. }
  1200. /*
  1201. * Mark this request as flush request, so that dm_request_fn() can
  1202. * recognize.
  1203. */
  1204. static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
  1205. {
  1206. rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
  1207. rq->cmd[0] = REQ_LB_OP_FLUSH;
  1208. }
  1209. static bool dm_rq_is_flush_request(struct request *rq)
  1210. {
  1211. if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
  1212. rq->cmd[0] == REQ_LB_OP_FLUSH)
  1213. return true;
  1214. else
  1215. return false;
  1216. }
  1217. void dm_dispatch_request(struct request *rq)
  1218. {
  1219. int r;
  1220. if (blk_queue_io_stat(rq->q))
  1221. rq->cmd_flags |= REQ_IO_STAT;
  1222. rq->start_time = jiffies;
  1223. r = blk_insert_cloned_request(rq->q, rq);
  1224. if (r)
  1225. dm_complete_request(rq, r);
  1226. }
  1227. EXPORT_SYMBOL_GPL(dm_dispatch_request);
  1228. static void dm_rq_bio_destructor(struct bio *bio)
  1229. {
  1230. struct dm_rq_clone_bio_info *info = bio->bi_private;
  1231. struct mapped_device *md = info->tio->md;
  1232. free_bio_info(info);
  1233. bio_free(bio, md->bs);
  1234. }
  1235. static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
  1236. void *data)
  1237. {
  1238. struct dm_rq_target_io *tio = data;
  1239. struct mapped_device *md = tio->md;
  1240. struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
  1241. if (!info)
  1242. return -ENOMEM;
  1243. info->orig = bio_orig;
  1244. info->tio = tio;
  1245. bio->bi_end_io = end_clone_bio;
  1246. bio->bi_private = info;
  1247. bio->bi_destructor = dm_rq_bio_destructor;
  1248. return 0;
  1249. }
  1250. static int setup_clone(struct request *clone, struct request *rq,
  1251. struct dm_rq_target_io *tio)
  1252. {
  1253. int r;
  1254. if (dm_rq_is_flush_request(rq)) {
  1255. blk_rq_init(NULL, clone);
  1256. clone->cmd_type = REQ_TYPE_FS;
  1257. clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
  1258. } else {
  1259. r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
  1260. dm_rq_bio_constructor, tio);
  1261. if (r)
  1262. return r;
  1263. clone->cmd = rq->cmd;
  1264. clone->cmd_len = rq->cmd_len;
  1265. clone->sense = rq->sense;
  1266. clone->buffer = rq->buffer;
  1267. }
  1268. clone->end_io = end_clone_request;
  1269. clone->end_io_data = tio;
  1270. return 0;
  1271. }
  1272. static struct request *clone_rq(struct request *rq, struct mapped_device *md,
  1273. gfp_t gfp_mask)
  1274. {
  1275. struct request *clone;
  1276. struct dm_rq_target_io *tio;
  1277. tio = alloc_rq_tio(md, gfp_mask);
  1278. if (!tio)
  1279. return NULL;
  1280. tio->md = md;
  1281. tio->ti = NULL;
  1282. tio->orig = rq;
  1283. tio->error = 0;
  1284. memset(&tio->info, 0, sizeof(tio->info));
  1285. clone = &tio->clone;
  1286. if (setup_clone(clone, rq, tio)) {
  1287. /* -ENOMEM */
  1288. free_rq_tio(tio);
  1289. return NULL;
  1290. }
  1291. return clone;
  1292. }
  1293. /*
  1294. * Called with the queue lock held.
  1295. */
  1296. static int dm_prep_fn(struct request_queue *q, struct request *rq)
  1297. {
  1298. struct mapped_device *md = q->queuedata;
  1299. struct request *clone;
  1300. if (unlikely(dm_rq_is_flush_request(rq)))
  1301. return BLKPREP_OK;
  1302. if (unlikely(rq->special)) {
  1303. DMWARN("Already has something in rq->special.");
  1304. return BLKPREP_KILL;
  1305. }
  1306. clone = clone_rq(rq, md, GFP_ATOMIC);
  1307. if (!clone)
  1308. return BLKPREP_DEFER;
  1309. rq->special = clone;
  1310. rq->cmd_flags |= REQ_DONTPREP;
  1311. return BLKPREP_OK;
  1312. }
  1313. static void map_request(struct dm_target *ti, struct request *clone,
  1314. struct mapped_device *md)
  1315. {
  1316. int r;
  1317. struct dm_rq_target_io *tio = clone->end_io_data;
  1318. /*
  1319. * Hold the md reference here for the in-flight I/O.
  1320. * We can't rely on the reference count by device opener,
  1321. * because the device may be closed during the request completion
  1322. * when all bios are completed.
  1323. * See the comment in rq_completed() too.
  1324. */
  1325. dm_get(md);
  1326. tio->ti = ti;
  1327. r = ti->type->map_rq(ti, clone, &tio->info);
  1328. switch (r) {
  1329. case DM_MAPIO_SUBMITTED:
  1330. /* The target has taken the I/O to submit by itself later */
  1331. break;
  1332. case DM_MAPIO_REMAPPED:
  1333. /* The target has remapped the I/O so dispatch it */
  1334. dm_dispatch_request(clone);
  1335. break;
  1336. case DM_MAPIO_REQUEUE:
  1337. /* The target wants to requeue the I/O */
  1338. dm_requeue_unmapped_request(clone);
  1339. break;
  1340. default:
  1341. if (r > 0) {
  1342. DMWARN("unimplemented target map return value: %d", r);
  1343. BUG();
  1344. }
  1345. /* The target wants to complete the I/O */
  1346. dm_kill_unmapped_request(clone, r);
  1347. break;
  1348. }
  1349. }
  1350. /*
  1351. * q->request_fn for request-based dm.
  1352. * Called with the queue lock held.
  1353. */
  1354. static void dm_request_fn(struct request_queue *q)
  1355. {
  1356. struct mapped_device *md = q->queuedata;
  1357. struct dm_table *map = dm_get_live_table(md);
  1358. struct dm_target *ti;
  1359. struct request *rq, *clone;
  1360. /*
  1361. * For suspend, check blk_queue_stopped() and increment
  1362. * ->pending within a single queue_lock not to increment the
  1363. * number of in-flight I/Os after the queue is stopped in
  1364. * dm_suspend().
  1365. */
  1366. while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
  1367. rq = blk_peek_request(q);
  1368. if (!rq)
  1369. goto plug_and_out;
  1370. if (unlikely(dm_rq_is_flush_request(rq))) {
  1371. BUG_ON(md->flush_request);
  1372. md->flush_request = rq;
  1373. blk_start_request(rq);
  1374. queue_work(md->wq, &md->barrier_work);
  1375. goto out;
  1376. }
  1377. ti = dm_table_find_target(map, blk_rq_pos(rq));
  1378. if (ti->type->busy && ti->type->busy(ti))
  1379. goto plug_and_out;
  1380. blk_start_request(rq);
  1381. clone = rq->special;
  1382. atomic_inc(&md->pending[rq_data_dir(clone)]);
  1383. spin_unlock(q->queue_lock);
  1384. map_request(ti, clone, md);
  1385. spin_lock_irq(q->queue_lock);
  1386. }
  1387. goto out;
  1388. plug_and_out:
  1389. if (!elv_queue_empty(q))
  1390. /* Some requests still remain, retry later */
  1391. blk_plug_device(q);
  1392. out:
  1393. dm_table_put(map);
  1394. return;
  1395. }
  1396. int dm_underlying_device_busy(struct request_queue *q)
  1397. {
  1398. return blk_lld_busy(q);
  1399. }
  1400. EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
  1401. static int dm_lld_busy(struct request_queue *q)
  1402. {
  1403. int r;
  1404. struct mapped_device *md = q->queuedata;
  1405. struct dm_table *map = dm_get_live_table(md);
  1406. if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
  1407. r = 1;
  1408. else
  1409. r = dm_table_any_busy_target(map);
  1410. dm_table_put(map);
  1411. return r;
  1412. }

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

	if (map) {
		if (dm_request_based(md))
			generic_unplug_device(q);

		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_live_table(md);
		if (map) {
			/*
			 * Request-based dm only cares about its own queue
			 * when queried for the congestion status of the
			 * request_queue.
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				    bdi_bits;
			else
				r = dm_table_any_congested(map, bdi_bits);

			dm_table_put(map);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
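
/*
 * Editor's note: both helpers above insert MINOR_ALLOCED as a placeholder.
 * The minor is reserved while the rest of alloc_dev() runs, and only once
 * the mapped device is fully set up does alloc_dev() swap the real md
 * pointer in with idr_replace().  dm_find_md() treats a MINOR_ALLOCED entry
 * as "not there yet", so a half-constructed device can never be looked up.
 */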

static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);
static void dm_rq_barrier_work(struct work_struct *work);

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	spin_lock_init(&md->barrier_error_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_init_queue(dm_request_fn, NULL);
	if (!md->queue)
		goto bad_queue;

	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device has not been decided yet,
	 * although we initialized the queue using blk_init_queue().
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
	md->saved_make_request_fn = md->queue->make_request_fn;
	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
			  dm_rq_prepare_flush);

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_bdev:
	destroy_workqueue(md->wq);
bad_thread:
	del_gendisk(md->disk);
	put_disk(md->disk);
bad_disk:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}
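
/*
 * Editor's note: the bad_* labels above unwind strictly in the reverse order
 * of the set-up steps, so each label only releases resources that were
 * already acquired when the corresponding failure happened; a new allocation
 * added to alloc_dev() needs a matching label in the mirror position of the
 * unwind chain.
 */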

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);
	bdput(md->bdev);
	destroy_workqueue(md->wq);
	if (md->tio_pool)
		mempool_destroy(md->tio_pool);
	if (md->io_pool)
		mempool_destroy(md->io_pool);
	if (md->bs)
		bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p;

	if (md->io_pool && md->tio_pool && md->bs)
		/* the md already has the necessary mempools */
		goto out;

	p = dm_table_get_md_mempools(t);
	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->tio_pool = p->tio_pool;
	p->tio_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed, the table no longer needs any mempools */
	dm_table_free_md_mempools(t);
}
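
/*
 * Editor's note: the mempools taken over here come from the table being
 * bound (dm_table_get_md_mempools(), allocated per table type via
 * dm_alloc_md_mempools() below) and, once attached to the md, stay with it
 * until free_dev() destroys them.  Reloading a table onto an md that already
 * owns its pools simply discards the new table's copies.
 */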

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->bdev->bd_inode->i_mutex);
	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->bdev->bd_inode->i_mutex);
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;
	unsigned long flags;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * If the old table type wasn't request-based, the queue hasn't been
	 * stopped during suspension yet; stop it here to prevent I/O from
	 * being mapped before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);

	__bind_mempools(md, t);

	write_lock_irqsave(&md->map_lock, flags);
	old_map = md->map;
	md->map = t;
	dm_table_set_restrictions(t, q, limits);
	write_unlock_irqrestore(&md->map_lock, flags);

	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;
	unsigned long flags;

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	write_lock_irqsave(&md->map_lock, flags);
	md->map = NULL;
	write_unlock_irqrestore(&md->map_lock, flags);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_live_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_sysfs_exit(md);
		dm_table_put(map);
		dm_table_destroy(__unbind(md));
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);
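
/*
 * Editor's note on the reference handling above: every opener/lookup path
 * takes a reference with dm_get(); the final dm_put() (holders reaching
 * zero) swaps the idr entry back to the MINOR_ALLOCED placeholder and marks
 * the md DMF_FREEING so that concurrent dm_find_md() lookups fail, runs the
 * pre-/post-suspend hooks if the device was still live, and finally tears
 * everything down via __unbind() and free_dev().
 */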

static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}
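
/*
 * Editor's note: dm_wait_for_completion() blocks until md_in_flight()
 * reports no pending I/O, i.e. until the md->pending[] in-flight counters
 * (bumped for each started I/O, e.g. in dm_request_fn() and dm_rq_barrier()
 * for request-based dm) have drained back to zero.  With TASK_INTERRUPTIBLE
 * a signal aborts the wait with -EINTR, which dm_suspend() uses to roll back
 * an interrupted suspend.
 */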

static void dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);

	bio_init(&md->barrier_bio);
	md->barrier_bio.bi_bdev = md->bdev;
	md->barrier_bio.bi_rw = WRITE_BARRIER;
	__split_and_process_bio(md, &md->barrier_bio);

	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}

static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	md->barrier_error = 0;

	dm_flush(md);

	if (!bio_empty_barrier(bio)) {
		__split_and_process_bio(md, bio);
		dm_flush(md);
	}

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, md->barrier_error);
	else {
		spin_lock_irq(&md->deferred_lock);
		bio_list_add_head(&md->deferred, bio);
		spin_unlock_irq(&md->deferred_lock);
	}
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
			break;
		}

		up_write(&md->io_lock);

		if (dm_request_based(md))
			generic_make_request(c);
		else {
			if (bio_rw_flagged(c, BIO_RW_BARRIER))
				process_barrier(md, c);
			else
				__split_and_process_bio(md, c);
		}

		down_write(&md->io_lock);
	}

	up_write(&md->io_lock);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}

static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	tio->info.flush_request = flush_nr;
}

/* Issue barrier requests to targets and wait for their completion. */
static int dm_rq_barrier(struct mapped_device *md)
{
	int i, j;
	struct dm_table *map = dm_get_live_table(md);
	unsigned num_targets = dm_table_get_num_targets(map);
	struct dm_target *ti;
	struct request *clone;

	md->barrier_error = 0;

	for (i = 0; i < num_targets; i++) {
		ti = dm_table_get_target(map, i);
		for (j = 0; j < ti->num_flush_requests; j++) {
			clone = clone_rq(md->flush_request, md, GFP_NOIO);
			dm_rq_set_flush_nr(clone, j);
			atomic_inc(&md->pending[rq_data_dir(clone)]);
			map_request(ti, clone, md);
		}
	}

	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
	dm_table_put(map);

	return md->barrier_error;
}
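
/*
 * Editor's note: each target advertises how many flush requests it wants to
 * see (ti->num_flush_requests); dm_rq_barrier() sends that many clones of
 * md->flush_request to every target, tagging each clone with its flush
 * number via tio->info.flush_request, then waits for all of them with
 * dm_wait_for_completion().  Failures observed on the completion path are
 * recorded in md->barrier_error, which is returned to dm_rq_barrier_work().
 */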

static void dm_rq_barrier_work(struct work_struct *work)
{
	int error;
	struct mapped_device *md = container_of(work, struct mapped_device,
						barrier_work);
	struct request_queue *q = md->queue;
	struct request *rq;
	unsigned long flags;

	/*
	 * Hold the md reference here and release it only at the very end,
	 * so that the md can't be deleted by a device opener while the
	 * barrier request is completing.
	 */
	dm_get(md);

	error = dm_rq_barrier(md);

	rq = md->flush_request;
	md->flush_request = NULL;

	if (error == DM_ENDIO_REQUEUE) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_requeue_request(q, rq);
		spin_unlock_irqrestore(q->queue_lock, flags);
	} else
		blk_end_request_all(rq, error);

	blk_run_queue(q);

	dm_put(md);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	r = dm_calculate_queue_limits(table, &limits);
	if (r) {
		map = ERR_PTR(r);
		goto out;
	}

	/* cannot change the device type, once a table is bound */
	if (md->map &&
	    (dm_table_get_type(md->map) != dm_table_get_type(table))) {
		DMWARN("can't change the device type after a table is bound");
		goto out;
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */

int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_live_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq). flush_workqueue will wait until
	 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	/*
	 * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
	 * can be kicked until md->queue is stopped.  So stop md->queue before
	 * flushing md->wq.
	 */
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */
	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
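
/*
 * Editor's note: a typical table-swap sequence, roughly as driven from the
 * ioctl layer (sketch only, error handling omitted):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old = dm_swap_table(md, new_table);
 *	dm_table_destroy(old);
 *	dm_resume(md);
 *
 * i.e. dm_suspend() quiesces and defers I/O, dm_swap_table() binds the new
 * map while the device is suspended, and dm_resume() restarts the queue and
 * replays the deferred I/O against the new table.
 */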

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_live_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);
	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		       unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
{
	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);

	if (!pools)
		return NULL;

	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
	if (!pools->io_pool)
		goto free_pools_and_out;

	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
	if (!pools->tio_pool)
		goto free_io_pool_and_out;

	pools->bs = (type == DM_TYPE_BIO_BASED) ?
		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
	if (!pools->bs)
		goto free_tio_pool_and_out;

	return pools;

free_tio_pool_and_out:
	mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
	mempool_destroy(pools->io_pool);

free_pools_and_out:
	kfree(pools);

	return NULL;
}
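
/*
 * Editor's note: pool selection above mirrors the two I/O paths. Bio-based
 * devices draw from _io_cache/_tio_cache and get a small 16-entry bioset,
 * while request-based devices draw from _rq_bio_info_cache/_rq_tio_cache and
 * get a bioset sized by MIN_IOS; the slab caches and bioset size are chosen
 * to match the table type decided at first table load.
 */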

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");