/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-path-selector.h"
#include "dm-bio-record.h"
#include "dm-uevent.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <scsi/scsi_dh.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "multipath"
#define MESG_STR(x) x, sizeof(x)
/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct work_struct deactivate_path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};
/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	spinlock_t lock;

	const char *hw_handler_name;
	struct work_struct activate_path;
	struct pgpath *pgpath_to_activate;
	unsigned nr_priority_groups;
	struct list_head priority_groups;
	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */

	unsigned queue_io;		/* Must we queue all I/O? */
	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path;/* Saved state during suspension */
	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */

	struct work_struct process_queued_ios;
	struct bio_list queued_ios;
	unsigned queue_size;

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	struct dm_bio_details details;
};

typedef int (*action_fn) (struct pgpath *pgpath);

#define MIN_IOS 256	/* Mempool size */

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
static void deactivate_path(struct work_struct *work);
/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_WORK(&pgpath->deactivate_path, deactivate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static void deactivate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, deactivate_path);

	blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	unsigned long flags;
	struct pgpath *pgpath, *tmp;
	struct multipath *m = ti->private;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		if (m->hw_handler_name)
			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
		dm_put_device(ti, pgpath->path.dev);
		spin_lock_irqsave(&m->lock, flags);
		if (m->pgpath_to_activate == pgpath)
			m->pgpath_to_activate = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		INIT_WORK(&m->process_queued_ios, process_queued_ios);
		INIT_WORK(&m->trigger_event, trigger_event);
		INIT_WORK(&m->activate_path, activate_path);
		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
		if (!m->mpio_pool) {
			kfree(m);
			return NULL;
		}
		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}
/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths)
		goto failed;

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg))
				return;
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
		dm_noflush_suspending(m->ti));
}
static int map_io(struct multipath *m, struct bio *bio,
		  struct dm_mpath_io *mpio, unsigned was_queued)
{
	int r = DM_MAPIO_REMAPPED;
	unsigned long flags;
	struct pgpath *pgpath;

	spin_lock_irqsave(&m->lock, flags);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m);

	pgpath = m->current_pgpath;

	if (was_queued)
		m->queue_size--;

	if ((pgpath && m->queue_io) ||
	    (!pgpath && m->queue_if_no_path)) {
		/* Queue for the daemon to resubmit */
		bio_list_add(&m->queued_ios, bio);
		m->queue_size++;
		if ((m->pg_init_required && !m->pg_init_in_progress) ||
		    !m->queue_io)
			queue_work(kmultipathd, &m->process_queued_ios);
		pgpath = NULL;
		r = DM_MAPIO_SUBMITTED;
	} else if (pgpath)
		bio->bi_bdev = pgpath->path.dev->bdev;
	else if (__must_push_back(m))
		r = DM_MAPIO_REQUEUE;
	else
		r = -EIO;	/* Failed */

	mpio->pgpath = pgpath;

	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	if (!m->queue_if_no_path && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}
/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/

static void dispatch_queued_ios(struct multipath *m)
{
	int r;
	unsigned long flags;
	struct bio *bio = NULL, *next;
	struct dm_mpath_io *mpio;
	union map_info *info;

	spin_lock_irqsave(&m->lock, flags);
	bio = bio_list_get(&m->queued_ios);
	spin_unlock_irqrestore(&m->lock, flags);

	while (bio) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		info = dm_get_mapinfo(bio);
		mpio = info->ptr;

		r = map_io(m, bio, mpio, 1);
		if (r < 0)
			bio_endio(bio, r);
		else if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		else if (r == DM_MAPIO_REQUEUE)
			bio_endio(bio, -EIO);

		bio = next;
	}
}

static void process_queued_ios(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, process_queued_ios);
	struct pgpath *pgpath = NULL;
	unsigned init_required = 0, must_queue = 1;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->queue_size)
		goto out;

	if (!m->current_pgpath)
		__choose_pgpath(m);

	pgpath = m->current_pgpath;

	if ((pgpath && !m->queue_io) ||
	    (!pgpath && !m->queue_if_no_path))
		must_queue = 0;

	if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
		m->pgpath_to_activate = pgpath;
		m->pg_init_count++;
		m->pg_init_required = 0;
		m->pg_init_in_progress = 1;
		init_required = 1;
	}

out:
	spin_unlock_irqrestore(&m->lock, flags);

	if (init_required)
		queue_work(kmpath_handlerd, &m->activate_path);

	if (!must_queue)
		dispatch_queued_ios(m);
}
/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}
/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
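
/*
 * Illustrative example, not part of the original source: a complete
 * dmsetup table line (start and length precede the args parsed here)
 * matching the format above, using the round-robin path selector and
 * made-up device numbers 8:16 and 8:32:
 *
 *	0 409600 multipath 1 queue_if_no_path 0 1 1
 *		round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * That is: one feature arg (queue_if_no_path), no hardware handler
 * args, one priority group (also the initial group), selector
 * round-robin with no selector args, and two paths each carrying one
 * per-path selector arg (the round-robin repeat count).
 */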
struct param {
	unsigned min;
	unsigned max;
	char *error;
};

static int read_param(struct param *param, char *str, unsigned *v, char **error)
{
	if (!str ||
	    (sscanf(str, "%u", v) != 1) ||
	    (*v < param->min) ||
	    (*v > param->max)) {
		*error = param->error;
		return -EINVAL;
	}

	return 0;
}

struct arg_set {
	unsigned argc;
	char **argv;
};

static char *shift(struct arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}

static void consume(struct arg_set *as, unsigned n)
{
	BUG_ON(as->argc < n);

	as->argc -= n;
	as->argv += n;
}
static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct param _params[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(shift(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = read_param(_params, shift(as), &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	if (ps_argc > as->argc) {
		dm_put_path_selector(pst);
		ti->error = "not enough arguments for path selector";
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	consume(as, ps_argc);

	return 0;
}
static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, shift(as), ti->begin, ti->len,
			  dm_table_get_mode(ti->table), &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
				   m->hw_handler_name);
		if (r < 0) {
			dm_put_device(ti, p->path.dev);
			goto bad;
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

bad:
	free_pgpath(p);
	return ERR_PTR(r);
}
static struct priority_group *parse_priority_group(struct arg_set *as,
						   struct multipath *m)
{
	static struct param _params[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_params;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_params = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct arg_set path_args;

		if (as->argc < nr_params) {
			ti->error = "not enough path parameters";
			/* set r explicitly: it is still 0 from read_param */
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_params;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		consume(as, nr_params);
	}

	return pg;

bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}
static int parse_hw_handler(struct arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	struct dm_target *ti = m->ti;

	static struct param _params[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (read_param(_params, shift(as), &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
	if (!m->hw_handler_name) {
		ti->error = "failed to allocate hardware handler name";
		return -ENOMEM;
	}

	request_module("scsi_dh_%s", m->hw_handler_name);
	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
		ti->error = "unknown hardware handler type";
		kfree(m->hw_handler_name);
		m->hw_handler_name = NULL;
		return -EINVAL;
	}

	if (hw_argc > 1)
		DMWARN("Ignoring user-specified arguments for "
		       "hardware handler \"%s\"", m->hw_handler_name);
	consume(as, hw_argc - 1);

	return 0;
}
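
/*
 * Illustrative example, not part of the original source: a hardware
 * handler specification of "1 emc" in the table line makes the code
 * above load scsi_dh_emc, which parse_path() then attaches to each
 * path's request queue. As the warning above notes, any extra args
 * after the handler name are ignored.
 */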
static int parse_features(struct arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *param_name;

	static struct param _params[] = {
		{0, 3, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
	};

	r = read_param(_params, shift(as), &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		param_name = shift(as);
		argc--;

		if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strnicmp(param_name, MESG_STR("pg_init_retries")) &&
		    (argc >= 1)) {
			r = read_param(_params + 1, shift(as),
				       &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}
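
/*
 * Illustrative feature strings accepted by parse_features() above,
 * not part of the original source: "0" (no features),
 * "1 queue_if_no_path", "2 pg_init_retries 3", and
 * "3 queue_if_no_path pg_init_retries 5".
 */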
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target parameters */
	static struct param _params[] = {
		{1, 1024, "invalid number of priority groups"},
		{1, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error);
	if (r)
		goto bad;

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	return 0;

bad:
	free_multipath(m);
	return r;
}
static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;

	flush_workqueue(kmpath_handlerd);
	flush_workqueue(kmultipathd);
	free_multipath(m);
}

/*
 * Map bios, recording original fields for later in case we have to resubmit
 */
static int multipath_map(struct dm_target *ti, struct bio *bio,
			 union map_info *map_context)
{
	int r;
	struct dm_mpath_io *mpio;
	struct multipath *m = (struct multipath *) ti->private;

	mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
	dm_bio_record(&mpio->details, bio);

	map_context->ptr = mpio;
	bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
	r = map_io(m, bio, mpio, 0);
	if (r < 0 || r == DM_MAPIO_REQUEUE)
		mempool_free(mpio, m->mpio_pool);

	return r;
}
/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);
	queue_work(kmultipathd, &pgpath->deactivate_path);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	m->current_pgpath = NULL;
	if (!m->nr_valid_paths++ && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}
/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = 0;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;

	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;

	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}
/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(struct dm_path *path, int errors)
{
	struct pgpath *pgpath = path_to_pgpath(path);
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Cannot failover device because scsi_dh_%s was not "
		      "loaded.", m->hw_handler_name);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	/* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
	case SCSI_DH_RETRY:
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		DMERR("Could not failover device. Error %d.", errors);
		m->current_pgpath = NULL;
		m->current_pg = NULL;
	} else if (!m->pg_init_required) {
		m->queue_io = 0;
		pg->bypassed = 0;
	}

	m->pg_init_in_progress = 0;
	queue_work(kmultipathd, &m->process_queued_ios);
	spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_path(struct work_struct *work)
{
	int ret;
	struct multipath *m =
		container_of(work, struct multipath, activate_path);
	struct dm_path *path = NULL;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	/*
	 * Test pgpath_to_activate itself: taking &pgpath_to_activate->path
	 * first would yield a non-NULL pointer even with no path pending.
	 */
	if (m->pgpath_to_activate) {
		path = &m->pgpath_to_activate->path;
		m->pgpath_to_activate = NULL;
	}
	spin_unlock_irqrestore(&m->lock, flags);
	if (!path)
		return;

	ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
	pg_init_done(path, ret);
}
/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct bio *bio,
		     int error, struct dm_mpath_io *mpio)
{
	unsigned long flags;

	if (!error)
		return 0;	/* I/O complete */

	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
		return error;

	if (error == -EOPNOTSUPP)
		return error;

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (__must_push_back(m)) {
			spin_unlock_irqrestore(&m->lock, flags);
			return DM_ENDIO_REQUEUE;
		} else if (!m->queue_if_no_path) {
			spin_unlock_irqrestore(&m->lock, flags);
			return -EIO;
		} else {
			spin_unlock_irqrestore(&m->lock, flags);
			goto requeue;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

requeue:
	dm_bio_restore(&mpio->details, bio);

	/* queue for the daemon to resubmit or fail */
	spin_lock_irqsave(&m->lock, flags);
	bio_list_add(&m->queued_ios, bio);
	m->queue_size++;
	if (!m->queue_io)
		queue_work(kmultipathd, &m->process_queued_ios);
	spin_unlock_irqrestore(&m->lock, flags);

	return DM_ENDIO_INCOMPLETE;	/* io not complete */
}

static int multipath_end_io(struct dm_target *ti, struct bio *bio,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = map_context->ptr;
	struct pgpath *pgpath = mpio->pgpath;
	struct path_selector *ps;
	int r;

	r = do_end_io(m, bio, error, mpio);
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path);
	}
	if (r != DM_ENDIO_INCOMPLETE)
		mempool_free(mpio, m->mpio_pool);

	return r;
}
/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;

	queue_if_no_path(m, 0, 1);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->queue_if_no_path = m->saved_queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);
}
/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
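
/*
 * Illustrative example, not part of the original source: for a
 * two-path, single-group round-robin table with both paths healthy
 * and nothing queued, the info output might look like
 *
 *	2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *
 * assuming a selector that emits "0" for its group status and no
 * per-path info args; the exact fields are selector-specific.
 */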
static int multipath_status(struct dm_target *ti, status_type_t type,
			    char *result, unsigned int maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = (struct multipath *) ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
	else {
		DMEMIT("%u ", m->queue_if_no_path +
			      (m->pg_init_retries > 0) * 2);
		if (m->queue_if_no_path)
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = 1;

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct dm_dev *dev;
	struct multipath *m = (struct multipath *) ti->private;
	action_fn action;

	if (argc == 1) {
		if (!strnicmp(argv[0], MESG_STR("queue_if_no_path")))
			return queue_if_no_path(m, 1, 0);
		else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path")))
			return queue_if_no_path(m, 0, 0);
	}

	if (argc != 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("disable_group")))
		return bypass_pg_num(m, argv[1], 1);
	else if (!strnicmp(argv[0], MESG_STR("enable_group")))
		return bypass_pg_num(m, argv[1], 0);
	else if (!strnicmp(argv[0], MESG_STR("switch_group")))
		return switch_pg_num(m, argv[1]);
	else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
		action = reinstate_path;
	else if (!strnicmp(argv[0], MESG_STR("fail_path")))
		action = fail_path;
	else
		goto error;

	r = dm_get_device(ti, argv[1], ti->begin, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		return -EINVAL;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

	return r;

error:
	DMWARN("Unrecognised multipath message received.");
	return -EINVAL;
}
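
/*
 * Illustrative examples, not part of the original source: these
 * messages are normally delivered with dmsetup, e.g. for a
 * hypothetical map named "mpath0":
 *
 *	dmsetup message mpath0 0 fail_path 8:16
 *	dmsetup message mpath0 0 reinstate_path 8:16
 *	dmsetup message mpath0 0 switch_group 2
 *	dmsetup message mpath0 0 queue_if_no_path
 */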
static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
			   unsigned long arg)
{
	struct multipath *m = (struct multipath *) ti->private;
	struct block_device *bdev = NULL;
	fmode_t mode = 0;
	unsigned long flags;
	int r = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m);

	if (m->current_pgpath) {
		bdev = m->current_pgpath->path.dev->bdev;
		mode = m->current_pgpath->path.dev->mode;
	}

	if (m->queue_io)
		r = -EAGAIN;
	else if (!bdev)
		r = -EIO;

	spin_unlock_irqrestore(&m->lock, flags);

	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 0, 5},
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map = multipath_map,
	.end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.ioctl = multipath_ioctl,
};

static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_mpio_cache);
		return -EINVAL;
	}

	kmultipathd = create_workqueue("kmpathd");
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	/*
	 * A separate workqueue is used to handle the device handlers,
	 * to avoid overloading the existing workqueue; overloading it
	 * would also create a bottleneck in the path of storage
	 * hardware device activation.
	 */
	kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		destroy_workqueue(kmultipathd);
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);

	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");