dm-mpath.c

/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <scsi/scsi_dh.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "multipath"
#define MESG_STR(x) x, sizeof(x)
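/* Pairs a string literal with its size, for the strnicmp() checks below. */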
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	spinlock_t lock;

	const char *hw_handler_name;
	char *hw_handler_params;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */

	unsigned queue_io;		/* Must we queue all I/O? */
	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path;/* Saved state during suspension */
	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	struct work_struct process_queued_ios;
	struct list_head queued_ios;
	unsigned queue_size;

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

#define MIN_IOS 256	/* Mempool size */

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;
	struct multipath *m = ti->private;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		if (m->hw_handler_name)
			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		INIT_LIST_HEAD(&m->queued_ios);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->process_queued_ios, process_queued_ios);
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);
		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
		if (!m->mpio_pool) {
			kfree(m);
			return NULL;
		}
		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}


/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static void __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	m->pg_init_count++;
	m->pg_init_required = 0;
	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths)
		goto failed;

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes))
				return;
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
		dm_noflush_suspending(m->ti));
}
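
/*
 * Map a clone to a live path.  Returns DM_MAPIO_REMAPPED when the clone
 * has been pointed at a path's queue, DM_MAPIO_SUBMITTED when it was
 * queued internally for later resubmission, DM_MAPIO_REQUEUE to push it
 * back to the dm core, or a negative errno to fail the request.
 */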
static int map_io(struct multipath *m, struct request *clone,
		  struct dm_mpath_io *mpio, unsigned was_queued)
{
	int r = DM_MAPIO_REMAPPED;
	size_t nr_bytes = blk_rq_bytes(clone);
	unsigned long flags;
	struct pgpath *pgpath;
	struct block_device *bdev;

	spin_lock_irqsave(&m->lock, flags);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (was_queued)
		m->queue_size--;

	if ((pgpath && m->queue_io) ||
	    (!pgpath && m->queue_if_no_path)) {
		/* Queue for the daemon to resubmit */
		list_add_tail(&clone->queuelist, &m->queued_ios);
		m->queue_size++;
		if ((m->pg_init_required && !m->pg_init_in_progress) ||
		    !m->queue_io)
			queue_work(kmultipathd, &m->process_queued_ios);
		pgpath = NULL;
		r = DM_MAPIO_SUBMITTED;
	} else if (pgpath) {
		bdev = pgpath->path.dev->bdev;
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
	} else if (__must_push_back(m))
		r = DM_MAPIO_REQUEUE;
	else
		r = -EIO;	/* Failed */

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
					      nr_bytes);

	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	if (!m->queue_if_no_path && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}
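
/*
 * This setting is toggled from userspace at runtime via the message
 * interface, e.g. (hypothetical device name):
 *   dmsetup message mpathX 0 queue_if_no_path
 */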
/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/

static void dispatch_queued_ios(struct multipath *m)
{
	int r;
	unsigned long flags;
	struct dm_mpath_io *mpio;
	union map_info *info;
	struct request *clone, *n;
	LIST_HEAD(cl);

	spin_lock_irqsave(&m->lock, flags);
	list_splice_init(&m->queued_ios, &cl);
	spin_unlock_irqrestore(&m->lock, flags);

	list_for_each_entry_safe(clone, n, &cl, queuelist) {
		list_del_init(&clone->queuelist);

		info = dm_get_rq_mapinfo(clone);
		mpio = info->ptr;

		r = map_io(m, clone, mpio, 1);
		if (r < 0) {
			mempool_free(mpio, m->mpio_pool);
			dm_kill_unmapped_request(clone, r);
		} else if (r == DM_MAPIO_REMAPPED)
			dm_dispatch_request(clone);
		else if (r == DM_MAPIO_REQUEUE) {
			mempool_free(mpio, m->mpio_pool);
			dm_requeue_unmapped_request(clone);
		}
	}
}

static void process_queued_ios(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, process_queued_ios);
	struct pgpath *pgpath = NULL;
	unsigned must_queue = 1;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->queue_size)
		goto out;

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	if ((pgpath && !m->queue_io) ||
	    (!pgpath && !m->queue_if_no_path))
		must_queue = 0;

	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
		__pg_init_all_paths(m);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (!must_queue)
		dispatch_queued_ios(m);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
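
/*
 * For illustration, a hypothetical two-path, single-group table line
 * using the round-robin selector might look like:
 *
 *   0 1024 multipath 0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * i.e. no feature args, no hardware handler, one priority group (also
 * the initial one), two paths with one selector arg (repeat count) each.
 */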
struct param {
	unsigned min;
	unsigned max;
	char *error;
};

static int read_param(struct param *param, char *str, unsigned *v, char **error)
{
	if (!str ||
	    (sscanf(str, "%u", v) != 1) ||
	    (*v < param->min) ||
	    (*v > param->max)) {
		*error = param->error;
		return -EINVAL;
	}

	return 0;
}

struct arg_set {
	unsigned argc;
	char **argv;
};

static char *shift(struct arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}

static void consume(struct arg_set *as, unsigned n)
{
	BUG_ON(as->argc < n);
	as->argc -= n;
	as->argv += n;
}

static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct param _params[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(shift(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = read_param(_params, shift(as), &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	if (ps_argc > as->argc) {
		dm_put_path_selector(pst);
		ti->error = "not enough arguments for path selector";
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	consume(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->hw_handler_name) {
		struct request_queue *q = bdev_get_queue(p->path.dev->bdev);

		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			/*
			 * Already attached to different hw_handler,
			 * try to reattach with correct one.
			 */
			scsi_dh_detach(q);
			r = scsi_dh_attach(q, m->hw_handler_name);
		}

		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
					    "handler parameters";
				scsi_dh_detach(q);
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct arg_set *as,
						   struct multipath *m)
{
	static struct param _params[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_params;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_params = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct arg_set path_args;

		if (as->argc < nr_params) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_params;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		consume(as, nr_params);
	}

	return pg;

bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}
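
/*
 * Parse the hardware handler arguments.  Any args beyond the handler
 * name are flattened into hw_handler_params as NUL-separated fields
 * prefixed with their count: e.g. a hypothetical "3 emc 0 1" yields
 * hw_handler_name "emc" and hw_handler_params "2\0" "0\0" "1".
 */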
static int parse_hw_handler(struct arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct param _params[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (read_param(_params, shift(as), &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (hw_argc > as->argc) {
		ti->error = "not enough arguments for hardware handler";
		return -EINVAL;
	}

	m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
	request_module("scsi_dh_%s", m->hw_handler_name);
	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
		ti->error = "unknown hardware handler type";
		ret = -EINVAL;
		goto fail;
	}

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	consume(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}
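
/*
 * Parse optional feature arguments.  Hypothetical examples: "0" (no
 * features), "1 queue_if_no_path", "2 pg_init_retries 5",
 * "2 pg_init_delay_msecs 2000".
 */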
static int parse_features(struct arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *param_name;

	static struct param _params[] = {
		{0, 5, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = read_param(_params, shift(as), &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		param_name = shift(as);
		argc--;

		if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strnicmp(param_name, MESG_STR("pg_init_retries")) &&
		    (argc >= 1)) {
			r = read_param(_params + 1, shift(as),
				       &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) &&
		    (argc >= 1)) {
			r = read_param(_params + 2, shift(as),
				       &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target parameters */
	static struct param _params[] = {
		{1, 1024, "invalid number of priority groups"},
		{1, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error);
	if (r)
		goto bad;

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;

	return 0;

bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work_sync(&m->trigger_event);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Map cloned requests
 */
static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	int r;
	struct dm_mpath_io *mpio;
	struct multipath *m = (struct multipath *) ti->private;

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		/* ENOMEM, requeue */
		return DM_MAPIO_REQUEUE;
	memset(mpio, 0, sizeof(*mpio));

	map_context->ptr = mpio;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	r = map_io(m, clone, mpio, 0);
	if (r < 0 || r == DM_MAPIO_REQUEUE)
		mempool_free(mpio, m->mpio_pool);

	return r;
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++ && m->queue_size) {
		m->current_pgpath = NULL;
		queue_work(kmultipathd, &m->process_queued_ios);
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = 0;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;

	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;

	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}
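
/*
 * The PG controls above are driven from userspace via the message
 * interface, e.g. (hypothetical device name)
 * "dmsetup message mpathX 0 disable_group 2" to bypass PG 2.
 */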
/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
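		/* fall through to the shared retry handling below */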
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still ongoing */
		goto out;

	if (!m->pg_init_required)
		m->queue_io = 0;

	m->pg_init_delay_retry = delay_retry;
	queue_work(kmultipathd, &m->process_queued_ios);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
			 pg_init_done, pgpath);
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, and that requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (error == -EOPNOTSUPP || error == -EREMOTEIO)
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (!m->queue_if_no_path) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = map_context->ptr;
	struct pgpath *pgpath = mpio->pgpath;
	struct path_selector *ps;
	int r;

	r = do_end_io(m, clone, error, mpio);
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	mempool_free(mpio, m->mpio_pool);

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed, so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;

	queue_if_no_path(m, 0, 1);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->queue_if_no_path = m->saved_queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
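
/*
 * E.g. a hypothetical healthy single-group, two-path round-robin device
 * could report something like "2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0"
 * for STATUSTYPE_INFO (the exact fields depend on the path selector).
 */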
static int multipath_status(struct dm_target *ti, status_type_t type,
			    char *result, unsigned int maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = (struct multipath *) ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
	else {
		DMEMIT("%u ", m->queue_if_no_path +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
		if (m->queue_if_no_path)
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = 1;

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}
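
/*
 * Message handler.  Accepted messages (all sent via "dmsetup message"):
 *   queue_if_no_path | fail_if_no_path
 *   disable_group <#PG> | enable_group <#PG> | switch_group <#PG>
 *   reinstate_path <dev> | fail_path <dev>
 */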
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = (struct multipath *) ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) {
			r = queue_if_no_path(m, 1, 0);
			goto out;
		} else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) {
			r = queue_if_no_path(m, 0, 0);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Unrecognised multipath message received.");
		goto out;
	}

	if (!strnicmp(argv[0], MESG_STR("disable_group"))) {
		r = bypass_pg_num(m, argv[1], 1);
		goto out;
	} else if (!strnicmp(argv[0], MESG_STR("enable_group"))) {
		r = bypass_pg_num(m, argv[1], 0);
		goto out;
	} else if (!strnicmp(argv[0], MESG_STR("switch_group"))) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
		action = reinstate_path;
	else if (!strnicmp(argv[0], MESG_STR("fail_path")))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received.");
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
			   unsigned long arg)
{
	struct multipath *m = (struct multipath *) ti->private;
	struct block_device *bdev = NULL;
	fmode_t mode = 0;
	unsigned long flags;
	int r = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	if (m->current_pgpath) {
		bdev = m->current_pgpath->path.dev->bdev;
		mode = m->current_pgpath->path.dev->mode;
	}

	if (m->queue_io)
		r = -EAGAIN;
	else if (!bdev)
		r = -EIO;

	spin_unlock_irqrestore(&m->lock, flags);

	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int __pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return dm_underlying_device_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	int busy = 0, has_active = 0;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid triggering
		 * pg_init just by busy checking.
		 * So we don't know whether the underlying devices we will be
		 * using at next mapping time are busy or not.  Just try mapping.
		 */
		goto out;

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it.  So we consider such a pg as not busy.
	 */
	busy = 1;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = 1;

			if (!__pgpath_busy(pgpath)) {
				busy = 0;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = 0;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}


/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 2, 0},
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.ioctl = multipath_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_mpio_cache);
		return -EINVAL;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading the existing workqueue.  Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		destroy_workqueue(kmultipathd);
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);

	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");