dm-mpath-rdac.c

/*
 * Engenio/LSI RDAC DM HW handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

#define DM_MSG_PREFIX "multipath rdac"

#include "dm.h"
#include "dm-hw-handler.h"

#define RDAC_DM_HWH_NAME "rdac"
#define RDAC_DM_HWH_VER "0.4"

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20

/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_ALL_LUNS		0x01
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESCENCE 0x02

#define RDAC_FAILOVER_TIMEOUT (60 * HZ)

struct rdac_mode_6_hdr {
	u8 data_len;
	u8 medium_type;
	u8 device_params;
	u8 block_desc_len;
};

struct rdac_mode_10_hdr {
	u16 data_len;
	u8 medium_type;
	u8 device_params;
	u16 reserved;
	u16 block_desc_len;
};

struct rdac_mode_common {
	u8 controller_serial[16];
	u8 alt_controller_serial[16];
	u8 rdac_mode[2];
	u8 alt_rdac_mode[2];
	u8 quiescence_timeout;
	u8 rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8 page_code;
	u8 page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN 32
	u8 lun_table[MODE6_MAX_LUN];
	u8 reserved2[32];
	u8 reserved3;
	u8 reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8 page_code;
	u8 subpage_code;
	u8 page_len[2];
	struct rdac_mode_common common;
	u8 lun_table[256];
	u8 reserved3;
	u8 reserved4;
};

struct c9_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC9 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "vace" */
	u8 avte_cvp;
	u8 path_prio;
	u8 reserved2[38];
};

#define SUBSYS_ID_LEN 16
#define SLOT_ID_LEN 2

struct c4_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC4 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "subs" */
	u8 subsys_id[SUBSYS_ID_LEN];
	u8 revision[4];
	u8 slot_id[SLOT_ID_LEN];
	u8 reserved[2];
};

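/*
 * State shared by all paths that reach the same RDAC controller,
 * keyed by the subsystem id and slot id from the 0xC4 inquiry page.
 * Only one MODE SELECT is in flight per controller at a time
 * (submitted); any other handler that needs one queues itself on
 * cmd_list and is restarted when the in-flight command completes.
 */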
struct rdac_controller {
	u8 subsys_id[SUBSYS_ID_LEN];
	u8 slot_id[SLOT_ID_LEN];
	int use_10_ms;
	struct kref kref;
	struct list_head node;	/* list of all controllers */
	spinlock_t lock;
	int submitted;
	struct list_head cmd_list;	/* list of commands to be submitted */
	union {
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
};

struct c8_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC8 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "edid" */
	u8 reserved2[3];
	u8 vol_uniq_id_len;
	u8 vol_uniq_id[16];
	u8 vol_user_label_len;
	u8 vol_user_label[60];
	u8 array_uniq_id_len;
	u8 array_unique_id[16];
	u8 array_user_label_len;
	u8 array_user_label[60];
	u8 lun[8];
};

struct c2_inquiry {
	u8 peripheral_info;
	u8 page_code;	/* 0xC2 */
	u8 reserved1;
	u8 page_len;
	u8 page_id[4];	/* "swr4" */
	u8 sw_version[3];
	u8 sw_date[3];
	u8 features_enabled;
	u8 max_lun_supported;
	u8 partitions[239];	/* Total allocation length should be 0xFF */
};

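/*
 * Per-path handler context. pg_init walks a small state machine:
 * C8 inquiry (find the LUN) -> C9 inquiry (check AVT mode/ownership)
 * -> C4 inquiry (identify the controller) and/or C2 inquiry (pick
 * MODE SELECT 6 vs 10) -> MODE SELECT to transfer LUN ownership.
 * cmd_to_send records the next step for the workqueue.
 */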
struct rdac_handler {
	struct list_head entry;	/* list waiting to submit MODE SELECT */
	unsigned timeout;
	struct rdac_controller *ctlr;
#define UNINITIALIZED_LUN (1 << 8)
	unsigned lun;
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	struct dm_path *path;
	struct work_struct work;
#define	SEND_C2_INQUIRY		1
#define	SEND_C4_INQUIRY		2
#define	SEND_C8_INQUIRY		3
#define	SEND_C9_INQUIRY		4
#define	SEND_MODE_SELECT	5
	int cmd_to_send;
	union {
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *rdac_wkqd;

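/*
 * A request failed if the block layer reported an error, or if the
 * host/message bytes of the SCSI result are not DID_OK/COMMAND_COMPLETE.
 */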
static inline int had_failures(struct request *req, int error)
{
	return (error || host_byte(req->errors) != DID_OK ||
			msg_byte(req->errors) != COMMAND_COMPLETE);
}

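/*
 * Re-queue every handler that was waiting for the in-flight MODE SELECT
 * on this controller, restarting each of them from the C9 inquiry.
 */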
static void rdac_resubmit_all(struct rdac_handler *h)
{
	struct rdac_controller *ctlr = h->ctlr;
	struct rdac_handler *tmp, *h1;

	spin_lock(&ctlr->lock);
	list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
		h1->cmd_to_send = SEND_C9_INQUIRY;
		queue_work(rdac_wkqd, &h1->work);
		list_del(&h1->entry);
	}
	ctlr->submitted = 0;
	spin_unlock(&ctlr->lock);
}

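/*
 * Completion for the MODE SELECT request: report pg_init success or
 * failure, retrying from the C9 inquiry for a few known-transient
 * sense codes.
 */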
static void mode_select_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct scsi_sense_hdr sense_hdr;
	int sense = 0, fail = 0;

	if (had_failures(req, error)) {
		fail = 1;
		goto failed;
	}

	if (status_byte(req->errors) == CHECK_CONDITION) {
		scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
				&sense_hdr);
		sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
				sense_hdr.ascq;
		/* If it is a retryable failure, submit the c9 inquiry again */
		if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
		    sense == 0x62900) {
			/* 0x59136    - Command lock contention
			 * 0x[6b]8b02 - Quiescence in progress or achieved
			 * 0x62900    - Power On, Reset, or Bus Device Reset
			 */
			h->cmd_to_send = SEND_C9_INQUIRY;
			queue_work(rdac_wkqd, &h->work);
			goto done;
		}
		if (sense)
			DMINFO("MODE_SELECT failed on %s with sense 0x%x",
						h->path->dev->name, sense);
	}
failed:
	if (fail || sense)
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
	else
		dm_pg_init_complete(h->path, 0);

done:
	rdac_resubmit_all(h);
	__blk_put_request(req->q, req);
}

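/*
 * Allocate and initialize a BLOCK_PC request for the handler's private
 * commands; the caller fills in the CDB.
 */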
static struct request *get_rdac_req(struct rdac_handler *h,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq) {
		DMINFO("get_rdac_req: blk_get_request failed");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
		blk_put_request(rq);
		DMINFO("get_rdac_req: blk_rq_map_kern failed");
		return NULL;
	}

	memset(&rq->cmd, 0, BLK_MAX_CDB);
	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	rq->end_io_data = h;
	rq->timeout = h->timeout;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
	return rq;
}

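/*
 * Build the MODE SELECT request that asks the alternate controller to
 * take over this LUN (0x81 in the lun_table entry, with mode
 * RDAC_MODE_TRANSFER_SPECIFIED_LUNS), using the expanded subpage
 * format when the controller supports MODE SELECT(10).
 */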
static struct request *rdac_failover_get(struct rdac_handler *h)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;

	if (h->ctlr->use_10_ms) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		rdac_pg->lun_table[h->lun] = 0x81;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		rdac_pg->lun_table[h->lun] = 0x81;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESCENCE;

	/* get request for block layer packet command */
	rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq) {
		DMERR("rdac_failover_get: no rq");
		return NULL;
	}

	/* Prepare the command. */
	if (h->ctlr->use_10_ms) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	return rq;
}

/* Acquires h->ctlr->lock */
static void submit_mode_select(struct rdac_handler *h)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	spin_lock(&h->ctlr->lock);
	if (h->ctlr->submitted) {
		list_add(&h->entry, &h->ctlr->cmd_list);
		goto drop_lock;
	}

	if (!q) {
		DMINFO("submit_mode_select: no queue");
		goto fail_path;
	}

	rq = rdac_failover_get(h);
	if (!rq) {
		DMERR("submit_mode_select: no rq");
		goto fail_path;
	}

	DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);

	blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
	h->ctlr->submitted = 1;
	goto drop_lock;
fail_path:
	dm_pg_init_complete(h->path, MP_FAIL_PATH);
drop_lock:
	spin_unlock(&h->ctlr->lock);
}

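/*
 * Controller objects are refcounted: release_ctlr runs when the last
 * path drops its reference; get_controller finds an existing entry by
 * subsystem/slot id or allocates a new one, both under list_lock.
 */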
static void release_ctlr(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	spin_lock(&list_lock);
	list_del(&ctlr->node);
	spin_unlock(&list_lock);
	kfree(ctlr);
}

static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);
	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
			  (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
	kref_init(&ctlr->kref);
	spin_lock_init(&ctlr->lock);
	ctlr->submitted = 0;
	ctlr->use_10_ms = -1;
	INIT_LIST_HEAD(&ctlr->cmd_list);
	list_add(&ctlr->node, &ctlr_list);
done:
	spin_unlock(&list_lock);
	return ctlr;
}

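/*
 * Completion for the 0xC4 ("subs") inquiry: bind this path to its
 * controller object, then move on to the C9 inquiry.
 */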
static void c4_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c4_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	sp = &h->inq.c4;

	h->ctlr = get_controller(sp->subsys_id, sp->slot_id);

	if (h->ctlr) {
		h->cmd_to_send = SEND_C9_INQUIRY;
		queue_work(rdac_wkqd, &h->work);
	} else
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
done:
	__blk_put_request(req->q, req);
}

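/*
 * Completion for the 0xC2 ("swr4") inquiry: decide between MODE
 * SELECT(6) and MODE SELECT(10) from the reported LUN count, then
 * submit the mode select.
 */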
static void c2_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c2_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	sp = &h->inq.c2;

	/* If MODE6_MAX_LUN or more LUNs are supported, use MODE SELECT(10) */
	if (sp->max_lun_supported >= MODE6_MAX_LUN)
		h->ctlr->use_10_ms = 1;
	else
		h->ctlr->use_10_ms = 0;

	h->cmd_to_send = SEND_MODE_SELECT;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

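/*
 * Completion for the 0xC9 ("vace") inquiry: if the path already has
 * access to the LUN (AVT mode, or this controller owns it), pg_init
 * is done; otherwise work out what we still need to know before
 * issuing the MODE SELECT.
 */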
static void c9_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c9_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	/* We need to look at the sense keys here to take clear action.
	 * For now simple logic: If the host is in AVT mode or if controller
	 * owns the lun, return dm_pg_init_complete(), otherwise submit
	 * MODE SELECT.
	 */
	sp = &h->inq.c9;

	/* If in AVT mode, return success */
	if ((sp->avte_cvp >> 7) == 0x1) {
		dm_pg_init_complete(h->path, 0);
		goto done;
	}

	/* If the controller on this path owns the LUN, return success */
	if (sp->avte_cvp & 0x1) {
		dm_pg_init_complete(h->path, 0);
		goto done;
	}

	if (h->ctlr) {
		if (h->ctlr->use_10_ms == -1)
			h->cmd_to_send = SEND_C2_INQUIRY;
		else
			h->cmd_to_send = SEND_MODE_SELECT;
	} else
		h->cmd_to_send = SEND_C4_INQUIRY;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

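/*
 * Completion for the 0xC8 ("edid") inquiry: extract the LUN number,
 * then move on to the C9 inquiry.
 */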
static void c8_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c8_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	/* We need to look at the sense keys here to take clear action.
	 * For now simple logic: Get the lun from the inquiry page.
	 */
	sp = &h->inq.c8;
	h->lun = sp->lun[7]; /* currently it uses only one byte */
	h->cmd_to_send = SEND_C9_INQUIRY;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

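/* Send an EVPD inquiry for the given page code and complete via endio. */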
static void submit_inquiry(struct rdac_handler *h, int page_code,
			   unsigned int len, rq_end_io_fn endio)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	if (!q)
		goto fail_path;

	rq = get_rdac_req(h, &h->inq, len, READ);
	if (!rq)
		goto fail_path;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);
	blk_execute_rq_nowait(q, NULL, rq, 1, endio);
	return;

fail_path:
	dm_pg_init_complete(h->path, MP_FAIL_PATH);
}

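/*
 * Workqueue handler: issue whichever command the state machine queued
 * via cmd_to_send.
 */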
static void service_wkq(struct work_struct *work)
{
	struct rdac_handler *h = container_of(work, struct rdac_handler, work);

	switch (h->cmd_to_send) {
	case SEND_C2_INQUIRY:
		submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
		break;
	case SEND_C4_INQUIRY:
		submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
		break;
	case SEND_C8_INQUIRY:
		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
		break;
	case SEND_C9_INQUIRY:
		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
		break;
	case SEND_MODE_SELECT:
		submit_mode_select(h);
		break;
	default:
		BUG();
	}
}

/*
 * For now only subpage 2c and basic RDAC (AVT already works) are
 * supported, until we confirm whether the rest is just a matter of
 * updating firmware; more can be added when we get time and testers.
 */
static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
	struct rdac_handler *h;
	unsigned timeout;

	if (argc == 0) {
		/* No arguments: use defaults */
		timeout = RDAC_FAILOVER_TIMEOUT;
	} else if (argc != 1) {
		DMWARN("incorrect number of arguments");
		return -EINVAL;
	} else {
		/* the single argument is the failover timeout */
		if (sscanf(argv[0], "%u", &timeout) != 1) {
			DMWARN("invalid timeout value");
			return -EINVAL;
		}
	}

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	hwh->context = h;
	h->timeout = timeout;
	h->lun = UNINITIALIZED_LUN;
	INIT_WORK(&h->work, service_wkq);
	DMWARN("using RDAC command with timeout %u", h->timeout);

	return 0;
}

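/*
 * Usage sketch (hypothetical device-mapper table fragment): the handler
 * is selected by name in the multipath target's hardware-handler block,
 * either with no arguments ("1 rdac") for the default failover timeout,
 * or with an explicit timeout argument ("2 rdac <timeout>"). The exact
 * syntax is the multipath target's, not this file's.
 */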
static void rdac_destroy(struct hw_handler *hwh)
{
	struct rdac_handler *h = hwh->context;

	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_ctlr);
	kfree(h);
	hwh->context = NULL;
}

static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
{
	/* Try default handler */
	return dm_scsi_err_handler(hwh, bio);
}

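/*
 * Path-group init entry point: start the state machine at the C8
 * inquiry on first use (LUN unknown) or at the C9 inquiry afterwards.
 */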
static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
			struct dm_path *path)
{
	struct rdac_handler *h = hwh->context;

	h->path = path;
	switch (h->lun) {
	case UNINITIALIZED_LUN:
		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
		break;
	default:
		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
	}
}

static struct hw_handler_type rdac_handler = {
	.name = RDAC_DM_HWH_NAME,
	.module = THIS_MODULE,
	.create = rdac_create,
	.destroy = rdac_destroy,
	.pg_init = rdac_pg_init,
	.error = rdac_error,
};

static int __init rdac_init(void)
{
	int r;

	rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
	if (!rdac_wkqd) {
		DMERR("Failed to create workqueue rdac_wkqd.");
		return -ENOMEM;
	}

	r = dm_register_hw_handler(&rdac_handler);
	if (r < 0) {
		DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
		destroy_workqueue(rdac_wkqd);
		return r;
	}

	DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
	return 0;
}

static void __exit rdac_exit(void)
{
	int r = dm_unregister_hw_handler(&rdac_handler);

	destroy_workqueue(rdac_wkqd);
	if (r < 0)
		DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");
MODULE_VERSION(RDAC_DM_HWH_VER);