dm-mpath-rdac.c

/*
 * Engenio/LSI RDAC DM HW handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

#define DM_MSG_PREFIX "multipath rdac"

#include "dm.h"
#include "dm-hw-handler.h"

#define RDAC_DM_HWH_NAME "rdac"
#define RDAC_DM_HWH_VER "0.4"

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20

/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller mode definitions
 */
#define RDAC_MODE_TRANSFER_ALL_LUNS		0x01
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_FAILOVER_TIMEOUT (60 * HZ)

struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
	u8	reserved2[32];
	u8	reserved3;
	u8	reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
	u8	reserved3;
	u8	reserved4;
};

struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
	u8	path_prio;
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2

struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};

struct rdac_controller {
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	slot_id[SLOT_ID_LEN];
	int	use_10_ms;
	struct kref kref;
	struct list_head node; /* list of all controllers */
	spinlock_t lock;
	int	submitted;
	struct list_head cmd_list; /* list of commands to be submitted */
	union {
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
};

struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[16];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239];	/* Total allocation length should be 0xFF */
};

struct rdac_handler {
	struct list_head entry; /* list waiting to submit MODE SELECT */
	unsigned	timeout;
	struct rdac_controller	*ctlr;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned	lun;
	unsigned char	sense[SCSI_SENSE_BUFFERSIZE];
	struct dm_path	*path;
	struct work_struct	work;
#define	SEND_C2_INQUIRY		1
#define	SEND_C4_INQUIRY		2
#define	SEND_C8_INQUIRY		3
#define	SEND_C9_INQUIRY		4
#define	SEND_MODE_SELECT	5
	int	cmd_to_send;
	union {
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *rdac_wkqd;
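
/*
 * A request is considered failed if the block layer passed back an error
 * or if the SCSI host/message bytes report anything other than a clean
 * DID_OK/COMMAND_COMPLETE completion.
 */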
static inline int had_failures(struct request *req, int error)
{
	return (error || host_byte(req->errors) != DID_OK ||
			msg_byte(req->errors) != COMMAND_COMPLETE);
}
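
/*
 * Acquires h->ctlr->lock. Requeue a C9 inquiry for every handler that was
 * parked on the controller's cmd_list while a MODE SELECT was in flight,
 * then mark the controller as having no command submitted.
 */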
static void rdac_resubmit_all(struct rdac_handler *h)
{
	struct rdac_controller *ctlr = h->ctlr;
	struct rdac_handler *tmp, *h1;

	spin_lock(&ctlr->lock);
	list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
		h1->cmd_to_send = SEND_C9_INQUIRY;
		queue_work(rdac_wkqd, &h1->work);
		list_del(&h1->entry);
	}
	ctlr->submitted = 0;
	spin_unlock(&ctlr->lock);
}
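
/*
 * Completion handler for the failover MODE SELECT. A retryable sense code
 * causes the C9 inquiry to be resubmitted; any other failure fails the
 * path. In all cases, commands queued behind this MODE SELECT are
 * resubmitted before the request is released.
 */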
static void mode_select_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct scsi_sense_hdr sense_hdr;
	int sense = 0, fail = 0;

	if (had_failures(req, error)) {
		fail = 1;
		goto failed;
	}

	if (status_byte(req->errors) == CHECK_CONDITION) {
		scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
				&sense_hdr);
		sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
				sense_hdr.ascq;
		/* If it is a retryable failure, submit the c9 inquiry again */
		if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
		    sense == 0x62900) {
			/* 0x59136    - Command lock contention
			 * 0x[6b]8b02 - Quiescence in progress or achieved
			 * 0x62900    - Power On, Reset, or Bus Device Reset
			 */
			h->cmd_to_send = SEND_C9_INQUIRY;
			queue_work(rdac_wkqd, &h->work);
			goto done;
		}
		if (sense)
			DMINFO("MODE_SELECT failed on %s with sense 0x%x",
					h->path->dev->name, sense);
	}
failed:
	if (fail || sense)
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
	else
		dm_pg_init_complete(h->path, 0);

done:
	rdac_resubmit_all(h);
	__blk_put_request(req->q, req);
}
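
/*
 * Allocate a block-layer packet command (BLOCK_PC) request on the path's
 * queue, map the supplied buffer into it, and point it at the handler's
 * sense buffer and timeout. REQ_FAILFAST makes a dead path fail the
 * command quickly rather than retry; REQ_NOMERGE keeps the block layer
 * from merging a request that carries its own CDB.
 */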
static struct request *get_rdac_req(struct rdac_handler *h,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq) {
		DMINFO("get_rdac_req: blk_get_request failed");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
		blk_put_request(rq);
		DMINFO("get_rdac_req: blk_rq_map_kern failed");
		return NULL;
	}

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	rq->end_io_data = h;
	rq->timeout = h->timeout;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
	return rq;
}
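
/*
 * Build the redundant-controller mode page in the controller-wide buffer
 * and wrap it in a MODE SELECT request. The expanded (mode select 10)
 * layout is used when the array supports more than MODE6_MAX_LUN LUNs,
 * the legacy (mode select 6) layout otherwise. Since the mode is
 * RDAC_MODE_TRANSFER_SPECIFIED_LUNS, the lun_table entry for this LUN
 * (set to 0x81) selects it for transfer.
 */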
static struct request *rdac_failover_get(struct rdac_handler *h)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;

	if (h->ctlr->use_10_ms) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		rdac_pg->lun_table[h->lun] = 0x81;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		rdac_pg->lun_table[h->lun] = 0x81;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	/* get request for block layer packet command */
	rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq) {
		DMERR("rdac_failover_get: no rq");
		return NULL;
	}

	/* Prepare the command. */
	if (h->ctlr->use_10_ms) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	return rq;
}

/* Acquires h->ctlr->lock */
static void submit_mode_select(struct rdac_handler *h)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	spin_lock(&h->ctlr->lock);
	if (h->ctlr->submitted) {
		list_add(&h->entry, &h->ctlr->cmd_list);
		goto drop_lock;
	}

	if (!q) {
		DMINFO("submit_mode_select: no queue");
		goto fail_path;
	}

	rq = rdac_failover_get(h);
	if (!rq) {
		DMERR("submit_mode_select: no rq");
		goto fail_path;
	}

	DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);

	blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
	h->ctlr->submitted = 1;
	goto drop_lock;

fail_path:
	dm_pg_init_complete(h->path, MP_FAIL_PATH);
drop_lock:
	spin_unlock(&h->ctlr->lock);
}
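
/*
 * kref release callback: unlink the controller from the global list
 * under list_lock and free it.
 */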
static void release_ctlr(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	spin_lock(&list_lock);
	list_del(&ctlr->node);
	spin_unlock(&list_lock);
	kfree(ctlr);
}
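
/*
 * Look up the controller identified by subsys_id/slot_id on the global
 * list, taking a reference if it already exists; otherwise allocate,
 * initialize and register a new one. The allocation is GFP_ATOMIC
 * because list_lock is held across it.
 */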
static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);
	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
			(memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
	kref_init(&ctlr->kref);
	spin_lock_init(&ctlr->lock);
	ctlr->submitted = 0;
	ctlr->use_10_ms = -1;
	INIT_LIST_HEAD(&ctlr->cmd_list);
	list_add(&ctlr->node, &ctlr_list);
done:
	spin_unlock(&list_lock);
	return ctlr;
}
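
/*
 * Completion handler for the C4 ("subs") inquiry: bind the handler to
 * its rdac_controller, then move on to the C9 ownership check.
 */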
static void c4_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c4_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	sp = &h->inq.c4;

	h->ctlr = get_controller(sp->subsys_id, sp->slot_id);

	if (h->ctlr) {
		h->cmd_to_send = SEND_C9_INQUIRY;
		queue_work(rdac_wkqd, &h->work);
	} else
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
done:
	__blk_put_request(req->q, req);
}
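
/*
 * Completion handler for the C2 ("swr4") inquiry: pick mode select 6 or
 * 10 based on how many LUNs the array supports, then go on to submit the
 * MODE SELECT itself.
 */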
static void c2_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c2_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	sp = &h->inq.c2;

	/* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
	if (sp->max_lun_supported >= MODE6_MAX_LUN)
		h->ctlr->use_10_ms = 1;
	else
		h->ctlr->use_10_ms = 0;

	h->cmd_to_send = SEND_MODE_SELECT;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

static void c9_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c9_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	/* We need to look at the sense keys here to take clear action.
	 * For now simple logic: If the host is in AVT mode or if the
	 * controller owns the lun, return dm_pg_init_complete(), otherwise
	 * submit MODE SELECT.
	 */
	sp = &h->inq.c9;

	/* If in AVT mode, return success */
	if ((sp->avte_cvp >> 7) == 0x1) {
		dm_pg_init_complete(h->path, 0);
		goto done;
	}

	/* If the controller on this path owns the LUN, return success */
	if (sp->avte_cvp & 0x1) {
		dm_pg_init_complete(h->path, 0);
		goto done;
	}

	if (h->ctlr) {
		if (h->ctlr->use_10_ms == -1)
			h->cmd_to_send = SEND_C2_INQUIRY;
		else
			h->cmd_to_send = SEND_MODE_SELECT;
	} else
		h->cmd_to_send = SEND_C4_INQUIRY;
	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}

static void c8_endio(struct request *req, int error)
{
	struct rdac_handler *h = req->end_io_data;
	struct c8_inquiry *sp;

	if (had_failures(req, error)) {
		dm_pg_init_complete(h->path, MP_FAIL_PATH);
		goto done;
	}

	/* We need to look at the sense keys here to take clear action.
	 * For now simple logic: Get the lun from the inquiry page.
	 */
	sp = &h->inq.c8;
	h->lun = sp->lun[7]; /* currently it uses only one byte */
	h->cmd_to_send = SEND_C9_INQUIRY;

	queue_work(rdac_wkqd, &h->work);
done:
	__blk_put_request(req->q, req);
}
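
/*
 * Issue a vital product data INQUIRY (EVPD bit set) for the given page
 * code on the path's queue; the result lands in h->inq, and completion is
 * handled asynchronously by the supplied endio callback.
 */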
static void submit_inquiry(struct rdac_handler *h, int page_code,
		unsigned int len, rq_end_io_fn endio)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(h->path->dev->bdev);

	if (!q)
		goto fail_path;

	rq = get_rdac_req(h, &h->inq, len, READ);
	if (!rq)
		goto fail_path;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);
	blk_execute_rq_nowait(q, NULL, rq, 1, endio);
	return;

fail_path:
	dm_pg_init_complete(h->path, MP_FAIL_PATH);
}
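
/*
 * Workqueue service routine: dispatch whichever command h->cmd_to_send
 * says is next in the discovery/failover sequence.
 */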
static void service_wkq(struct work_struct *work)
{
	struct rdac_handler *h = container_of(work, struct rdac_handler, work);

	switch (h->cmd_to_send) {
	case SEND_C2_INQUIRY:
		submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
		break;
	case SEND_C4_INQUIRY:
		submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
		break;
	case SEND_C8_INQUIRY:
		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
		break;
	case SEND_C9_INQUIRY:
		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
		break;
	case SEND_MODE_SELECT:
		submit_mode_select(h);
		break;
	default:
		BUG();
	}
}

/*
 * Only support subpage 2c until we confirm that this is just a matter
 * of updating firmware or not, and RDAC (basic AVT works already) for
 * now; we can add these in when we get time and testers.
 */
static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
	struct rdac_handler *h;
	unsigned timeout;

	if (argc == 0) {
		/* No arguments: use defaults */
		timeout = RDAC_FAILOVER_TIMEOUT;
	} else if (argc != 1) {
		DMWARN("incorrect number of arguments");
		return -EINVAL;
	} else {
		/* with argc == 1 the single argument is argv[0] */
		if (sscanf(argv[0], "%u", &timeout) != 1) {
			DMWARN("invalid timeout value");
			return -EINVAL;
		}
	}

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	hwh->context = h;
	h->timeout = timeout;
	h->lun = UNINITIALIZED_LUN;
	INIT_WORK(&h->work, service_wkq);
	DMWARN("using RDAC command with timeout %u", h->timeout);

	return 0;
}

static void rdac_destroy(struct hw_handler *hwh)
{
	struct rdac_handler *h = hwh->context;

	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_ctlr);
	kfree(h);
	hwh->context = NULL;
}

static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
{
	/* Try default handler */
	return dm_scsi_err_handler(hwh, bio);
}
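
/*
 * Path-group initialization entry point. On first use the LUN number is
 * still unknown, so start with a C8 inquiry to discover it; afterwards
 * go straight to the C9 inquiry that checks LUN ownership.
 */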
static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
			struct dm_path *path)
{
	struct rdac_handler *h = hwh->context;

	h->path = path;
	switch (h->lun) {
	case UNINITIALIZED_LUN:
		submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
		break;
	default:
		submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
	}
}

static struct hw_handler_type rdac_handler = {
	.name = RDAC_DM_HWH_NAME,
	.module = THIS_MODULE,
	.create = rdac_create,
	.destroy = rdac_destroy,
	.pg_init = rdac_pg_init,
	.error = rdac_error,
};

static int __init rdac_init(void)
{
	int r;

	rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
	if (!rdac_wkqd) {
		DMERR("Failed to create workqueue rdac_wkqd.");
		return -ENOMEM;
	}

	r = dm_register_hw_handler(&rdac_handler);
	if (r < 0) {
		DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
		destroy_workqueue(rdac_wkqd);
		return r;
	}

	DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
	return 0;
}

static void __exit rdac_exit(void)
{
	int r = dm_unregister_hw_handler(&rdac_handler);

	destroy_workqueue(rdac_wkqd);
	if (r < 0)
		DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");
MODULE_VERSION(RDAC_DM_HWH_VER);