/* qla_bsg.c */
  1. /*
  2. * QLogic Fibre Channel HBA Driver
  3. * Copyright (c) 2003-2008 QLogic Corporation
  4. *
  5. * See LICENSE.qla2xxx for copyright and licensing details.
  6. */
  7. #include "qla_def.h"
  8. #include <linux/kthread.h>
  9. #include <linux/vmalloc.h>
  10. #include <linux/delay.h>
  11. /* BSG support for ELS/CT pass through */
  12. inline srb_t *
  13. qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
  14. {
  15. srb_t *sp;
  16. struct qla_hw_data *ha = vha->hw;
  17. struct srb_bsg_ctx *ctx;
  18. sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
  19. if (!sp)
  20. goto done;
  21. ctx = kzalloc(size, GFP_KERNEL);
  22. if (!ctx) {
  23. mempool_free(sp, ha->srb_mempool);
  24. sp = NULL;
  25. goto done;
  26. }
  27. memset(sp, 0, sizeof(*sp));
  28. sp->fcport = fcport;
  29. sp->ctx = ctx;
  30. done:
  31. return sp;
  32. }
  33. static int
  34. qla2x00_process_els(struct fc_bsg_job *bsg_job)
  35. {
  36. struct fc_rport *rport;
  37. fc_port_t *fcport;
  38. struct Scsi_Host *host;
  39. scsi_qla_host_t *vha;
  40. struct qla_hw_data *ha;
  41. srb_t *sp;
  42. const char *type;
  43. int req_sg_cnt, rsp_sg_cnt;
  44. int rval = (DRIVER_ERROR << 16);
  45. uint16_t nextlid = 0;
  46. struct srb_bsg *els;
  47. /* Multiple SG's are not supported for ELS requests */
  48. if (bsg_job->request_payload.sg_cnt > 1 ||
  49. bsg_job->reply_payload.sg_cnt > 1) {
  50. DEBUG2(printk(KERN_INFO
  51. "multiple SG's are not supported for ELS requests"
  52. " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
  53. bsg_job->request_payload.sg_cnt,
  54. bsg_job->reply_payload.sg_cnt));
  55. rval = -EPERM;
  56. goto done;
  57. }
  58. /* ELS request for rport */
  59. if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
  60. rport = bsg_job->rport;
  61. fcport = *(fc_port_t **) rport->dd_data;
  62. host = rport_to_shost(rport);
  63. vha = shost_priv(host);
  64. ha = vha->hw;
  65. type = "FC_BSG_RPT_ELS";
  66. /* make sure the rport is logged in,
  67. * if not perform fabric login
  68. */
  69. if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
  70. DEBUG2(qla_printk(KERN_WARNING, ha,
  71. "failed to login port %06X for ELS passthru\n",
  72. fcport->d_id.b24));
  73. rval = -EIO;
  74. goto done;
  75. }
  76. } else {
  77. host = bsg_job->shost;
  78. vha = shost_priv(host);
  79. ha = vha->hw;
  80. type = "FC_BSG_HST_ELS_NOLOGIN";
  81. /* Allocate a dummy fcport structure, since functions
  82. * preparing the IOCB and mailbox command retrieves port
  83. * specific information from fcport structure. For Host based
  84. * ELS commands there will be no fcport structure allocated
  85. */
  86. fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
  87. if (!fcport) {
  88. rval = -ENOMEM;
  89. goto done;
  90. }
  91. /* Initialize all required fields of fcport */
  92. fcport->vha = vha;
  93. fcport->vp_idx = vha->vp_idx;
  94. fcport->d_id.b.al_pa =
  95. bsg_job->request->rqst_data.h_els.port_id[0];
  96. fcport->d_id.b.area =
  97. bsg_job->request->rqst_data.h_els.port_id[1];
  98. fcport->d_id.b.domain =
  99. bsg_job->request->rqst_data.h_els.port_id[2];
  100. fcport->loop_id =
  101. (fcport->d_id.b.al_pa == 0xFD) ?
  102. NPH_FABRIC_CONTROLLER : NPH_F_PORT;
  103. }
  104. if (!vha->flags.online) {
  105. DEBUG2(qla_printk(KERN_WARNING, ha,
  106. "host not online\n"));
  107. rval = -EIO;
  108. goto done;
  109. }
  110. req_sg_cnt =
  111. dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  112. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  113. if (!req_sg_cnt) {
  114. rval = -ENOMEM;
  115. goto done_free_fcport;
  116. }
  117. rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  118. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  119. if (!rsp_sg_cnt) {
  120. rval = -ENOMEM;
  121. goto done_free_fcport;
  122. }
  123. if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
  124. (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
  125. {
  126. DEBUG2(printk(KERN_INFO
  127. "dma mapping resulted in different sg counts \
  128. [request_sg_cnt: %x dma_request_sg_cnt: %x\
  129. reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
  130. bsg_job->request_payload.sg_cnt, req_sg_cnt,
  131. bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
  132. rval = -EAGAIN;
  133. goto done_unmap_sg;
  134. }
  135. /* Alloc SRB structure */
  136. sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
  137. if (!sp) {
  138. rval = -ENOMEM;
  139. goto done_unmap_sg;
  140. }
  141. els = sp->ctx;
  142. els->ctx.type =
  143. (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
  144. SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
  145. els->bsg_job = bsg_job;
  146. DEBUG2(qla_printk(KERN_INFO, ha,
  147. "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
  148. "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
  149. bsg_job->request->rqst_data.h_els.command_code,
  150. fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
  151. fcport->d_id.b.al_pa));
  152. rval = qla2x00_start_sp(sp);
  153. if (rval != QLA_SUCCESS) {
  154. kfree(sp->ctx);
  155. mempool_free(sp, ha->srb_mempool);
  156. rval = -EIO;
  157. goto done_unmap_sg;
  158. }
  159. return rval;
  160. done_unmap_sg:
  161. dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  162. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  163. dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  164. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  165. goto done_free_fcport;
  166. done_free_fcport:
  167. if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
  168. kfree(fcport);
  169. done:
  170. return rval;
  171. }
  172. static int
  173. qla2x00_process_ct(struct fc_bsg_job *bsg_job)
  174. {
  175. srb_t *sp;
  176. struct Scsi_Host *host = bsg_job->shost;
  177. scsi_qla_host_t *vha = shost_priv(host);
  178. struct qla_hw_data *ha = vha->hw;
  179. int rval = (DRIVER_ERROR << 16);
  180. int req_sg_cnt, rsp_sg_cnt;
  181. uint16_t loop_id;
  182. struct fc_port *fcport;
  183. char *type = "FC_BSG_HST_CT";
  184. struct srb_bsg *ct;
  185. /* pass through is supported only for ISP 4Gb or higher */
  186. if (!IS_FWI2_CAPABLE(ha)) {
  187. DEBUG2(qla_printk(KERN_INFO, ha,
  188. "scsi(%ld):Firmware is not capable to support FC "
  189. "CT pass thru\n", vha->host_no));
  190. rval = -EPERM;
  191. goto done;
  192. }
  193. req_sg_cnt =
  194. dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  195. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  196. if (!req_sg_cnt) {
  197. rval = -ENOMEM;
  198. goto done;
  199. }
  200. rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  201. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  202. if (!rsp_sg_cnt) {
  203. rval = -ENOMEM;
  204. goto done;
  205. }
  206. if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
  207. (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
  208. {
  209. DEBUG2(qla_printk(KERN_WARNING, ha,
  210. "[request_sg_cnt: %x dma_request_sg_cnt: %x\
  211. reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
  212. bsg_job->request_payload.sg_cnt, req_sg_cnt,
  213. bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
  214. rval = -EAGAIN;
  215. goto done_unmap_sg;
  216. }
  217. if (!vha->flags.online) {
  218. DEBUG2(qla_printk(KERN_WARNING, ha,
  219. "host not online\n"));
  220. rval = -EIO;
  221. goto done_unmap_sg;
  222. }
  223. loop_id =
  224. (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
  225. >> 24;
  226. switch (loop_id) {
  227. case 0xFC:
  228. loop_id = cpu_to_le16(NPH_SNS);
  229. break;
  230. case 0xFA:
  231. loop_id = vha->mgmt_svr_loop_id;
  232. break;
  233. default:
  234. DEBUG2(qla_printk(KERN_INFO, ha,
  235. "Unknown loop id: %x\n", loop_id));
  236. rval = -EINVAL;
  237. goto done_unmap_sg;
  238. }
  239. /* Allocate a dummy fcport structure, since functions preparing the
  240. * IOCB and mailbox command retrieves port specific information
  241. * from fcport structure. For Host based ELS commands there will be
  242. * no fcport structure allocated
  243. */
  244. fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
  245. if (!fcport)
  246. {
  247. rval = -ENOMEM;
  248. goto done_unmap_sg;
  249. }
  250. /* Initialize all required fields of fcport */
  251. fcport->vha = vha;
  252. fcport->vp_idx = vha->vp_idx;
  253. fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
  254. fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
  255. fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
  256. fcport->loop_id = loop_id;
  257. /* Alloc SRB structure */
  258. sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
  259. if (!sp) {
  260. rval = -ENOMEM;
  261. goto done_free_fcport;
  262. }
  263. ct = sp->ctx;
  264. ct->ctx.type = SRB_CT_CMD;
  265. ct->bsg_job = bsg_job;
  266. DEBUG2(qla_printk(KERN_INFO, ha,
  267. "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
  268. "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
  269. (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
  270. fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
  271. fcport->d_id.b.al_pa));
  272. rval = qla2x00_start_sp(sp);
  273. if (rval != QLA_SUCCESS) {
  274. kfree(sp->ctx);
  275. mempool_free(sp, ha->srb_mempool);
  276. rval = -EIO;
  277. goto done_free_fcport;
  278. }
  279. return rval;
  280. done_free_fcport:
  281. kfree(fcport);
  282. done_unmap_sg:
  283. dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  284. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  285. dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  286. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  287. done:
  288. return rval;
  289. }
/*
 * qla2x00_process_loopback() - BSG vendor command: loopback / echo test.
 *
 * Copies the request payload into a DMA-coherent buffer, then runs
 * either an internal loopback test (non-F-port topology) or an ECHO
 * diagnostic (fabric/F-port topology).  The mailbox status registers
 * plus a trailing byte identifying which command was sent are copied
 * into the reply's sense area.  Completes the bsg job itself via
 * job_done(); returns 0 or a negative errno for setup failures.
 *
 * Note: the success path deliberately FALLS THROUGH every cleanup label
 * below -- all buffers and mappings are always released before return.
 */
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	/* raw mailbox registers returned by the loopback/echo test */
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint8_t* fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	/* Refuse while an ISP abort/recovery is pending or in progress. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!vha->flags.online) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);
	if (!elreq.req_sg_cnt)
		return -ENOMEM;

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);
	if (!elreq.rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* NOTE(review): rsp buffer is sized from the REQUEST payload
	 * length -- assumes reply_payload.payload_len <= this; confirm. */
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;
	/* vendor_cmd[1] carries the test options (e.g. loopback plug type) */
	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	if (ha->current_topology != ISP_CFG_F) {
		/* Non F-port topology: real internal/external loopback. */
		type = "FC_BSG_HST_VENDOR_LOOPBACK";
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld) bsg rqst type: %s\n",
		    vha->host_no, type));
		command_sent = INT_DEF_LB_LOOPBACK_CMD;
		rval = qla2x00_loopback_test(vha, &elreq, response);
		if (IS_QLA81XX(ha)) {
			/* 81xx firmware may demand a reset after a failed
			 * loopback (MBS_COMMAND_ERROR / MBS_LB_RESET). */
			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
				    "ISP\n", __func__, vha->host_no));
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
	} else {
		/* F-port: loopback is not possible, use ECHO ELS instead. */
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld) bsg rqst type: %s\n" ,vha->host_no, type));
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	}

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request %s failed\n", vha->host_no, type));

		/* Stash mailbox registers + command id after the reply
		 * struct in the sense buffer for the requester to read. */
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		/* Test failure is reported through ->result, not errno. */
		rval = 0;
		bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request %s completed\n", vha->host_no, type));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
		    sizeof(response) + sizeof(uint8_t);
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		/* NOTE(review): error path uses (DID_ERROR << 16) but this
		 * uses plain DID_OK (no shift) -- confirm intended. */
		bsg_job->reply->result = DID_OK;
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}
	bsg_job->job_done(bsg_job);

	/* Success path: fall through all cleanup labels below. */
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	return rval;
}
  430. static int
  431. qla84xx_reset(struct fc_bsg_job *bsg_job)
  432. {
  433. struct Scsi_Host *host = bsg_job->shost;
  434. scsi_qla_host_t *vha = shost_priv(host);
  435. struct qla_hw_data *ha = vha->hw;
  436. int rval = 0;
  437. uint32_t flag;
  438. if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
  439. test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
  440. test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
  441. return -EBUSY;
  442. if (!IS_QLA84XX(ha)) {
  443. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
  444. "exiting.\n", vha->host_no));
  445. return -EINVAL;
  446. }
  447. flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
  448. rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
  449. if (rval) {
  450. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
  451. "request 84xx reset failed\n", vha->host_no));
  452. rval = bsg_job->reply->reply_payload_rcv_len = 0;
  453. bsg_job->reply->result = (DID_ERROR << 16);
  454. } else {
  455. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
  456. "request 84xx reset completed\n", vha->host_no));
  457. bsg_job->reply->result = DID_OK;
  458. }
  459. bsg_job->job_done(bsg_job);
  460. return rval;
  461. }
  462. static int
  463. qla84xx_updatefw(struct fc_bsg_job *bsg_job)
  464. {
  465. struct Scsi_Host *host = bsg_job->shost;
  466. scsi_qla_host_t *vha = shost_priv(host);
  467. struct qla_hw_data *ha = vha->hw;
  468. struct verify_chip_entry_84xx *mn = NULL;
  469. dma_addr_t mn_dma, fw_dma;
  470. void *fw_buf = NULL;
  471. int rval = 0;
  472. uint32_t sg_cnt;
  473. uint32_t data_len;
  474. uint16_t options;
  475. uint32_t flag;
  476. uint32_t fw_ver;
  477. if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
  478. test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
  479. test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
  480. return -EBUSY;
  481. if (!IS_QLA84XX(ha)) {
  482. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
  483. "exiting.\n", vha->host_no));
  484. return -EINVAL;
  485. }
  486. sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  487. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  488. if (!sg_cnt)
  489. return -ENOMEM;
  490. if (sg_cnt != bsg_job->request_payload.sg_cnt) {
  491. DEBUG2(printk(KERN_INFO
  492. "dma mapping resulted in different sg counts "
  493. "request_sg_cnt: %x dma_request_sg_cnt: %x ",
  494. bsg_job->request_payload.sg_cnt, sg_cnt));
  495. rval = -EAGAIN;
  496. goto done_unmap_sg;
  497. }
  498. data_len = bsg_job->request_payload.payload_len;
  499. fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
  500. &fw_dma, GFP_KERNEL);
  501. if (!fw_buf) {
  502. DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
  503. "failed for host=%lu\n", __func__, vha->host_no));
  504. rval = -ENOMEM;
  505. goto done_unmap_sg;
  506. }
  507. sg_copy_to_buffer(bsg_job->request_payload.sg_list,
  508. bsg_job->request_payload.sg_cnt, fw_buf, data_len);
  509. mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
  510. if (!mn) {
  511. DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
  512. "failed for host=%lu\n", __func__, vha->host_no));
  513. rval = -ENOMEM;
  514. goto done_free_fw_buf;
  515. }
  516. flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
  517. fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
  518. memset(mn, 0, sizeof(struct access_chip_84xx));
  519. mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
  520. mn->entry_count = 1;
  521. options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
  522. if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
  523. options |= VCO_DIAG_FW;
  524. mn->options = cpu_to_le16(options);
  525. mn->fw_ver = cpu_to_le32(fw_ver);
  526. mn->fw_size = cpu_to_le32(data_len);
  527. mn->fw_seq_size = cpu_to_le32(data_len);
  528. mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
  529. mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
  530. mn->dseg_length = cpu_to_le32(data_len);
  531. mn->data_seg_cnt = cpu_to_le16(1);
  532. rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
  533. if (rval) {
  534. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
  535. "request 84xx updatefw failed\n", vha->host_no));
  536. rval = bsg_job->reply->reply_payload_rcv_len = 0;
  537. bsg_job->reply->result = (DID_ERROR << 16);
  538. } else {
  539. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
  540. "request 84xx updatefw completed\n", vha->host_no));
  541. bsg_job->reply_len = sizeof(struct fc_bsg_reply);
  542. bsg_job->reply->result = DID_OK;
  543. }
  544. bsg_job->job_done(bsg_job);
  545. dma_pool_free(ha->s_dma_pool, mn, mn_dma);
  546. done_free_fw_buf:
  547. dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
  548. done_unmap_sg:
  549. dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  550. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  551. return rval;
  552. }
  553. static int
  554. qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
  555. {
  556. struct Scsi_Host *host = bsg_job->shost;
  557. scsi_qla_host_t *vha = shost_priv(host);
  558. struct qla_hw_data *ha = vha->hw;
  559. struct access_chip_84xx *mn = NULL;
  560. dma_addr_t mn_dma, mgmt_dma;
  561. void *mgmt_b = NULL;
  562. int rval = 0;
  563. struct qla_bsg_a84_mgmt *ql84_mgmt;
  564. uint32_t sg_cnt;
  565. uint32_t data_len;
  566. uint32_t dma_direction = DMA_NONE;
  567. if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
  568. test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
  569. test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
  570. return -EBUSY;
  571. if (!IS_QLA84XX(ha)) {
  572. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
  573. "exiting.\n", vha->host_no));
  574. return -EINVAL;
  575. }
  576. ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
  577. sizeof(struct fc_bsg_request));
  578. if (!ql84_mgmt) {
  579. DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
  580. __func__, vha->host_no));
  581. return -EINVAL;
  582. }
  583. mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
  584. if (!mn) {
  585. DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
  586. "failed for host=%lu\n", __func__, vha->host_no));
  587. return -ENOMEM;
  588. }
  589. memset(mn, 0, sizeof(struct access_chip_84xx));
  590. mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
  591. mn->entry_count = 1;
  592. switch (ql84_mgmt->mgmt.cmd) {
  593. case QLA84_MGMT_READ_MEM:
  594. case QLA84_MGMT_GET_INFO:
  595. sg_cnt = dma_map_sg(&ha->pdev->dev,
  596. bsg_job->reply_payload.sg_list,
  597. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  598. if (!sg_cnt) {
  599. rval = -ENOMEM;
  600. goto exit_mgmt;
  601. }
  602. dma_direction = DMA_FROM_DEVICE;
  603. if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
  604. DEBUG2(printk(KERN_INFO
  605. "dma mapping resulted in different sg counts "
  606. "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
  607. bsg_job->reply_payload.sg_cnt, sg_cnt));
  608. rval = -EAGAIN;
  609. goto done_unmap_sg;
  610. }
  611. data_len = bsg_job->reply_payload.payload_len;
  612. mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
  613. &mgmt_dma, GFP_KERNEL);
  614. if (!mgmt_b) {
  615. DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
  616. "failed for host=%lu\n",
  617. __func__, vha->host_no));
  618. rval = -ENOMEM;
  619. goto done_unmap_sg;
  620. }
  621. if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
  622. mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
  623. mn->parameter1 =
  624. cpu_to_le32(
  625. ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
  626. } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
  627. mn->options = cpu_to_le16(ACO_REQUEST_INFO);
  628. mn->parameter1 =
  629. cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
  630. mn->parameter2 =
  631. cpu_to_le32(
  632. ql84_mgmt->mgmt.mgmtp.u.info.context);
  633. }
  634. break;
  635. case QLA84_MGMT_WRITE_MEM:
  636. sg_cnt = dma_map_sg(&ha->pdev->dev,
  637. bsg_job->request_payload.sg_list,
  638. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  639. if (!sg_cnt) {
  640. rval = -ENOMEM;
  641. goto exit_mgmt;
  642. }
  643. dma_direction = DMA_TO_DEVICE;
  644. if (sg_cnt != bsg_job->request_payload.sg_cnt) {
  645. DEBUG2(printk(KERN_INFO
  646. "dma mapping resulted in different sg counts "
  647. "request_sg_cnt: %x dma_request_sg_cnt: %x ",
  648. bsg_job->request_payload.sg_cnt, sg_cnt));
  649. rval = -EAGAIN;
  650. goto done_unmap_sg;
  651. }
  652. data_len = bsg_job->request_payload.payload_len;
  653. mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
  654. &mgmt_dma, GFP_KERNEL);
  655. if (!mgmt_b) {
  656. DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
  657. "failed for host=%lu\n",
  658. __func__, vha->host_no));
  659. rval = -ENOMEM;
  660. goto done_unmap_sg;
  661. }
  662. sg_copy_to_buffer(bsg_job->request_payload.sg_list,
  663. bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
  664. mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
  665. mn->parameter1 =
  666. cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
  667. break;
  668. case QLA84_MGMT_CHNG_CONFIG:
  669. mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
  670. mn->parameter1 =
  671. cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
  672. mn->parameter2 =
  673. cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
  674. mn->parameter3 =
  675. cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
  676. break;
  677. default:
  678. rval = -EIO;
  679. goto exit_mgmt;
  680. }
  681. if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
  682. mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
  683. mn->dseg_count = cpu_to_le16(1);
  684. mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
  685. mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
  686. mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
  687. }
  688. rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
  689. if (rval) {
  690. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
  691. "request 84xx mgmt failed\n", vha->host_no));
  692. rval = bsg_job->reply->reply_payload_rcv_len = 0;
  693. bsg_job->reply->result = (DID_ERROR << 16);
  694. } else {
  695. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
  696. "request 84xx mgmt completed\n", vha->host_no));
  697. bsg_job->reply_len = sizeof(struct fc_bsg_reply);
  698. bsg_job->reply->result = DID_OK;
  699. if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
  700. (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
  701. bsg_job->reply->reply_payload_rcv_len =
  702. bsg_job->reply_payload.payload_len;
  703. sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
  704. bsg_job->reply_payload.sg_cnt, mgmt_b, data_len);
  705. }
  706. }
  707. bsg_job->job_done(bsg_job);
  708. dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
  709. done_unmap_sg:
  710. if (dma_direction == DMA_TO_DEVICE)
  711. dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  712. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  713. else if (dma_direction == DMA_FROM_DEVICE)
  714. dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  715. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  716. exit_mgmt:
  717. dma_pool_free(ha->s_dma_pool, mn, mn_dma);
  718. return rval;
  719. }
  720. static int
  721. qla24xx_iidma(struct fc_bsg_job *bsg_job)
  722. {
  723. struct Scsi_Host *host = bsg_job->shost;
  724. scsi_qla_host_t *vha = shost_priv(host);
  725. struct qla_hw_data *ha = vha->hw;
  726. int rval = 0;
  727. struct qla_port_param *port_param = NULL;
  728. fc_port_t *fcport = NULL;
  729. uint16_t mb[MAILBOX_REGISTER_COUNT];
  730. uint8_t *rsp_ptr = NULL;
  731. bsg_job->reply->reply_payload_rcv_len = 0;
  732. if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
  733. test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
  734. test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
  735. return -EBUSY;
  736. if (!IS_IIDMA_CAPABLE(vha->hw)) {
  737. DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
  738. "supported\n", __func__, vha->host_no));
  739. return -EINVAL;
  740. }
  741. port_param = (struct qla_port_param *)((char *)bsg_job->request +
  742. sizeof(struct fc_bsg_request));
  743. if (!port_param) {
  744. DEBUG2(printk("%s(%ld): port_param header not provided, "
  745. "exiting.\n", __func__, vha->host_no));
  746. return -EINVAL;
  747. }
  748. if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
  749. DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
  750. __func__, vha->host_no));
  751. return -EINVAL;
  752. }
  753. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  754. if (fcport->port_type != FCT_TARGET)
  755. continue;
  756. if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
  757. fcport->port_name, sizeof(fcport->port_name)))
  758. continue;
  759. break;
  760. }
  761. if (!fcport) {
  762. DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
  763. __func__, vha->host_no));
  764. return -EINVAL;
  765. }
  766. if (port_param->mode)
  767. rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
  768. port_param->speed, mb);
  769. else
  770. rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
  771. &port_param->speed, mb);
  772. if (rval) {
  773. DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
  774. "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
  775. vha->host_no, fcport->port_name[0],
  776. fcport->port_name[1],
  777. fcport->port_name[2], fcport->port_name[3],
  778. fcport->port_name[4], fcport->port_name[5],
  779. fcport->port_name[6], fcport->port_name[7], rval,
  780. fcport->fp_speed, mb[0], mb[1]));
  781. rval = 0;
  782. bsg_job->reply->result = (DID_ERROR << 16);
  783. } else {
  784. if (!port_param->mode) {
  785. bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
  786. sizeof(struct qla_port_param);
  787. rsp_ptr = ((uint8_t *)bsg_job->reply) +
  788. sizeof(struct fc_bsg_reply);
  789. memcpy(rsp_ptr, port_param,
  790. sizeof(struct qla_port_param));
  791. }
  792. bsg_job->reply->result = DID_OK;
  793. }
  794. bsg_job->job_done(bsg_job);
  795. return rval;
  796. }
  797. static int
  798. qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
  799. {
  800. switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
  801. case QL_VND_LOOPBACK:
  802. return qla2x00_process_loopback(bsg_job);
  803. case QL_VND_A84_RESET:
  804. return qla84xx_reset(bsg_job);
  805. case QL_VND_A84_UPDATE_FW:
  806. return qla84xx_updatefw(bsg_job);
  807. case QL_VND_A84_MGMT_CMD:
  808. return qla84xx_mgmt_cmd(bsg_job);
  809. case QL_VND_IIDMA:
  810. return qla24xx_iidma(bsg_job);
  811. default:
  812. bsg_job->reply->result = (DID_ERROR << 16);
  813. bsg_job->job_done(bsg_job);
  814. return -ENOSYS;
  815. }
  816. }
  817. int
  818. qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
  819. {
  820. int ret = -EINVAL;
  821. switch (bsg_job->request->msgcode) {
  822. case FC_BSG_RPT_ELS:
  823. case FC_BSG_HST_ELS_NOLOGIN:
  824. ret = qla2x00_process_els(bsg_job);
  825. break;
  826. case FC_BSG_HST_CT:
  827. ret = qla2x00_process_ct(bsg_job);
  828. break;
  829. case FC_BSG_HST_VENDOR:
  830. ret = qla2x00_process_vendor_specific(bsg_job);
  831. break;
  832. case FC_BSG_HST_ADD_RPORT:
  833. case FC_BSG_HST_DEL_RPORT:
  834. case FC_BSG_RPT_CT:
  835. default:
  836. DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
  837. break;
  838. }
  839. return ret;
  840. }
/*
 * qla24xx_bsg_timeout() - abort a timed-out BSG job.
 *
 * Scans every request queue's outstanding-command array (under the
 * hardware lock) for the SRB that carries @bsg_job, asks the firmware
 * to abort it, and then frees the SRB resources.  Always returns 0;
 * the outcome is reported through bsg_job->reply->result.
 */
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;
	struct srb_bsg *sp_bsg;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* slot 0 is reserved; outstanding handles start at 1 */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
			sp = req->outstanding_cmds[cnt];

			if (sp) {
				/* NOTE(review): only SRB_CT_CMD and
				 * SRB_ELS_CMD_HST are matched here --
				 * SRB_ELS_CMD_RPT jobs would never be
				 * found/aborted; confirm intended. */
				sp_bsg = (struct srb_bsg*)sp->ctx;

				if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
				    (sp_bsg->ctx.type == SRB_ELS_CMD_HST))
				    && (sp_bsg->bsg_job == bsg_job)) {
					if (ha->isp_ops->abort_command(sp)) {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx abort_command failed\n", vha->host_no));
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx abort_command success\n", vha->host_no));
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld) SRB not found to abort\n", vha->host_no));
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	/* Drop the lock before freeing; sp was found under it above.
	 * NOTE(review): sp is freed here even when abort_command()
	 * failed -- if the firmware later completes the command this
	 * looks like a potential use-after-free; confirm against the
	 * ISR completion path. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* CT jobs own a dummy fcport allocated in qla2x00_process_ct(). */
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}