qla_bsg.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

/* BSG support for ELS/CT pass through */
inline srb_t *
qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	struct srb_ctx *ctx;

	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
	if (!sp)
		goto done;
	ctx = kzalloc(size, GFP_KERNEL);
	if (!ctx) {
		mempool_free(sp, ha->srb_mempool);
		sp = NULL;
		goto done;
	}

	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->ctx = ctx;
done:
	return sp;
}
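
/*
 * Usage sketch (illustrative comment, not compiled code): callers in
 * this file pair this allocator with qla2x00_start_sp() and must undo
 * both allocations by hand on failure, since there is no teardown
 * helper at this layer:
 *
 *	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
 *	if (!sp)
 *		return -ENOMEM;
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS) {
 *		kfree(sp->ctx);
 *		mempool_free(sp, vha->hw->srb_mempool);
 *		return -EIO;
 *	}
 */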
int
qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		DEBUG2(printk(KERN_INFO
		    "%s: No FCP priority config data.\n",
		    __func__));
		return 0;
	}

	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
	    bcode[3] != 'S') {
		/* Invalid FCP priority data header */
		DEBUG2(printk(KERN_ERR
		    "%s: Invalid FCP Priority data header. bcode=0x%x\n",
		    __func__, bcode_val));
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		DEBUG2(printk(KERN_ERR
		    "%s: No valid FCP Priority data entries.\n",
		    __func__));
		ret = 0;
	} else {
		/* FCP priority data is valid */
		DEBUG2(printk(KERN_INFO
		    "%s: Valid FCP priority data. num entries = %d\n",
		    __func__, num_valid));
	}

	return ret;
}
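
/*
 * Signature sketch (inferred from the checks above, not from a spec
 * document): a valid config region begins with the ASCII bytes
 * 'H' 'Q' 'O' 'S', so on a little-endian host the first 32-bit word
 * compares as
 *
 *	bcode_val = 0x534F5148	("HQOS", bytes 0x48 0x51 0x4F 0x53)
 *
 * while freshly erased flash reads back all ones (0xFFFFFFFF), which
 * is treated as "no config present" rather than as corruption.
 */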
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	bsg_job->reply->reply_payload_rcv_len = 0;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ret = -EBUSY;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);
		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				qla_printk(KERN_WARNING, ha,
				    "Unable to allocate memory "
				    "for fcp prio config data (%x).\n",
				    FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(
		    (struct qla_fcp_prio_cfg *)
		    ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the
			 * fcp_prio_cfg data is of no use; release it.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	bsg_job->job_done(bsg_job);
	return ret;
}
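
/*
 * Request-shape sketch (hypothetical userspace values, derived only
 * from how vendor_cmd[] is consumed above and dispatched in
 * qla2x00_process_vendor_specific() below): an FC_BSG_HST_VENDOR
 * message carries the vendor opcode in vendor_cmd[0] and, for this
 * handler, the sub command in vendor_cmd[1]:
 *
 *	request->msgcode = FC_BSG_HST_VENDOR;
 *	request->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_FCP_PRIO_CFG_CMD;
 *	request->rqst_data.h_vendor.vendor_cmd[1] = QLFC_FCP_PRIO_GET_CONFIG;
 *	reply payload sized at most FCP_PRIO_CFG_SIZE bytes
 */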
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
	struct fc_rport *rport;
	fc_port_t *fcport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;
	struct srb_ctx *els;

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		DEBUG2(printk(KERN_INFO
		    "multiple SG's are not supported for ELS requests"
		    " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt));
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";

		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			DEBUG2(qla_printk(KERN_WARNING, ha,
			    "failed to login port %06X for ELS passthru\n",
			    fcport->d_id.b24));
			rval = -EIO;
			goto done;
		}
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";

		/* Allocate a dummy fcport structure, since the functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For host
		 * based ELS commands there is no fcport structure allocated.
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->vp_idx = vha->vp_idx;
		fcport->d_id.b.al_pa =
		    bsg_job->request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_job->request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_job->request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	if (!vha->flags.online) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "host not online\n"));
		rval = -EIO;
		goto done;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	els = sp->ctx;
	els->type =
	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
	    SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	els->name =
	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
	    "bsg_els_rpt" : "bsg_els_hst");
	els->u.bsg_job = bsg_job;

	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
	    bsg_job->request->rqst_data.h_els.command_code,
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa));

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		kfree(sp->ctx);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

done_free_fcport:
	if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
		kfree(fcport);
done:
	return rval;
}
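
/*
 * Note on the -EAGAIN unwind above (an inference, not driver-author
 * text): dma_map_sg() may coalesce scatterlist entries through an
 * IOMMU, so the mapped count can be smaller than the count the FC
 * midlayer handed in. The pass-through paths here appear to build
 * their IOCBs from the caller's counts, so a mismatch is refused up
 * front rather than risking a malformed data-segment list:
 *
 *	if (req_sg_cnt != bsg_job->request_payload.sg_cnt)
 *		rval = -EAGAIN;		// let userspace retry
 */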
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
	srb_t *sp;
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";
	struct srb_ctx *ct;

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld): Firmware is not capable of supporting FC "
		    "CT pass thru\n", vha->host_no));
		rval = -EPERM;
		goto done;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "host not online\n"));
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Unknown loop id: %x\n", loop_id));
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based CT commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->vp_idx = vha->vp_idx;
	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
	if (!sp) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	ct = sp->ctx;
	ct->type = SRB_CT_CMD;
	ct->name = "bsg_ct";
	ct->u.bsg_job = bsg_job;

	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa));

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		kfree(sp->ctx);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}
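
/*
 * Background for the loop_id switch above (standard Fibre Channel
 * facts, not driver documentation): the top byte of preamble_word1
 * selects the CT destination. 0xFC is the well-known directory/name
 * server address (FFFFFCh), mapped here to the firmware's NPH_SNS
 * handle; 0xFA is the management server (FFFFFAh), whose login handle
 * the driver keeps in vha->mgmt_svr_loop_id.
 */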
/* Set the port configuration to enable the
 * internal loopback on ISP81XX
 */
static inline int
qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config)
{
	int ret = 0;
	int rval = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		goto done_set_internal;

	new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		DEBUG2(printk(KERN_ERR
		    "%s(%lu): Set port config failed\n",
		    __func__, vha->host_no));
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "State change notification not received.\n"));
	} else
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "State change RECEIVED\n"));

	ha->notify_dcbx_comp = 0;

done_set_internal:
	return rval;
}
/* Set the port configuration to disable the
 * internal loopback on ISP81XX
 */
static inline int
qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
    int wait)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			DEBUG2(printk(KERN_ERR
			    "%s(%lu): Set port config failed\n",
			    __func__, vha->host_no));
			ha->notify_dcbx_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (20 * HZ))) {
			DEBUG2(qla_printk(KERN_WARNING, ha,
			    "State change notification not received.\n"));
			ha->notify_dcbx_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "State change RECEIVED\n"));

		ha->notify_dcbx_comp = 0;
	}
done_reset_internal:
	return rval;
}
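
/*
 * Pairing sketch (restating the flow implemented by
 * qla2x00_process_loopback() below): an internal loopback test saves
 * the current port configuration, flips the loopback bit, runs the
 * test, then restores the original settings; an external test only
 * has to ensure internal loopback is off first:
 *
 *	qla81xx_get_port_config(vha, config);
 *	qla81xx_set_internal_loopback(vha, config, new_config);
 *	qla2x00_loopback_test(vha, &elreq, response);
 *	qla81xx_reset_internal_loopback(vha, new_config, 0);
 */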
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!vha->flags.online) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);
	if (!elreq.req_sg_cnt)
		return -ENOMEM;

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);
	if (!elreq.rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	if ((ha->current_topology == ISP_CFG_F ||
	    (IS_QLA81XX(ha) &&
	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
	    elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));
			if (qla81xx_get_port_config(vha, config)) {
				DEBUG2(printk(KERN_ERR
				    "%s(%lu): Get port config failed\n",
				    __func__, vha->host_no));
				bsg_job->reply->reply_payload_rcv_len = 0;
				bsg_job->reply->result = (DID_ERROR << 16);
				rval = -EPERM;
				goto done_free_dma_req;
			}

			if (elreq.options != EXTERNAL_LOOPBACK) {
				DEBUG2(qla_printk(KERN_INFO, ha,
				    "Internal: current port config = %x\n",
				    config[0]));
				if (qla81xx_set_internal_loopback(vha, config,
				    new_config)) {
					bsg_job->reply->reply_payload_rcv_len =
					    0;
					bsg_job->reply->result =
					    (DID_ERROR << 16);
					rval = -EPERM;
					goto done_free_dma_req;
				}
			} else {
				/* For external loopback to work
				 * ensure internal loopback is disabled
				 */
				if (qla81xx_reset_internal_loopback(vha,
				    config, 1)) {
					bsg_job->reply->reply_payload_rcv_len =
					    0;
					bsg_job->reply->result =
					    (DID_ERROR << 16);
					rval = -EPERM;
					goto done_free_dma_req;
				}
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "scsi(%ld) bsg rqst type: %s\n",
			    vha->host_no, type));

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (new_config[1]) {
				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				qla81xx_reset_internal_loopback(vha,
				    new_config, 0);
			}

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
				    "ISP\n", __func__, vha->host_no));
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (qla81xx_restart_mpi_firmware(vha) !=
				    QLA_SUCCESS) {
					qla_printk(KERN_INFO, ha,
					    "MPI reset failed for host%ld.\n",
					    vha->host_no);
				}

				bsg_job->reply->reply_payload_rcv_len = 0;
				bsg_job->reply->result = (DID_ERROR << 16);
				rval = -EIO;
				goto done_free_dma_req;
			}
		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "scsi(%ld) bsg rqst type: %s\n",
			    vha->host_no, type));
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request %s failed\n", vha->host_no, type));

		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);

		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		rval = 0;
		bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request %s completed\n", vha->host_no, type));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
		    sizeof(response) + sizeof(uint8_t);
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		bsg_job->reply->result = DID_OK;
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}
	bsg_job->job_done(bsg_job);

	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	return rval;
}
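
/*
 * Reply-layout sketch for the sense-buffer arithmetic above (derived
 * from the pointer math, not from a published ABI): firmware status
 * rides back to userspace appended after the generic BSG reply,
 *
 *	[struct fc_bsg_reply][response[MAILBOX_REGISTER_COUNT]][command_sent]
 *
 * so a consumer would read the mailbox registers starting
 * sizeof(struct fc_bsg_reply) bytes into the sense data, with the
 * one-byte command code immediately after them.
 */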
static int
qla84xx_reset(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
		    "exiting.\n", vha->host_no));
		return -EINVAL;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx reset failed\n", vha->host_no));
		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx reset completed\n", vha->host_no));
		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	return rval;
}
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
		    "exiting.\n", vha->host_no));
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt)
		return -ENOMEM;

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x ",
		    bsg_job->request_payload.sg_cnt, sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	memset(mn, 0, sizeof(struct access_chip_84xx));

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx updatefw failed\n", vha->host_no));
		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx updatefw completed\n", vha->host_no));
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	return rval;
}
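
/*
 * Image-header sketch (an inference from the pointer arithmetic above,
 * not a documented format): the A84xx firmware version is taken from
 * the third little-endian dword of the uploaded image,
 *
 *	fw_ver = le32_to_cpu(((__le32 *)fw_buf)[2]);
 *
 * and placed in the VERIFY_CHIP IOCB along with the total image size.
 */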
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
		    "exiting.\n", vha->host_no));
		return -EINVAL;
	}

	ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
	    sizeof(struct fc_bsg_request));
	if (!ql84_mgmt) {
		DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
		    __func__, vha->host_no));
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
		    "failed for host=%lu\n", __func__, vha->host_no));
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			DEBUG2(printk(KERN_INFO
			    "dma mapping resulted in different sg counts "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt));
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
			    "failed for host=%lu\n",
			    __func__, vha->host_no));
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
			    ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
			    ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!sg_cnt) {
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			DEBUG2(printk(KERN_INFO
			    "dma mapping resulted in different sg counts "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x ",
			    bsg_job->request_payload.sg_cnt, sg_cnt));
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
			    "failed for host=%lu\n",
			    __func__, vha->host_no));
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx mgmt failed\n", vha->host_no));

		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx mgmt completed\n", vha->host_no));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_job->reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

	bsg_job->job_done(bsg_job);

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	return rval;
}
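
/*
 * Unwind sketch (restating the pattern above): the DMA mapping
 * direction depends on the sub command, so the common exit path keys
 * the unmap off the recorded direction rather than the command code:
 *
 *	if (dma_direction == DMA_TO_DEVICE)
 *		dma_unmap_sg(dev, req_sg_list, req_sg_cnt, DMA_TO_DEVICE);
 *	else if (dma_direction == DMA_FROM_DEVICE)
 *		dma_unmap_sg(dev, rsp_sg_list, rsp_sg_cnt, DMA_FROM_DEVICE);
 *
 * DMA_NONE means nothing was mapped (the QLA84_MGMT_CHNG_CONFIG case).
 */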
static int
qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	fc_port_t *tmp_fcport;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	bsg_job->reply->reply_payload_rcv_len = 0;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
		    "supported\n", __func__, vha->host_no));
		return -EINVAL;
	}

	port_param = (struct qla_port_param *)((char *)bsg_job->request +
	    sizeof(struct fc_bsg_request));
	if (!port_param) {
		DEBUG2(printk("%s(%ld): port_param header not provided, "
		    "exiting.\n", __func__, vha->host_no));
		return -EINVAL;
	}

	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
		    __func__, vha->host_no));
		return -EINVAL;
	}

	/* list_for_each_entry() never leaves its cursor NULL, so track
	 * the match in a separate pointer.
	 */
	list_for_each_entry(tmp_fcport, &vha->vp_fcports, list) {
		if (tmp_fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    tmp_fcport->port_name, sizeof(tmp_fcport->port_name)))
			continue;

		fcport = tmp_fcport;
		break;
	}

	if (!fcport) {
		DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
		    __func__, vha->host_no));
		return -EINVAL;
	}

	if (fcport->loop_id == FC_NO_LOOP_ID) {
		DEBUG2(printk(KERN_ERR "%s(%ld): Invalid port loop id, "
		    "loop_id = 0x%x\n",
		    __func__, vha->host_no, fcport->loop_id));
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		DEBUG16(printk(KERN_ERR "scsi(%ld): iiDMA cmd failed for "
		    "%02x%02x%02x%02x%02x%02x%02x%02x -- "
		    "%04x %x %04x %04x.\n",
		    vha->host_no, fcport->port_name[0],
		    fcport->port_name[1],
		    fcport->port_name[2], fcport->port_name[3],
		    fcport->port_name[4], fcport->port_name[5],
		    fcport->port_name[6], fcport->port_name[7], rval,
		    fcport->fp_speed, mb[0], mb[1]));
		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_job->reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	return rval;
}
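
/*
 * Call-shape sketch (hypothetical values, following the mode test
 * above): port_param->mode selects set (non-zero) versus get (zero),
 * and a get hands the speed back by copying the whole qla_port_param
 * after the fc_bsg_reply:
 *
 *	port_param->fc_scsi_addr.dest_type = EXT_DEF_TYPE_WWPN;
 *	memcpy(port_param->fc_scsi_addr.dest_addr.wwpn, wwpn, 8);
 *	port_param->mode = 0;		// query current iiDMA speed
 */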
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	default:
		bsg_job->reply->result = (DID_ERROR << 16);
		bsg_job->job_done(bsg_job);
		return -ENOSYS;
	}
}
int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
	int ret = -EINVAL;

	switch (bsg_job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
		break;
	}
	return ret;
}
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;
	struct srb_ctx *sp_bsg;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				sp_bsg = sp->ctx;

				if (((sp_bsg->type == SRB_CT_CMD) ||
				    (sp_bsg->type == SRB_ELS_CMD_HST))
				    && (sp_bsg->u.bsg_job == bsg_job)) {
					if (ha->isp_ops->abort_command(sp)) {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx "
						    "abort_command failed\n",
						    vha->host_no));
						bsg_job->req->errors =
						    bsg_job->reply->result = -EIO;
					} else {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx "
						    "abort_command success\n",
						    vha->host_no));
						bsg_job->req->errors =
						    bsg_job->reply->result = 0;
					}
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld) SRB not found to abort\n", vha->host_no));
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}
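
/*
 * Locking sketch for the abort path above (restating, not adding,
 * behavior): the outstanding-command scan and the firmware abort run
 * under hardware_lock; the lock is dropped before the matched srb and
 * its context are freed:
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	...match sp where sp_bsg->u.bsg_job == bsg_job, abort it...
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 *	kfree(sp->ctx);
 *	mempool_free(sp, ha->srb_mempool);
 */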