/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

/* BSG support for ELS/CT pass through */
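
/*
 * Allocate an srb_t from the driver's mempool together with a zeroed
 * srb_ctx of the requested size, and tie both to the given fcport.
 * Returns NULL if either allocation fails.
 */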
inline srb_t *
qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
{
    srb_t *sp;
    struct qla_hw_data *ha = vha->hw;
    struct srb_ctx *ctx;

    sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
    if (!sp)
        goto done;
    ctx = kzalloc(size, GFP_KERNEL);
    if (!ctx) {
        mempool_free(sp, ha->srb_mempool);
        sp = NULL;
        goto done;
    }

    memset(sp, 0, sizeof(*sp));
    sp->fcport = fcport;
    sp->ctx = ctx;
done:
    return sp;
}
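
/*
 * Sanity-check an FCP priority configuration block: an all-ones first
 * word means there is no data in flash, and a valid block must start
 * with the 'HQOS' signature. When flag is 1 the entries are also
 * scanned, and the block is rejected unless at least one entry has
 * FCP_PRIO_ENTRY_TAG_VALID set.
 */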
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
    int i, ret, num_valid;
    uint8_t *bcode;
    struct qla_fcp_prio_entry *pri_entry;
    uint32_t *bcode_val_ptr, bcode_val;

    ret = 1;
    num_valid = 0;
    bcode = (uint8_t *)pri_cfg;
    bcode_val_ptr = (uint32_t *)pri_cfg;
    bcode_val = (uint32_t)(*bcode_val_ptr);

    if (bcode_val == 0xFFFFFFFF) {
        /* No FCP Priority config data in flash */
        ql_dbg(ql_dbg_user, vha, 0x7051,
            "No FCP Priority config data.\n");
        return 0;
    }

    if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
        bcode[3] != 'S') {
        /* Invalid FCP priority data header */
        ql_dbg(ql_dbg_user, vha, 0x7052,
            "Invalid FCP Priority data header. bcode=0x%x.\n",
            bcode_val);
        return 0;
    }
    if (flag != 1)
        return ret;

    pri_entry = &pri_cfg->entry[0];
    for (i = 0; i < pri_cfg->num_entries; i++) {
        if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
            num_valid++;
        pri_entry++;
    }

    if (num_valid == 0) {
        /* No valid FCP priority data entries */
        ql_dbg(ql_dbg_user, vha, 0x7053,
            "No valid FCP Priority data entries.\n");
        ret = 0;
    } else {
        /* FCP priority data is valid */
        ql_dbg(ql_dbg_user, vha, 0x7054,
            "Valid FCP priority data. num entries = %d.\n",
            num_valid);
    }

    return ret;
}
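
/*
 * Handle the QL_VND_FCP_PRIO_CFG_CMD vendor request. The sub-command in
 * vendor_cmd[1] selects enable/disable of FCP priority tagging or a
 * get/set of the configuration block; a set validates the user-supplied
 * block before pushing the new priorities to all logged-in ports.
 */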
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int ret = 0;
    uint32_t len;
    uint32_t oper;

    bsg_job->reply->reply_payload_rcv_len = 0;

    if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
        ret = -EBUSY;
        goto exit_fcp_prio_cfg;
    }

    /* Get the sub command */
    oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

    /* Only set config is allowed if config memory is not allocated */
    if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    switch (oper) {
    case QLFC_FCP_PRIO_DISABLE:
        if (ha->flags.fcp_prio_enabled) {
            ha->flags.fcp_prio_enabled = 0;
            ha->fcp_prio_cfg->attributes &=
                ~FCP_PRIO_ATTR_ENABLE;
            qla24xx_update_all_fcp_prio(vha);
            bsg_job->reply->result = DID_OK;
        } else {
            ret = -EINVAL;
            bsg_job->reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }
        break;

    case QLFC_FCP_PRIO_ENABLE:
        if (!ha->flags.fcp_prio_enabled) {
            if (ha->fcp_prio_cfg) {
                ha->flags.fcp_prio_enabled = 1;
                ha->fcp_prio_cfg->attributes |=
                    FCP_PRIO_ATTR_ENABLE;
                qla24xx_update_all_fcp_prio(vha);
                bsg_job->reply->result = DID_OK;
            } else {
                ret = -EINVAL;
                bsg_job->reply->result = (DID_ERROR << 16);
                goto exit_fcp_prio_cfg;
            }
        }
        break;

    case QLFC_FCP_PRIO_GET_CONFIG:
        len = bsg_job->reply_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            ret = -EINVAL;
            bsg_job->reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }

        bsg_job->reply->result = DID_OK;
        bsg_job->reply->reply_payload_rcv_len =
            sg_copy_from_buffer(
                bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
                len);
        break;

    case QLFC_FCP_PRIO_SET_CONFIG:
        len = bsg_job->request_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            bsg_job->reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            goto exit_fcp_prio_cfg;
        }

        if (!ha->fcp_prio_cfg) {
            ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
            if (!ha->fcp_prio_cfg) {
                ql_log(ql_log_warn, vha, 0x7050,
                    "Unable to allocate memory for fcp prio "
                    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
                bsg_job->reply->result = (DID_ERROR << 16);
                ret = -ENOMEM;
                goto exit_fcp_prio_cfg;
            }
        }

        memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
            FCP_PRIO_CFG_SIZE);

        /* validate fcp priority data */
        if (!qla24xx_fcp_prio_cfg_valid(vha,
            (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
            bsg_job->reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            /* If the buffer was invalid, the fcp_prio_cfg
             * data is of no use; free it.
             */
            vfree(ha->fcp_prio_cfg);
            ha->fcp_prio_cfg = NULL;
            goto exit_fcp_prio_cfg;
        }

        ha->flags.fcp_prio_enabled = 0;
        if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
            ha->flags.fcp_prio_enabled = 1;
        qla24xx_update_all_fcp_prio(vha);
        bsg_job->reply->result = DID_OK;
        break;
    default:
        ret = -EINVAL;
        break;
    }
exit_fcp_prio_cfg:
    bsg_job->job_done(bsg_job);
    return ret;
}
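
/*
 * Build and fire an ELS pass-through command. For FC_BSG_RPT_ELS the
 * destination is an existing rport (logged in first if necessary); for
 * FC_BSG_HST_ELS_NOLOGIN a throw-away fcport is allocated and filled in
 * from the request, since the IOCB helpers pull the port id and loop id
 * from an fcport structure.
 */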
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
    struct fc_rport *rport;
    fc_port_t *fcport = NULL;
    struct Scsi_Host *host;
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    srb_t *sp;
    const char *type;
    int req_sg_cnt, rsp_sg_cnt;
    int rval = (DRIVER_ERROR << 16);
    uint16_t nextlid = 0;
    struct srb_ctx *els;

    if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
        rport = bsg_job->rport;
        fcport = *(fc_port_t **) rport->dd_data;
        host = rport_to_shost(rport);
        vha = shost_priv(host);
        ha = vha->hw;
        type = "FC_BSG_RPT_ELS";
    } else {
        host = bsg_job->shost;
        vha = shost_priv(host);
        ha = vha->hw;
        type = "FC_BSG_HST_ELS_NOLOGIN";
    }

    /* pass through is supported only for ISP 4Gb or higher */
    if (!IS_FWI2_CAPABLE(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x7001,
            "ELS passthru not supported for ISP23xx based adapters.\n");
        rval = -EPERM;
        goto done;
    }

    /* Multiple SG's are not supported for ELS requests */
    if (bsg_job->request_payload.sg_cnt > 1 ||
        bsg_job->reply_payload.sg_cnt > 1) {
        ql_dbg(ql_dbg_user, vha, 0x7002,
            "Multiple SG's are not supported for ELS requests, "
            "request_sg_cnt=%x reply_sg_cnt=%x.\n",
            bsg_job->request_payload.sg_cnt,
            bsg_job->reply_payload.sg_cnt);
        rval = -EPERM;
        goto done;
    }

    /* ELS request for rport */
    if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
        /* make sure the rport is logged in,
         * if not perform fabric login
         */
        if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
            ql_dbg(ql_dbg_user, vha, 0x7003,
                "Failed to login port %06X for ELS passthru.\n",
                fcport->d_id.b24);
            rval = -EIO;
            goto done;
        }
    } else {
        /* Allocate a dummy fcport structure, since functions
         * preparing the IOCB and mailbox command retrieve port
         * specific information from the fcport structure. For host
         * based ELS commands there is no fcport structure allocated.
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
            rval = -ENOMEM;
            goto done;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->vp_idx = vha->vp_idx;
        fcport->d_id.b.al_pa =
            bsg_job->request->rqst_data.h_els.port_id[0];
        fcport->d_id.b.area =
            bsg_job->request->rqst_data.h_els.port_id[1];
        fcport->d_id.b.domain =
            bsg_job->request->rqst_data.h_els.port_id[2];
        fcport->loop_id =
            (fcport->d_id.b.al_pa == 0xFD) ?
            NPH_FABRIC_CONTROLLER : NPH_F_PORT;
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
        rval = -EIO;
        goto done;
    }

    req_sg_cnt =
        dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!req_sg_cnt) {
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    if (!rsp_sg_cnt) {
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x7008,
            "dma mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
            "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    /* Alloc SRB structure */
    sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
    if (!sp) {
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    els = sp->ctx;
    els->type =
        (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
        SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
    els->name =
        (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
        "bsg_els_rpt" : "bsg_els_hst");
    els->u.bsg_job = bsg_job;

    ql_dbg(ql_dbg_user, vha, 0x700a,
        "bsg rqst type: %s els type: %x - loop-id=%x "
        "portid=%-2x%02x%02x.\n", type,
        bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x700e,
            "qla2x00_start_sp failed = %d\n", rval);
        kfree(sp->ctx);
        mempool_free(sp, ha->srb_mempool);
        rval = -EIO;
        goto done_unmap_sg;
    }
    return rval;

done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    goto done_free_fcport;

done_free_fcport:
    if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
        kfree(fcport);
done:
    return rval;
}
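
/*
 * CT (Common Transport) pass-through. The destination loop id is derived
 * from the CT preamble: 0xFC routes to the fabric name server (NPH_SNS)
 * and 0xFA to the management server. As with host ELS, a dummy fcport
 * carries the addressing information down to the IOCB helpers.
 */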
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
    srb_t *sp;
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = (DRIVER_ERROR << 16);
    int req_sg_cnt, rsp_sg_cnt;
    uint16_t loop_id;
    struct fc_port *fcport;
    char *type = "FC_BSG_HST_CT";
    struct srb_ctx *ct;

    req_sg_cnt =
        dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!req_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x700f,
            "dma_map_sg return %d for request\n", req_sg_cnt);
        rval = -ENOMEM;
        goto done;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    if (!rsp_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7010,
            "dma_map_sg return %d for reply\n", rsp_sg_cnt);
        rval = -ENOMEM;
        goto done;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x7011,
            "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
            "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7012,
            "Host is not online.\n");
        rval = -EIO;
        goto done_unmap_sg;
    }

    loop_id =
        (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
        >> 24;
    switch (loop_id) {
    case 0xFC:
        loop_id = cpu_to_le16(NPH_SNS);
        break;
    case 0xFA:
        loop_id = vha->mgmt_svr_loop_id;
        break;
    default:
        ql_dbg(ql_dbg_user, vha, 0x7013,
            "Unknown loop id: %x.\n", loop_id);
        rval = -EINVAL;
        goto done_unmap_sg;
    }

    /* Allocate a dummy fcport structure, since functions preparing the
     * IOCB and mailbox command retrieve port specific information from
     * the fcport structure. For host based commands there is no fcport
     * structure allocated.
     */
    fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
    if (!fcport) {
        ql_log(ql_log_warn, vha, 0x7014,
            "Failed to allocate fcport.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    /* Initialize all required fields of fcport */
    fcport->vha = vha;
    fcport->vp_idx = vha->vp_idx;
    fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
    fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
    fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
    fcport->loop_id = loop_id;

    /* Alloc SRB structure */
    sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
    if (!sp) {
        ql_log(ql_log_warn, vha, 0x7015,
            "qla2x00_get_ctx_bsg_sp failed.\n");
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    ct = sp->ctx;
    ct->type = SRB_CT_CMD;
    ct->name = "bsg_ct";
    ct->u.bsg_job = bsg_job;

    ql_dbg(ql_dbg_user, vha, 0x7016,
        "bsg rqst type: %s ct type: %x - "
        "loop-id=%x portid=%02x%02x%02x.\n", type,
        (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
        fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
        fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7017,
            "qla2x00_start_sp failed=%d.\n", rval);
        kfree(sp->ctx);
        mempool_free(sp, ha->srb_mempool);
        rval = -EIO;
        goto done_free_fcport;
    }
    return rval;

done_free_fcport:
    kfree(fcport);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
    return rval;
}

/* Set the port configuration to enable the
 * internal loopback on ISP81XX
 */
static inline int
qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config)
{
    int ret = 0;
    int rval = 0;
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha))
        goto done_set_internal;

    new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
    memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

    ha->notify_dcbx_comp = 1;
    ret = qla81xx_set_port_config(vha, new_config);
    if (ret != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7021,
            "set port config failed.\n");
        ha->notify_dcbx_comp = 0;
        rval = -EINVAL;
        goto done_set_internal;
    }

    /* Wait for DCBX complete event */
    if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
        ql_dbg(ql_dbg_user, vha, 0x7022,
            "State change notification not received.\n");
    } else
        ql_dbg(ql_dbg_user, vha, 0x7023,
            "State change received.\n");

    ha->notify_dcbx_comp = 0;

done_set_internal:
    return rval;
}

/* Set the port configuration to disable the
 * internal loopback on ISP81XX
 */
static inline int
qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
    int wait)
{
    int ret = 0;
    int rval = 0;
    uint16_t new_config[4];
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha))
        goto done_reset_internal;

    memset(new_config, 0, sizeof(new_config));
    if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
        ENABLE_INTERNAL_LOOPBACK) {
        new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
        memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

        ha->notify_dcbx_comp = wait;
        ret = qla81xx_set_port_config(vha, new_config);
        if (ret != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x7025,
                "Set port config failed.\n");
            ha->notify_dcbx_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        }

        /* Wait for DCBX complete event */
        if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
            (20 * HZ))) {
            ql_dbg(ql_dbg_user, vha, 0x7026,
                "State change notification not received.\n");
            ha->notify_dcbx_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        } else
            ql_dbg(ql_dbg_user, vha, 0x7027,
                "State change received.\n");

        ha->notify_dcbx_comp = 0;
    }
done_reset_internal:
    return rval;
}
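
/*
 * Diagnostic loopback/echo handler. On a fabric topology (or an ISP81xx
 * sending an ELS-framed payload) with the EXTERNAL_LOOPBACK option the
 * request is run as an ECHO test; otherwise a loopback test is issued,
 * with ISP81xx parts first having their port config switched into (or
 * out of) internal loopback and restored afterwards. The mailbox status
 * words and the command code are stashed after the fc_bsg_reply in the
 * request's sense buffer for the caller to inspect.
 */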
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval;
    uint8_t command_sent;
    char *type;
    struct msg_echo_lb elreq;
    uint16_t response[MAILBOX_REGISTER_COUNT];
    uint16_t config[4], new_config[4];
    uint8_t *fw_sts_ptr;
    uint8_t *req_data = NULL;
    dma_addr_t req_data_dma;
    uint32_t req_data_len;
    uint8_t *rsp_data = NULL;
    dma_addr_t rsp_data_dma;
    uint32_t rsp_data_len;

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
        ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
        return -EBUSY;
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
        return -EIO;
    }

    elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
        DMA_TO_DEVICE);
    if (!elreq.req_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x701a,
            "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
        return -ENOMEM;
    }

    elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
        DMA_FROM_DEVICE);
    if (!elreq.rsp_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x701b,
            "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
        rval = -ENOMEM;
        goto done_unmap_req_sg;
    }

    if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x701c,
            "dma mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt: %x "
            "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
            bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
            bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }
    req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
    req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
        &req_data_dma, GFP_KERNEL);
    if (!req_data) {
        ql_log(ql_log_warn, vha, 0x701d,
            "dma alloc failed for req_data.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
        &rsp_data_dma, GFP_KERNEL);
    if (!rsp_data) {
        ql_log(ql_log_warn, vha, 0x7004,
            "dma alloc failed for rsp_data.\n");
        rval = -ENOMEM;
        goto done_free_dma_req;
    }

    /* Copy the request buffer in req_data now */
    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, req_data, req_data_len);

    elreq.send_dma = req_data_dma;
    elreq.rcv_dma = rsp_data_dma;
    elreq.transfer_size = req_data_len;

    elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

    if ((ha->current_topology == ISP_CFG_F ||
        (IS_QLA81XX(ha) &&
        le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
        && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
        elreq.options == EXTERNAL_LOOPBACK) {
        type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
        ql_dbg(ql_dbg_user, vha, 0x701e,
            "BSG request type: %s.\n", type);
        command_sent = INT_DEF_LB_ECHO_CMD;
        rval = qla2x00_echo_test(vha, &elreq, response);
    } else {
        if (IS_QLA81XX(ha)) {
            memset(config, 0, sizeof(config));
            memset(new_config, 0, sizeof(new_config));
            if (qla81xx_get_port_config(vha, config)) {
                ql_log(ql_log_warn, vha, 0x701f,
                    "Get port config failed.\n");
                bsg_job->reply->reply_payload_rcv_len = 0;
                bsg_job->reply->result = (DID_ERROR << 16);
                rval = -EPERM;
                goto done_free_dma_req;
            }

            if (elreq.options != EXTERNAL_LOOPBACK) {
                ql_dbg(ql_dbg_user, vha, 0x7020,
                    "Internal: current port config = %x\n",
                    config[0]);
                if (qla81xx_set_internal_loopback(vha, config,
                    new_config)) {
                    ql_log(ql_log_warn, vha, 0x7024,
                        "Internal loopback failed.\n");
                    bsg_job->reply->reply_payload_rcv_len = 0;
                    bsg_job->reply->result = (DID_ERROR << 16);
                    rval = -EPERM;
                    goto done_free_dma_req;
                }
            } else {
                /* For external loopback to work
                 * ensure internal loopback is disabled
                 */
                if (qla81xx_reset_internal_loopback(vha,
                    config, 1)) {
                    bsg_job->reply->reply_payload_rcv_len = 0;
                    bsg_job->reply->result = (DID_ERROR << 16);
                    rval = -EPERM;
                    goto done_free_dma_req;
                }
            }

            type = "FC_BSG_HST_VENDOR_LOOPBACK";
            ql_dbg(ql_dbg_user, vha, 0x7028,
                "BSG request type: %s.\n", type);

            command_sent = INT_DEF_LB_LOOPBACK_CMD;
            rval = qla2x00_loopback_test(vha, &elreq, response);

            if (new_config[0]) {
                /* Revert back to original port config
                 * Also clear internal loopback
                 */
                qla81xx_reset_internal_loopback(vha,
                    new_config, 0);
            }

            if (response[0] == MBS_COMMAND_ERROR &&
                response[1] == MBS_LB_RESET) {
                ql_log(ql_log_warn, vha, 0x7029,
                    "MBX command error, Aborting ISP.\n");
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                qla2xxx_wake_dpc(vha);
                qla2x00_wait_for_chip_reset(vha);
                /* Also reset the MPI */
                if (qla81xx_restart_mpi_firmware(vha) !=
                    QLA_SUCCESS) {
                    ql_log(ql_log_warn, vha, 0x702a,
                        "MPI reset failed.\n");
                }

                bsg_job->reply->reply_payload_rcv_len = 0;
                bsg_job->reply->result = (DID_ERROR << 16);
                rval = -EIO;
                goto done_free_dma_req;
            }
        } else {
            type = "FC_BSG_HST_VENDOR_LOOPBACK";
            ql_dbg(ql_dbg_user, vha, 0x702b,
                "BSG request type: %s.\n", type);
            command_sent = INT_DEF_LB_LOOPBACK_CMD;
            rval = qla2x00_loopback_test(vha, &elreq, response);
        }
    }

    if (rval) {
        ql_log(ql_log_warn, vha, 0x702c,
            "Vendor request %s failed.\n", type);

        fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
            sizeof(struct fc_bsg_reply);

        memcpy(fw_sts_ptr, response, sizeof(response));
        fw_sts_ptr += sizeof(response);
        *fw_sts_ptr = command_sent;
        rval = 0;
        bsg_job->reply->reply_payload_rcv_len = 0;
        bsg_job->reply->result = (DID_ERROR << 16);
    } else {
        ql_dbg(ql_dbg_user, vha, 0x702d,
            "Vendor request %s completed.\n", type);

        bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
            sizeof(response) + sizeof(uint8_t);
        bsg_job->reply->reply_payload_rcv_len =
            bsg_job->reply_payload.payload_len;
        fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
            sizeof(struct fc_bsg_reply);
        memcpy(fw_sts_ptr, response, sizeof(response));
        fw_sts_ptr += sizeof(response);
        *fw_sts_ptr = command_sent;
        bsg_job->reply->result = DID_OK;
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, rsp_data,
            rsp_data_len);
    }
    bsg_job->job_done(bsg_job);

    dma_free_coherent(&ha->pdev->dev, rsp_data_len,
        rsp_data, rsp_data_dma);
done_free_dma_req:
    dma_free_coherent(&ha->pdev->dev, req_data_len,
        req_data, req_data_dma);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    return rval;
}
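
/*
 * Reset the ISP84xx chip, switching to its diagnostic firmware when
 * vendor_cmd[1] is A84_ISSUE_RESET_DIAG_FW.
 */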
static int
qla84xx_reset(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint32_t flag;

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
        ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
        return -EBUSY;
    }

    if (!IS_QLA84XX(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

    rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7030,
            "Vendor request 84xx reset failed.\n");
        rval = bsg_job->reply->reply_payload_rcv_len = 0;
        bsg_job->reply->result = (DID_ERROR << 16);
    } else {
        ql_dbg(ql_dbg_user, vha, 0x7031,
            "Vendor request 84xx reset completed.\n");
        bsg_job->reply->result = DID_OK;
    }

    bsg_job->job_done(bsg_job);
    return rval;
}
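
/*
 * Stage a new ISP84xx firmware image from the request payload into a
 * coherent DMA buffer and hand it to the chip with a VERIFY_CHIP IOCB
 * (VCO_FORCE_UPDATE); the firmware version is pulled from the third
 * 32-bit word of the image.
 */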
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    struct verify_chip_entry_84xx *mn = NULL;
    dma_addr_t mn_dma, fw_dma;
    void *fw_buf = NULL;
    int rval = 0;
    uint32_t sg_cnt;
    uint32_t data_len;
    uint16_t options;
    uint32_t flag;
    uint32_t fw_ver;

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
        return -EBUSY;

    if (!IS_QLA84XX(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x7032,
            "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7033,
            "dma_map_sg returned %d for request.\n", sg_cnt);
        return -ENOMEM;
    }

    if (sg_cnt != bsg_job->request_payload.sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7034,
            "DMA mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
            bsg_job->request_payload.sg_cnt, sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    data_len = bsg_job->request_payload.payload_len;
    fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
        &fw_dma, GFP_KERNEL);
    if (!fw_buf) {
        ql_log(ql_log_warn, vha, 0x7035,
            "DMA alloc failed for fw_buf.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, fw_buf, data_len);

    mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
    if (!mn) {
        ql_log(ql_log_warn, vha, 0x7036,
            "DMA alloc failed for fw buffer.\n");
        rval = -ENOMEM;
        goto done_free_fw_buf;
    }

    flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
    fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

    memset(mn, 0, sizeof(struct access_chip_84xx));
    mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
    mn->entry_count = 1;

    options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
    if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
        options |= VCO_DIAG_FW;

    mn->options = cpu_to_le16(options);
    mn->fw_ver = cpu_to_le32(fw_ver);
    mn->fw_size = cpu_to_le32(data_len);
    mn->fw_seq_size = cpu_to_le32(data_len);
    mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
    mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
    mn->dseg_length = cpu_to_le32(data_len);
    mn->data_seg_cnt = cpu_to_le16(1);

    rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7037,
            "Vendor request 84xx updatefw failed.\n");

        rval = bsg_job->reply->reply_payload_rcv_len = 0;
        bsg_job->reply->result = (DID_ERROR << 16);
    } else {
        ql_dbg(ql_dbg_user, vha, 0x7038,
            "Vendor request 84xx updatefw completed.\n");

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_job->reply->result = DID_OK;
    }

    bsg_job->job_done(bsg_job);
    dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
    dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

    return rval;
}
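
/*
 * ISP84xx management pass-through built on the ACCESS_CHIP IOCB: read or
 * write chip memory, query info, or change a config parameter. The mgmt
 * header follows the fc_bsg_request in the request buffer; a bounce
 * buffer handles the data phase, mapped in whichever direction the
 * chosen sub-command requires.
 */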
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    struct access_chip_84xx *mn = NULL;
    dma_addr_t mn_dma, mgmt_dma;
    void *mgmt_b = NULL;
    int rval = 0;
    struct qla_bsg_a84_mgmt *ql84_mgmt;
    uint32_t sg_cnt;
    uint32_t data_len = 0;
    uint32_t dma_direction = DMA_NONE;

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
        ql_log(ql_log_warn, vha, 0x7039,
            "Abort active or needed.\n");
        return -EBUSY;
    }

    if (!IS_QLA84XX(ha)) {
        ql_log(ql_log_warn, vha, 0x703a,
            "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
        sizeof(struct fc_bsg_request));
    if (!ql84_mgmt) {
        ql_log(ql_log_warn, vha, 0x703b,
            "MGMT header not provided, exiting.\n");
        return -EINVAL;
    }

    mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
    if (!mn) {
        ql_log(ql_log_warn, vha, 0x703c,
            "DMA alloc failed for fw buffer.\n");
        return -ENOMEM;
    }

    memset(mn, 0, sizeof(struct access_chip_84xx));
    mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
    mn->entry_count = 1;

    switch (ql84_mgmt->mgmt.cmd) {
    case QLA84_MGMT_READ_MEM:
    case QLA84_MGMT_GET_INFO:
        sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!sg_cnt) {
            ql_log(ql_log_warn, vha, 0x703d,
                "dma_map_sg returned %d for reply.\n", sg_cnt);
            rval = -ENOMEM;
            goto exit_mgmt;
        }

        dma_direction = DMA_FROM_DEVICE;

        if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
            ql_log(ql_log_warn, vha, 0x703e,
                "DMA mapping resulted in different sg counts, "
                "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
                bsg_job->reply_payload.sg_cnt, sg_cnt);
            rval = -EAGAIN;
            goto done_unmap_sg;
        }

        data_len = bsg_job->reply_payload.payload_len;

        mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &mgmt_dma, GFP_KERNEL);
        if (!mgmt_b) {
            ql_log(ql_log_warn, vha, 0x703f,
                "DMA alloc failed for mgmt_b.\n");
            rval = -ENOMEM;
            goto done_unmap_sg;
        }

        if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
            mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
            mn->parameter1 =
                cpu_to_le32(
                    ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

        } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
            mn->options = cpu_to_le16(ACO_REQUEST_INFO);
            mn->parameter1 =
                cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

            mn->parameter2 =
                cpu_to_le32(
                    ql84_mgmt->mgmt.mgmtp.u.info.context);
        }
        break;

    case QLA84_MGMT_WRITE_MEM:
        sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (!sg_cnt) {
            ql_log(ql_log_warn, vha, 0x7040,
                "dma_map_sg returned %d.\n", sg_cnt);
            rval = -ENOMEM;
            goto exit_mgmt;
        }

        dma_direction = DMA_TO_DEVICE;

        if (sg_cnt != bsg_job->request_payload.sg_cnt) {
            ql_log(ql_log_warn, vha, 0x7041,
                "DMA mapping resulted in different sg counts, "
                "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
                bsg_job->request_payload.sg_cnt, sg_cnt);
            rval = -EAGAIN;
            goto done_unmap_sg;
        }

        data_len = bsg_job->request_payload.payload_len;
        mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &mgmt_dma, GFP_KERNEL);
        if (!mgmt_b) {
            ql_log(ql_log_warn, vha, 0x7042,
                "DMA alloc failed for mgmt_b.\n");
            rval = -ENOMEM;
            goto done_unmap_sg;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

        mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
        mn->parameter1 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
        break;

    case QLA84_MGMT_CHNG_CONFIG:
        mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
        mn->parameter1 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

        mn->parameter2 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

        mn->parameter3 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
        break;

    default:
        rval = -EIO;
        goto exit_mgmt;
    }

    if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
        mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
        mn->dseg_count = cpu_to_le16(1);
        mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
        mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
        mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
    }

    rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7043,
            "Vendor request 84xx mgmt failed.\n");

        rval = bsg_job->reply->reply_payload_rcv_len = 0;
        bsg_job->reply->result = (DID_ERROR << 16);

    } else {
        ql_dbg(ql_dbg_user, vha, 0x7044,
            "Vendor request 84xx mgmt completed.\n");

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_job->reply->result = DID_OK;

        if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
            (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
            bsg_job->reply->reply_payload_rcv_len =
                bsg_job->reply_payload.payload_len;

            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, mgmt_b,
                data_len);
        }
    }

    bsg_job->job_done(bsg_job);

done_unmap_sg:
    if (mgmt_b)
        dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

    if (dma_direction == DMA_TO_DEVICE)
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    else if (dma_direction == DMA_FROM_DEVICE)
        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
    dma_pool_free(ha->s_dma_pool, mn, mn_dma);

    return rval;
}
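
/*
 * Get or set the iiDMA port speed for the target whose WWPN is given in
 * the qla_port_param header that follows the fc_bsg_request; a get
 * copies the result back after the bsg reply.
 */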
static int
qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    int rval = 0;
    struct qla_port_param *port_param = NULL;
    fc_port_t *fcport = NULL;
    fc_port_t *tmp_fcport;
    uint16_t mb[MAILBOX_REGISTER_COUNT];
    uint8_t *rsp_ptr = NULL;

    bsg_job->reply->reply_payload_rcv_len = 0;

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
        ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
        return -EBUSY;
    }

    if (!IS_IIDMA_CAPABLE(vha->hw)) {
        ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
        return -EINVAL;
    }

    port_param = (struct qla_port_param *)((char *)bsg_job->request +
        sizeof(struct fc_bsg_request));
    if (!port_param) {
        ql_log(ql_log_warn, vha, 0x7047,
            "port_param header not provided.\n");
        return -EINVAL;
    }

    if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
        ql_log(ql_log_warn, vha, 0x7048,
            "Invalid destination type.\n");
        return -EINVAL;
    }

    /* Find the target port matching the requested WWPN; note that
     * list_for_each_entry() never leaves its cursor NULL, so record
     * a match explicitly.
     */
    list_for_each_entry(tmp_fcport, &vha->vp_fcports, list) {
        if (tmp_fcport->port_type != FCT_TARGET)
            continue;

        if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
            tmp_fcport->port_name, sizeof(tmp_fcport->port_name)))
            continue;

        fcport = tmp_fcport;
        break;
    }

    if (!fcport) {
        ql_log(ql_log_warn, vha, 0x7049,
            "Failed to find port.\n");
        return -EINVAL;
    }

    if (atomic_read(&fcport->state) != FCS_ONLINE) {
        ql_log(ql_log_warn, vha, 0x704a,
            "Port is not online.\n");
        return -EINVAL;
    }

    if (fcport->flags & FCF_LOGIN_NEEDED) {
        ql_log(ql_log_warn, vha, 0x704b,
            "Remote port not logged in flags = 0x%x.\n", fcport->flags);
        return -EINVAL;
    }

    if (port_param->mode)
        rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
            port_param->speed, mb);
    else
        rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
            &port_param->speed, mb);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x704c,
            "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
            "%04x %x %04x %04x.\n", fcport->port_name[0],
            fcport->port_name[1], fcport->port_name[2],
            fcport->port_name[3], fcport->port_name[4],
            fcport->port_name[5], fcport->port_name[6],
            fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);

        rval = 0;
        bsg_job->reply->result = (DID_ERROR << 16);
    } else {
        if (!port_param->mode) {
            bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
                sizeof(struct qla_port_param);

            rsp_ptr = ((uint8_t *)bsg_job->reply) +
                sizeof(struct fc_bsg_reply);

            memcpy(rsp_ptr, port_param,
                sizeof(struct qla_port_param));
        }

        bsg_job->reply->result = DID_OK;
    }

    bsg_job->job_done(bsg_job);
    return rval;
}
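
/*
 * Common setup for the flash (option ROM) read/update vendor commands:
 * validate the requested start offset against the flash layout, clamp
 * the region size to the option ROM size, move optrom_state to
 * QLA_SREADING/QLA_SWRITING and allocate the staging buffer.
 */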
static int
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
    uint32_t start = 0;
    int valid = 0;
    struct qla_hw_data *ha = vha->hw;

    bsg_job->reply->reply_payload_rcv_len = 0;

    if (unlikely(pci_channel_offline(ha->pdev)))
        return -EINVAL;

    start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
    if (start > ha->optrom_size) {
        ql_log(ql_log_warn, vha, 0x7055,
            "start %d > optrom_size %d.\n", start, ha->optrom_size);
        return -EINVAL;
    }

    if (ha->optrom_state != QLA_SWAITING) {
        ql_log(ql_log_info, vha, 0x7056,
            "optrom_state %d.\n", ha->optrom_state);
        return -EBUSY;
    }

    ha->optrom_region_start = start;
    ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
    if (is_update) {
        if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
            valid = 1;
        else if (start == (ha->flt_region_boot * 4) ||
            start == (ha->flt_region_fw * 4))
            valid = 1;
        else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
            IS_QLA8XXX_TYPE(ha))
            valid = 1;
        if (!valid) {
            ql_log(ql_log_warn, vha, 0x7058,
                "Invalid start region 0x%x/0x%x.\n", start,
                bsg_job->request_payload.payload_len);
            return -EINVAL;
        }

        ha->optrom_region_size = start +
            bsg_job->request_payload.payload_len > ha->optrom_size ?
            ha->optrom_size - start :
            bsg_job->request_payload.payload_len;
        ha->optrom_state = QLA_SWRITING;
    } else {
        ha->optrom_region_size = start +
            bsg_job->reply_payload.payload_len > ha->optrom_size ?
            ha->optrom_size - start :
            bsg_job->reply_payload.payload_len;
        ha->optrom_state = QLA_SREADING;
    }

    ha->optrom_buffer = vmalloc(ha->optrom_region_size);
    if (!ha->optrom_buffer) {
        ql_log(ql_log_warn, vha, 0x7059,
            "Read: Unable to allocate memory for optrom retrieval "
            "(%x)\n", ha->optrom_region_size);

        ha->optrom_state = QLA_SWAITING;
        return -ENOMEM;
    }

    memset(ha->optrom_buffer, 0, ha->optrom_region_size);
    return 0;
}
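
/*
 * QL_VND_READ_FLASH / QL_VND_UPDATE_FLASH: stage the option ROM region
 * through the buffer set up above, in the read or write direction
 * respectively, then return the hardware to the waiting state.
 */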
static int
qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;

    rval = qla2x00_optrom_setup(bsg_job, vha, 0);
    if (rval)
        return rval;

    ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
        ha->optrom_region_size);

    bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
    bsg_job->reply->result = DID_OK;
    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;
    ha->optrom_state = QLA_SWAITING;
    bsg_job->job_done(bsg_job);
    return rval;
}

static int
qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;

    rval = qla2x00_optrom_setup(bsg_job, vha, 1);
    if (rval)
        return rval;

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
        ha->optrom_region_size);

    ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    bsg_job->reply->result = DID_OK;
    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;
    ha->optrom_state = QLA_SWAITING;
    bsg_job->job_done(bsg_job);
    return rval;
}
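
/* Dispatch FC_BSG_HST_VENDOR requests on vendor_cmd[0]. */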
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
    switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
    case QL_VND_LOOPBACK:
        return qla2x00_process_loopback(bsg_job);

    case QL_VND_A84_RESET:
        return qla84xx_reset(bsg_job);

    case QL_VND_A84_UPDATE_FW:
        return qla84xx_updatefw(bsg_job);

    case QL_VND_A84_MGMT_CMD:
        return qla84xx_mgmt_cmd(bsg_job);

    case QL_VND_IIDMA:
        return qla24xx_iidma(bsg_job);

    case QL_VND_FCP_PRIO_CFG_CMD:
        return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

    case QL_VND_READ_FLASH:
        return qla2x00_read_optrom(bsg_job);

    case QL_VND_UPDATE_FLASH:
        return qla2x00_update_optrom(bsg_job);

    default:
        bsg_job->reply->result = (DID_ERROR << 16);
        bsg_job->job_done(bsg_job);
        return -ENOSYS;
    }
}
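
/*
 * For reference, user space reaches qla24xx_bsg_request() below through
 * the bsg node that the FC transport creates for each host (fc_host%d
 * under /dev/bsg). A minimal sketch of issuing the QL_VND_A84_RESET
 * vendor command synchronously via the SG_IO ioctl -- assuming host 0,
 * the <linux/bsg.h> and <scsi/scsi_bsg_fc.h> headers, and the
 * driver-private QL_VND_*/A84_* values copied from qla_bsg.h; error
 * handling omitted and not a supported management API:
 *
 *	struct fc_bsg_request req = { .msgcode = FC_BSG_HST_VENDOR };
 *	struct fc_bsg_reply rep;
 *	struct sg_io_v4 io = { .guard = 'Q' };
 *	int fd = open("/dev/bsg/fc_host0", O_RDWR);
 *
 *	req.rqst_data.h_vendor.vendor_cmd[0] = QL_VND_A84_RESET;
 *	req.rqst_data.h_vendor.vendor_cmd[1] = A84_ISSUE_RESET_DIAG_FW;
 *	io.protocol = BSG_PROTOCOL_SCSI;
 *	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	io.request = (uintptr_t)&req;
 *	io.request_len = sizeof(req);
 *	io.response = (uintptr_t)&rep;
 *	io.max_response_len = sizeof(rep);
 *	ioctl(fd, SG_IO, &io);
 */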
int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
    int ret = -EINVAL;
    struct fc_rport *rport;
    fc_port_t *fcport = NULL;
    struct Scsi_Host *host;
    scsi_qla_host_t *vha;

    if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
        rport = bsg_job->rport;
        fcport = *(fc_port_t **) rport->dd_data;
        host = rport_to_shost(rport);
        vha = shost_priv(host);
    } else {
        host = bsg_job->shost;
        vha = shost_priv(host);
    }
    ql_dbg(ql_dbg_user, vha, 0x7000,
        "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);

    switch (bsg_job->request->msgcode) {
    case FC_BSG_RPT_ELS:
    case FC_BSG_HST_ELS_NOLOGIN:
        ret = qla2x00_process_els(bsg_job);
        break;
    case FC_BSG_HST_CT:
        ret = qla2x00_process_ct(bsg_job);
        break;
    case FC_BSG_HST_VENDOR:
        ret = qla2x00_process_vendor_specific(bsg_job);
        break;
    case FC_BSG_HST_ADD_RPORT:
    case FC_BSG_HST_DEL_RPORT:
    case FC_BSG_RPT_CT:
    default:
        ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
        break;
    }
    return ret;
}
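
/*
 * Called by the FC transport when a bsg request times out: walk the
 * outstanding commands of every request queue for the srb that owns
 * this bsg_job and ask the firmware to abort it.
 */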
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
    scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
    struct qla_hw_data *ha = vha->hw;
    srb_t *sp;
    int cnt, que;
    unsigned long flags;
    struct req_que *req;
    struct srb_ctx *sp_bsg;

    /* find the bsg job from the active list of commands */
    spin_lock_irqsave(&ha->hardware_lock, flags);
    for (que = 0; que < ha->max_req_queues; que++) {
        req = ha->req_q_map[que];
        if (!req)
            continue;

        for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
            sp = req->outstanding_cmds[cnt];
            if (sp) {
                sp_bsg = sp->ctx;

                if (((sp_bsg->type == SRB_CT_CMD) ||
                    (sp_bsg->type == SRB_ELS_CMD_HST))
                    && (sp_bsg->u.bsg_job == bsg_job)) {
                    spin_unlock_irqrestore(&ha->hardware_lock, flags);
                    if (ha->isp_ops->abort_command(sp)) {
                        ql_log(ql_log_warn, vha, 0x7089,
                            "mbx abort_command failed.\n");
                        bsg_job->req->errors =
                            bsg_job->reply->result = -EIO;
                    } else {
                        ql_dbg(ql_dbg_user, vha, 0x708a,
                            "mbx abort_command success.\n");
                        bsg_job->req->errors =
                            bsg_job->reply->result = 0;
                    }
                    spin_lock_irqsave(&ha->hardware_lock, flags);
                    goto done;
                }
            }
        }
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
    bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
    return 0;

done:
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    if (bsg_job->request->msgcode == FC_BSG_HST_CT)
        kfree(sp->fcport);
    kfree(sp->ctx);
    mempool_free(sp, ha->srb_mempool);
    return 0;
}