qla_bsg.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212
  1. /*
  2. * QLogic Fibre Channel HBA Driver
  3. * Copyright (c) 2003-2008 QLogic Corporation
  4. *
  5. * See LICENSE.qla2xxx for copyright and licensing details.
  6. */
  7. #include "qla_def.h"
  8. #include <linux/kthread.h>
  9. #include <linux/vmalloc.h>
  10. #include <linux/delay.h>
  11. /* BSG support for ELS/CT pass through */
  12. inline srb_t *
  13. qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
  14. {
  15. srb_t *sp;
  16. struct qla_hw_data *ha = vha->hw;
  17. struct srb_ctx *ctx;
  18. sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
  19. if (!sp)
  20. goto done;
  21. ctx = kzalloc(size, GFP_KERNEL);
  22. if (!ctx) {
  23. mempool_free(sp, ha->srb_mempool);
  24. sp = NULL;
  25. goto done;
  26. }
  27. memset(sp, 0, sizeof(*sp));
  28. sp->fcport = fcport;
  29. sp->ctx = ctx;
  30. done:
  31. return sp;
  32. }
  33. int
  34. qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
  35. {
  36. int i, ret, num_valid;
  37. uint8_t *bcode;
  38. struct qla_fcp_prio_entry *pri_entry;
  39. ret = 1;
  40. num_valid = 0;
  41. bcode = (uint8_t *)pri_cfg;
  42. if (bcode[0x0] != 'H' || bcode[0x1] != 'Q' || bcode[0x2] != 'O' ||
  43. bcode[0x3] != 'S') {
  44. return 0;
  45. }
  46. if (flag != 1)
  47. return ret;
  48. pri_entry = &pri_cfg->entry[0];
  49. for (i = 0; i < pri_cfg->num_entries; i++) {
  50. if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
  51. num_valid++;
  52. pri_entry++;
  53. }
  54. if (num_valid == 0)
  55. ret = 0;
  56. return ret;
  57. }
/*
 * Vendor BSG command: manage the FCP priority (QoS) configuration.
 *
 * The sub-command in vendor_cmd[1] selects one of:
 *   QLFC_FCP_PRIO_DISABLE    - clear the enable flag and push to firmware
 *   QLFC_FCP_PRIO_ENABLE     - set the enable flag (config must exist)
 *   QLFC_FCP_PRIO_GET_CONFIG - copy the current config to the reply payload
 *   QLFC_FCP_PRIO_SET_CONFIG - load and validate a new config buffer
 *
 * Returns 0 on success or a negative errno; bsg_job->reply->result is
 * set accordingly and job_done() is invoked on every exit path.
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	bsg_job->reply->reply_payload_rcv_len = 0;

	/* Refuse while an ISP abort/recovery is pending or active. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ret = -EBUSY;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			/* Propagate the new state to all logged-in ports. */
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			/* Already disabled - nothing to do. */
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				/* Cannot enable without a loaded config. */
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		/* Copy at most the caller-requested length. */
		bsg_job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);
		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		/* Lazily allocate the config buffer on first SET. */
		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				qla_printk(KERN_WARNING, ha,
					"Unable to allocate memory "
					"for fcp prio config data (%x).\n",
					FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(
		    (struct qla_fcp_prio_cfg *)
		    ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the stored
			 * fcp_prio_cfg is of no use - drop it.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		/* Mirror the enable attribute into the driver flag. */
		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	/* Completion callback must run on every exit path. */
	bsg_job->job_done(bsg_job);
	return ret;
}
  174. static int
  175. qla2x00_process_els(struct fc_bsg_job *bsg_job)
  176. {
  177. struct fc_rport *rport;
  178. fc_port_t *fcport;
  179. struct Scsi_Host *host;
  180. scsi_qla_host_t *vha;
  181. struct qla_hw_data *ha;
  182. srb_t *sp;
  183. const char *type;
  184. int req_sg_cnt, rsp_sg_cnt;
  185. int rval = (DRIVER_ERROR << 16);
  186. uint16_t nextlid = 0;
  187. struct srb_ctx *els;
  188. /* Multiple SG's are not supported for ELS requests */
  189. if (bsg_job->request_payload.sg_cnt > 1 ||
  190. bsg_job->reply_payload.sg_cnt > 1) {
  191. DEBUG2(printk(KERN_INFO
  192. "multiple SG's are not supported for ELS requests"
  193. " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
  194. bsg_job->request_payload.sg_cnt,
  195. bsg_job->reply_payload.sg_cnt));
  196. rval = -EPERM;
  197. goto done;
  198. }
  199. /* ELS request for rport */
  200. if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
  201. rport = bsg_job->rport;
  202. fcport = *(fc_port_t **) rport->dd_data;
  203. host = rport_to_shost(rport);
  204. vha = shost_priv(host);
  205. ha = vha->hw;
  206. type = "FC_BSG_RPT_ELS";
  207. /* make sure the rport is logged in,
  208. * if not perform fabric login
  209. */
  210. if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
  211. DEBUG2(qla_printk(KERN_WARNING, ha,
  212. "failed to login port %06X for ELS passthru\n",
  213. fcport->d_id.b24));
  214. rval = -EIO;
  215. goto done;
  216. }
  217. } else {
  218. host = bsg_job->shost;
  219. vha = shost_priv(host);
  220. ha = vha->hw;
  221. type = "FC_BSG_HST_ELS_NOLOGIN";
  222. /* Allocate a dummy fcport structure, since functions
  223. * preparing the IOCB and mailbox command retrieves port
  224. * specific information from fcport structure. For Host based
  225. * ELS commands there will be no fcport structure allocated
  226. */
  227. fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
  228. if (!fcport) {
  229. rval = -ENOMEM;
  230. goto done;
  231. }
  232. /* Initialize all required fields of fcport */
  233. fcport->vha = vha;
  234. fcport->vp_idx = vha->vp_idx;
  235. fcport->d_id.b.al_pa =
  236. bsg_job->request->rqst_data.h_els.port_id[0];
  237. fcport->d_id.b.area =
  238. bsg_job->request->rqst_data.h_els.port_id[1];
  239. fcport->d_id.b.domain =
  240. bsg_job->request->rqst_data.h_els.port_id[2];
  241. fcport->loop_id =
  242. (fcport->d_id.b.al_pa == 0xFD) ?
  243. NPH_FABRIC_CONTROLLER : NPH_F_PORT;
  244. }
  245. if (!vha->flags.online) {
  246. DEBUG2(qla_printk(KERN_WARNING, ha,
  247. "host not online\n"));
  248. rval = -EIO;
  249. goto done;
  250. }
  251. req_sg_cnt =
  252. dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  253. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  254. if (!req_sg_cnt) {
  255. rval = -ENOMEM;
  256. goto done_free_fcport;
  257. }
  258. rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  259. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  260. if (!rsp_sg_cnt) {
  261. rval = -ENOMEM;
  262. goto done_free_fcport;
  263. }
  264. if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
  265. (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
  266. DEBUG2(printk(KERN_INFO
  267. "dma mapping resulted in different sg counts \
  268. [request_sg_cnt: %x dma_request_sg_cnt: %x\
  269. reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
  270. bsg_job->request_payload.sg_cnt, req_sg_cnt,
  271. bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
  272. rval = -EAGAIN;
  273. goto done_unmap_sg;
  274. }
  275. /* Alloc SRB structure */
  276. sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
  277. if (!sp) {
  278. rval = -ENOMEM;
  279. goto done_unmap_sg;
  280. }
  281. els = sp->ctx;
  282. els->type =
  283. (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
  284. SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
  285. els->name =
  286. (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
  287. "bsg_els_rpt" : "bsg_els_hst");
  288. els->u.bsg_job = bsg_job;
  289. DEBUG2(qla_printk(KERN_INFO, ha,
  290. "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
  291. "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
  292. bsg_job->request->rqst_data.h_els.command_code,
  293. fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
  294. fcport->d_id.b.al_pa));
  295. rval = qla2x00_start_sp(sp);
  296. if (rval != QLA_SUCCESS) {
  297. kfree(sp->ctx);
  298. mempool_free(sp, ha->srb_mempool);
  299. rval = -EIO;
  300. goto done_unmap_sg;
  301. }
  302. return rval;
  303. done_unmap_sg:
  304. dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  305. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  306. dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  307. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  308. goto done_free_fcport;
  309. done_free_fcport:
  310. if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
  311. kfree(fcport);
  312. done:
  313. return rval;
  314. }
  315. static int
  316. qla2x00_process_ct(struct fc_bsg_job *bsg_job)
  317. {
  318. srb_t *sp;
  319. struct Scsi_Host *host = bsg_job->shost;
  320. scsi_qla_host_t *vha = shost_priv(host);
  321. struct qla_hw_data *ha = vha->hw;
  322. int rval = (DRIVER_ERROR << 16);
  323. int req_sg_cnt, rsp_sg_cnt;
  324. uint16_t loop_id;
  325. struct fc_port *fcport;
  326. char *type = "FC_BSG_HST_CT";
  327. struct srb_ctx *ct;
  328. /* pass through is supported only for ISP 4Gb or higher */
  329. if (!IS_FWI2_CAPABLE(ha)) {
  330. DEBUG2(qla_printk(KERN_INFO, ha,
  331. "scsi(%ld):Firmware is not capable to support FC "
  332. "CT pass thru\n", vha->host_no));
  333. rval = -EPERM;
  334. goto done;
  335. }
  336. req_sg_cnt =
  337. dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  338. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  339. if (!req_sg_cnt) {
  340. rval = -ENOMEM;
  341. goto done;
  342. }
  343. rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  344. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  345. if (!rsp_sg_cnt) {
  346. rval = -ENOMEM;
  347. goto done;
  348. }
  349. if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
  350. (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
  351. DEBUG2(qla_printk(KERN_WARNING, ha,
  352. "[request_sg_cnt: %x dma_request_sg_cnt: %x\
  353. reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
  354. bsg_job->request_payload.sg_cnt, req_sg_cnt,
  355. bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
  356. rval = -EAGAIN;
  357. goto done_unmap_sg;
  358. }
  359. if (!vha->flags.online) {
  360. DEBUG2(qla_printk(KERN_WARNING, ha,
  361. "host not online\n"));
  362. rval = -EIO;
  363. goto done_unmap_sg;
  364. }
  365. loop_id =
  366. (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
  367. >> 24;
  368. switch (loop_id) {
  369. case 0xFC:
  370. loop_id = cpu_to_le16(NPH_SNS);
  371. break;
  372. case 0xFA:
  373. loop_id = vha->mgmt_svr_loop_id;
  374. break;
  375. default:
  376. DEBUG2(qla_printk(KERN_INFO, ha,
  377. "Unknown loop id: %x\n", loop_id));
  378. rval = -EINVAL;
  379. goto done_unmap_sg;
  380. }
  381. /* Allocate a dummy fcport structure, since functions preparing the
  382. * IOCB and mailbox command retrieves port specific information
  383. * from fcport structure. For Host based ELS commands there will be
  384. * no fcport structure allocated
  385. */
  386. fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
  387. if (!fcport) {
  388. rval = -ENOMEM;
  389. goto done_unmap_sg;
  390. }
  391. /* Initialize all required fields of fcport */
  392. fcport->vha = vha;
  393. fcport->vp_idx = vha->vp_idx;
  394. fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
  395. fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
  396. fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
  397. fcport->loop_id = loop_id;
  398. /* Alloc SRB structure */
  399. sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
  400. if (!sp) {
  401. rval = -ENOMEM;
  402. goto done_free_fcport;
  403. }
  404. ct = sp->ctx;
  405. ct->type = SRB_CT_CMD;
  406. ct->name = "bsg_ct";
  407. ct->u.bsg_job = bsg_job;
  408. DEBUG2(qla_printk(KERN_INFO, ha,
  409. "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
  410. "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
  411. (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
  412. fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
  413. fcport->d_id.b.al_pa));
  414. rval = qla2x00_start_sp(sp);
  415. if (rval != QLA_SUCCESS) {
  416. kfree(sp->ctx);
  417. mempool_free(sp, ha->srb_mempool);
  418. rval = -EIO;
  419. goto done_free_fcport;
  420. }
  421. return rval;
  422. done_free_fcport:
  423. kfree(fcport);
  424. done_unmap_sg:
  425. dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  426. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  427. dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  428. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  429. done:
  430. return rval;
  431. }
/*
 * Vendor BSG command: run a loopback or ECHO diagnostic.
 *
 * The request payload is bounced through a coherent DMA buffer, sent to
 * the firmware (loopback test on non-F topologies, ECHO on switched
 * fabric), and the firmware's mailbox response plus the command code are
 * appended after the fc_bsg_reply in the request's sense buffer.  On
 * success the received data is copied back into the reply payload.
 *
 * Note: the cleanup labels below deliberately fall through so the
 * success path releases every resource in reverse acquisition order.
 *
 * Returns 0 on completion (even when the diagnostic itself failed - the
 * failure is reported via reply->result) or a negative errno on setup
 * errors.
 */
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	/* Refuse while an ISP abort/recovery is pending or active. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!vha->flags.online) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);
	if (!elreq.req_sg_cnt)
		return -ENOMEM;

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);
	if (!elreq.rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Both bounce buffers are sized by the request payload length. */
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;
	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Loopback on direct/loop topologies, ECHO on switched fabric. */
	if (ha->current_topology != ISP_CFG_F) {
		type = "FC_BSG_HST_VENDOR_LOOPBACK";
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld) bsg rqst type: %s\n",
		    vha->host_no, type));

		command_sent = INT_DEF_LB_LOOPBACK_CMD;
		rval = qla2x00_loopback_test(vha, &elreq, response);
		if (IS_QLA81XX(ha)) {
			/* 81xx firmware may request a full ISP reset after
			 * a loopback-induced link reset.
			 */
			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
				    "ISP\n", __func__, vha->host_no));
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		}
	} else {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	}

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request %s failed\n", vha->host_no, type));

		/* Stash firmware mailbox status + command code after the
		 * fc_bsg_reply in the sense buffer for the caller.
		 */
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);

		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		/* Diagnostic failure is reported via reply->result, not
		 * the function's return value.
		 */
		rval = 0;
		bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request %s completed\n", vha->host_no, type));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
		    sizeof(response) + sizeof(uint8_t);
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		bsg_job->reply->result = DID_OK;
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}
	bsg_job->job_done(bsg_job);

	/* Success path: fall through all labels to free everything. */
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	return rval;
}
  572. static int
  573. qla84xx_reset(struct fc_bsg_job *bsg_job)
  574. {
  575. struct Scsi_Host *host = bsg_job->shost;
  576. scsi_qla_host_t *vha = shost_priv(host);
  577. struct qla_hw_data *ha = vha->hw;
  578. int rval = 0;
  579. uint32_t flag;
  580. if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
  581. test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
  582. test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
  583. return -EBUSY;
  584. if (!IS_QLA84XX(ha)) {
  585. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
  586. "exiting.\n", vha->host_no));
  587. return -EINVAL;
  588. }
  589. flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
  590. rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
  591. if (rval) {
  592. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
  593. "request 84xx reset failed\n", vha->host_no));
  594. rval = bsg_job->reply->reply_payload_rcv_len = 0;
  595. bsg_job->reply->result = (DID_ERROR << 16);
  596. } else {
  597. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
  598. "request 84xx reset completed\n", vha->host_no));
  599. bsg_job->reply->result = DID_OK;
  600. }
  601. bsg_job->job_done(bsg_job);
  602. return rval;
  603. }
/*
 * Vendor BSG command: download new firmware to the ISP84xx.
 *
 * The firmware image arrives in the request payload; it is bounced into
 * a coherent DMA buffer and handed to the chip via a VERIFY_CHIP IOCB
 * (120 second timeout).  vendor_cmd[1] == A84_ISSUE_UPDATE_DIAGFW_CMD
 * selects the diagnostic firmware slot.
 *
 * Note: the cleanup labels deliberately fall through so the success
 * path releases the IOCB, the firmware buffer and the sg mapping in
 * reverse acquisition order.
 *
 * Returns 0 on completion (IOCB failure is reported via reply->result)
 * or a negative errno on setup errors.
 */
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	/* No firmware updates while an ISP abort/recovery is in flight. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
		    "exiting.\n", vha->host_no));
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt)
		return -ENOMEM;

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x ",
		    bsg_job->request_payload.sg_cnt, sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Bounce the firmware image into a coherent DMA buffer. */
	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
		    "failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	/* Firmware version lives in the third 32-bit word of the image. */
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	/* NOTE(review): mn is a verify_chip_entry_84xx but is cleared with
	 * sizeof(struct access_chip_84xx) - presumably the two IOCB layouts
	 * are the same size; confirm against qla_84xx definitions.
	 */
	memset(mn, 0, sizeof(struct access_chip_84xx));

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx updatefw failed\n", vha->host_no));
		/* IOCB failure is reported via reply->result; return 0. */
		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
		    "request 84xx updatefw completed\n", vha->host_no));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	/* Success path falls through all labels to free everything. */
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	return rval;
}
  695. static int
  696. qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
  697. {
  698. struct Scsi_Host *host = bsg_job->shost;
  699. scsi_qla_host_t *vha = shost_priv(host);
  700. struct qla_hw_data *ha = vha->hw;
  701. struct access_chip_84xx *mn = NULL;
  702. dma_addr_t mn_dma, mgmt_dma;
  703. void *mgmt_b = NULL;
  704. int rval = 0;
  705. struct qla_bsg_a84_mgmt *ql84_mgmt;
  706. uint32_t sg_cnt;
  707. uint32_t data_len = 0;
  708. uint32_t dma_direction = DMA_NONE;
  709. if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
  710. test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
  711. test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
  712. return -EBUSY;
  713. if (!IS_QLA84XX(ha)) {
  714. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
  715. "exiting.\n", vha->host_no));
  716. return -EINVAL;
  717. }
  718. ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
  719. sizeof(struct fc_bsg_request));
  720. if (!ql84_mgmt) {
  721. DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
  722. __func__, vha->host_no));
  723. return -EINVAL;
  724. }
  725. mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
  726. if (!mn) {
  727. DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
  728. "failed for host=%lu\n", __func__, vha->host_no));
  729. return -ENOMEM;
  730. }
  731. memset(mn, 0, sizeof(struct access_chip_84xx));
  732. mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
  733. mn->entry_count = 1;
  734. switch (ql84_mgmt->mgmt.cmd) {
  735. case QLA84_MGMT_READ_MEM:
  736. case QLA84_MGMT_GET_INFO:
  737. sg_cnt = dma_map_sg(&ha->pdev->dev,
  738. bsg_job->reply_payload.sg_list,
  739. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  740. if (!sg_cnt) {
  741. rval = -ENOMEM;
  742. goto exit_mgmt;
  743. }
  744. dma_direction = DMA_FROM_DEVICE;
  745. if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
  746. DEBUG2(printk(KERN_INFO
  747. "dma mapping resulted in different sg counts "
  748. "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
  749. bsg_job->reply_payload.sg_cnt, sg_cnt));
  750. rval = -EAGAIN;
  751. goto done_unmap_sg;
  752. }
  753. data_len = bsg_job->reply_payload.payload_len;
  754. mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
  755. &mgmt_dma, GFP_KERNEL);
  756. if (!mgmt_b) {
  757. DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
  758. "failed for host=%lu\n",
  759. __func__, vha->host_no));
  760. rval = -ENOMEM;
  761. goto done_unmap_sg;
  762. }
  763. if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
  764. mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
  765. mn->parameter1 =
  766. cpu_to_le32(
  767. ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
  768. } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
  769. mn->options = cpu_to_le16(ACO_REQUEST_INFO);
  770. mn->parameter1 =
  771. cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
  772. mn->parameter2 =
  773. cpu_to_le32(
  774. ql84_mgmt->mgmt.mgmtp.u.info.context);
  775. }
  776. break;
  777. case QLA84_MGMT_WRITE_MEM:
  778. sg_cnt = dma_map_sg(&ha->pdev->dev,
  779. bsg_job->request_payload.sg_list,
  780. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  781. if (!sg_cnt) {
  782. rval = -ENOMEM;
  783. goto exit_mgmt;
  784. }
  785. dma_direction = DMA_TO_DEVICE;
  786. if (sg_cnt != bsg_job->request_payload.sg_cnt) {
  787. DEBUG2(printk(KERN_INFO
  788. "dma mapping resulted in different sg counts "
  789. "request_sg_cnt: %x dma_request_sg_cnt: %x ",
  790. bsg_job->request_payload.sg_cnt, sg_cnt));
  791. rval = -EAGAIN;
  792. goto done_unmap_sg;
  793. }
  794. data_len = bsg_job->request_payload.payload_len;
  795. mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
  796. &mgmt_dma, GFP_KERNEL);
  797. if (!mgmt_b) {
  798. DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
  799. "failed for host=%lu\n",
  800. __func__, vha->host_no));
  801. rval = -ENOMEM;
  802. goto done_unmap_sg;
  803. }
  804. sg_copy_to_buffer(bsg_job->request_payload.sg_list,
  805. bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
  806. mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
  807. mn->parameter1 =
  808. cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
  809. break;
  810. case QLA84_MGMT_CHNG_CONFIG:
  811. mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
  812. mn->parameter1 =
  813. cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
  814. mn->parameter2 =
  815. cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
  816. mn->parameter3 =
  817. cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
  818. break;
  819. default:
  820. rval = -EIO;
  821. goto exit_mgmt;
  822. }
  823. if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
  824. mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
  825. mn->dseg_count = cpu_to_le16(1);
  826. mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
  827. mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
  828. mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
  829. }
  830. rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
  831. if (rval) {
  832. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
  833. "request 84xx mgmt failed\n", vha->host_no));
  834. rval = bsg_job->reply->reply_payload_rcv_len = 0;
  835. bsg_job->reply->result = (DID_ERROR << 16);
  836. } else {
  837. DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
  838. "request 84xx mgmt completed\n", vha->host_no));
  839. bsg_job->reply_len = sizeof(struct fc_bsg_reply);
  840. bsg_job->reply->result = DID_OK;
  841. if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
  842. (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
  843. bsg_job->reply->reply_payload_rcv_len =
  844. bsg_job->reply_payload.payload_len;
  845. sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
  846. bsg_job->reply_payload.sg_cnt, mgmt_b,
  847. data_len);
  848. }
  849. }
  850. bsg_job->job_done(bsg_job);
  851. done_unmap_sg:
  852. if (mgmt_b)
  853. dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
  854. if (dma_direction == DMA_TO_DEVICE)
  855. dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
  856. bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
  857. else if (dma_direction == DMA_FROM_DEVICE)
  858. dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
  859. bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  860. exit_mgmt:
  861. dma_pool_free(ha->s_dma_pool, mn, mn_dma);
  862. return rval;
  863. }
  864. static int
  865. qla24xx_iidma(struct fc_bsg_job *bsg_job)
  866. {
  867. struct Scsi_Host *host = bsg_job->shost;
  868. scsi_qla_host_t *vha = shost_priv(host);
  869. struct qla_hw_data *ha = vha->hw;
  870. int rval = 0;
  871. struct qla_port_param *port_param = NULL;
  872. fc_port_t *fcport = NULL;
  873. uint16_t mb[MAILBOX_REGISTER_COUNT];
  874. uint8_t *rsp_ptr = NULL;
  875. bsg_job->reply->reply_payload_rcv_len = 0;
  876. if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
  877. test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
  878. test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
  879. return -EBUSY;
  880. if (!IS_IIDMA_CAPABLE(vha->hw)) {
  881. DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
  882. "supported\n", __func__, vha->host_no));
  883. return -EINVAL;
  884. }
  885. port_param = (struct qla_port_param *)((char *)bsg_job->request +
  886. sizeof(struct fc_bsg_request));
  887. if (!port_param) {
  888. DEBUG2(printk("%s(%ld): port_param header not provided, "
  889. "exiting.\n", __func__, vha->host_no));
  890. return -EINVAL;
  891. }
  892. if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
  893. DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
  894. __func__, vha->host_no));
  895. return -EINVAL;
  896. }
  897. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  898. if (fcport->port_type != FCT_TARGET)
  899. continue;
  900. if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
  901. fcport->port_name, sizeof(fcport->port_name)))
  902. continue;
  903. break;
  904. }
  905. if (!fcport) {
  906. DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
  907. __func__, vha->host_no));
  908. return -EINVAL;
  909. }
  910. if (port_param->mode)
  911. rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
  912. port_param->speed, mb);
  913. else
  914. rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
  915. &port_param->speed, mb);
  916. if (rval) {
  917. DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
  918. "%02x%02x%02x%02x%02x%02x%02x%02x -- "
  919. "%04x %x %04x %04x.\n",
  920. vha->host_no, fcport->port_name[0],
  921. fcport->port_name[1],
  922. fcport->port_name[2], fcport->port_name[3],
  923. fcport->port_name[4], fcport->port_name[5],
  924. fcport->port_name[6], fcport->port_name[7], rval,
  925. fcport->fp_speed, mb[0], mb[1]));
  926. rval = 0;
  927. bsg_job->reply->result = (DID_ERROR << 16);
  928. } else {
  929. if (!port_param->mode) {
  930. bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
  931. sizeof(struct qla_port_param);
  932. rsp_ptr = ((uint8_t *)bsg_job->reply) +
  933. sizeof(struct fc_bsg_reply);
  934. memcpy(rsp_ptr, port_param,
  935. sizeof(struct qla_port_param));
  936. }
  937. bsg_job->reply->result = DID_OK;
  938. }
  939. bsg_job->job_done(bsg_job);
  940. return rval;
  941. }
  942. static int
  943. qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
  944. {
  945. switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
  946. case QL_VND_LOOPBACK:
  947. return qla2x00_process_loopback(bsg_job);
  948. case QL_VND_A84_RESET:
  949. return qla84xx_reset(bsg_job);
  950. case QL_VND_A84_UPDATE_FW:
  951. return qla84xx_updatefw(bsg_job);
  952. case QL_VND_A84_MGMT_CMD:
  953. return qla84xx_mgmt_cmd(bsg_job);
  954. case QL_VND_IIDMA:
  955. return qla24xx_iidma(bsg_job);
  956. case QL_VND_FCP_PRIO_CFG_CMD:
  957. return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
  958. default:
  959. bsg_job->reply->result = (DID_ERROR << 16);
  960. bsg_job->job_done(bsg_job);
  961. return -ENOSYS;
  962. }
  963. }
  964. int
  965. qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
  966. {
  967. int ret = -EINVAL;
  968. switch (bsg_job->request->msgcode) {
  969. case FC_BSG_RPT_ELS:
  970. case FC_BSG_HST_ELS_NOLOGIN:
  971. ret = qla2x00_process_els(bsg_job);
  972. break;
  973. case FC_BSG_HST_CT:
  974. ret = qla2x00_process_ct(bsg_job);
  975. break;
  976. case FC_BSG_HST_VENDOR:
  977. ret = qla2x00_process_vendor_specific(bsg_job);
  978. break;
  979. case FC_BSG_HST_ADD_RPORT:
  980. case FC_BSG_HST_DEL_RPORT:
  981. case FC_BSG_RPT_CT:
  982. default:
  983. DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
  984. break;
  985. }
  986. return ret;
  987. }
  988. int
  989. qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
  990. {
  991. scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
  992. struct qla_hw_data *ha = vha->hw;
  993. srb_t *sp;
  994. int cnt, que;
  995. unsigned long flags;
  996. struct req_que *req;
  997. struct srb_ctx *sp_bsg;
  998. /* find the bsg job from the active list of commands */
  999. spin_lock_irqsave(&ha->hardware_lock, flags);
  1000. for (que = 0; que < ha->max_req_queues; que++) {
  1001. req = ha->req_q_map[que];
  1002. if (!req)
  1003. continue;
  1004. for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
  1005. sp = req->outstanding_cmds[cnt];
  1006. if (sp) {
  1007. sp_bsg = sp->ctx;
  1008. if (((sp_bsg->type == SRB_CT_CMD) ||
  1009. (sp_bsg->type == SRB_ELS_CMD_HST))
  1010. && (sp_bsg->u.bsg_job == bsg_job)) {
  1011. if (ha->isp_ops->abort_command(sp)) {
  1012. DEBUG2(qla_printk(KERN_INFO, ha,
  1013. "scsi(%ld): mbx "
  1014. "abort_command failed\n",
  1015. vha->host_no));
  1016. bsg_job->req->errors =
  1017. bsg_job->reply->result = -EIO;
  1018. } else {
  1019. DEBUG2(qla_printk(KERN_INFO, ha,
  1020. "scsi(%ld): mbx "
  1021. "abort_command success\n",
  1022. vha->host_no));
  1023. bsg_job->req->errors =
  1024. bsg_job->reply->result = 0;
  1025. }
  1026. goto done;
  1027. }
  1028. }
  1029. }
  1030. }
  1031. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1032. DEBUG2(qla_printk(KERN_INFO, ha,
  1033. "scsi(%ld) SRB not found to abort\n", vha->host_no));
  1034. bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
  1035. return 0;
  1036. done:
  1037. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1038. if (bsg_job->request->msgcode == FC_BSG_HST_CT)
  1039. kfree(sp->fcport);
  1040. kfree(sp->ctx);
  1041. mempool_free(sp, ha->srb_mempool);
  1042. return 0;
  1043. }