/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

/* BSG support for ELS/CT pass through */
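
/*
 * Generic completion callback for BSG srbs: post the result to the
 * FC transport, signal job completion, and release the srb.
 */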
void
qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
    srb_t *sp = (srb_t *)ptr;
    struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
    struct fc_bsg_job *bsg_job = sp->u.bsg_job;

    bsg_job->reply->result = res;
    bsg_job->job_done(bsg_job);
    sp->free(vha, sp);
}
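
/*
 * Release the resources held by a BSG srb: unmap the request/reply
 * scatter-gather lists and, for host-based ELS/CT commands, free the
 * dummy fcport allocated for the job.
 */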
void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
    srb_t *sp = (srb_t *)ptr;
    struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
    struct fc_bsg_job *bsg_job = sp->u.bsg_job;
    struct qla_hw_data *ha = vha->hw;

    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

    if (sp->type == SRB_CT_CMD ||
        sp->type == SRB_ELS_CMD_HST)
        kfree(sp->fcport);

    mempool_free(sp, vha->hw->srb_mempool);
}
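
/*
 * Validate FCP priority configuration data: check for the 'HQOS'
 * signature and, when @flag is set, verify that at least one priority
 * entry is tagged valid.
 */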
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
    int i, ret, num_valid;
    uint8_t *bcode;
    struct qla_fcp_prio_entry *pri_entry;
    uint32_t *bcode_val_ptr, bcode_val;

    ret = 1;
    num_valid = 0;
    bcode = (uint8_t *)pri_cfg;
    bcode_val_ptr = (uint32_t *)pri_cfg;
    bcode_val = (uint32_t)(*bcode_val_ptr);

    if (bcode_val == 0xFFFFFFFF) {
        /* No FCP Priority config data in flash */
        ql_dbg(ql_dbg_user, vha, 0x7051,
            "No FCP Priority config data.\n");
        return 0;
    }

    if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
        bcode[3] != 'S') {
        /* Invalid FCP priority data header */
        ql_dbg(ql_dbg_user, vha, 0x7052,
            "Invalid FCP Priority data header. bcode=0x%x.\n",
            bcode_val);
        return 0;
    }
    if (flag != 1)
        return ret;

    pri_entry = &pri_cfg->entry[0];
    for (i = 0; i < pri_cfg->num_entries; i++) {
        if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
            num_valid++;
        pri_entry++;
    }

    if (num_valid == 0) {
        /* No valid FCP priority data entries */
        ql_dbg(ql_dbg_user, vha, 0x7053,
            "No valid FCP Priority data entries.\n");
        ret = 0;
    } else {
        /* FCP priority data is valid */
        ql_dbg(ql_dbg_user, vha, 0x7054,
            "Valid FCP priority data. num entries = %d.\n",
            num_valid);
    }

    return ret;
}
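
/*
 * Vendor command handler for FCP priority (QoS) configuration:
 * enable/disable the feature or get/set the configuration data held
 * in ha->fcp_prio_cfg.
 */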
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int ret = 0;
    uint32_t len;
    uint32_t oper;

    if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    /* Get the sub command */
    oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

    /* Only set config is allowed if config memory is not allocated */
    if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    switch (oper) {
    case QLFC_FCP_PRIO_DISABLE:
        if (ha->flags.fcp_prio_enabled) {
            ha->flags.fcp_prio_enabled = 0;
            ha->fcp_prio_cfg->attributes &=
                ~FCP_PRIO_ATTR_ENABLE;
            qla24xx_update_all_fcp_prio(vha);
            bsg_job->reply->result = DID_OK;
        } else {
            ret = -EINVAL;
            bsg_job->reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }
        break;

    case QLFC_FCP_PRIO_ENABLE:
        if (!ha->flags.fcp_prio_enabled) {
            if (ha->fcp_prio_cfg) {
                ha->flags.fcp_prio_enabled = 1;
                ha->fcp_prio_cfg->attributes |=
                    FCP_PRIO_ATTR_ENABLE;
                qla24xx_update_all_fcp_prio(vha);
                bsg_job->reply->result = DID_OK;
            } else {
                ret = -EINVAL;
                bsg_job->reply->result = (DID_ERROR << 16);
                goto exit_fcp_prio_cfg;
            }
        }
        break;

    case QLFC_FCP_PRIO_GET_CONFIG:
        len = bsg_job->reply_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            ret = -EINVAL;
            bsg_job->reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }

        bsg_job->reply->result = DID_OK;
        bsg_job->reply->reply_payload_rcv_len =
            sg_copy_from_buffer(
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
            len);
        break;

    case QLFC_FCP_PRIO_SET_CONFIG:
        len = bsg_job->request_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            bsg_job->reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            goto exit_fcp_prio_cfg;
        }

        if (!ha->fcp_prio_cfg) {
            ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
            if (!ha->fcp_prio_cfg) {
                ql_log(ql_log_warn, vha, 0x7050,
                    "Unable to allocate memory for fcp prio "
                    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
                bsg_job->reply->result = (DID_ERROR << 16);
                ret = -ENOMEM;
                goto exit_fcp_prio_cfg;
            }
        }

        memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
            FCP_PRIO_CFG_SIZE);

        /* validate fcp priority data */
        if (!qla24xx_fcp_prio_cfg_valid(vha,
            (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
            bsg_job->reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            /* If the buffer was invalidated,
             * fcp_prio_cfg is of no use
             */
            vfree(ha->fcp_prio_cfg);
            ha->fcp_prio_cfg = NULL;
            goto exit_fcp_prio_cfg;
        }

        ha->flags.fcp_prio_enabled = 0;
        if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
            ha->flags.fcp_prio_enabled = 1;
        qla24xx_update_all_fcp_prio(vha);
        bsg_job->reply->result = DID_OK;
        break;
    default:
        ret = -EINVAL;
        break;
    }
exit_fcp_prio_cfg:
    if (!ret)
        bsg_job->job_done(bsg_job);
    return ret;
}
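
/*
 * Handle an ELS pass-through request: either rport-directed
 * (FC_BSG_RPT_ELS) or host-based without a prior login
 * (FC_BSG_HST_ELS_NOLOGIN).
 */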
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
    struct fc_rport *rport;
    fc_port_t *fcport = NULL;
    struct Scsi_Host *host;
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    srb_t *sp;
    const char *type;
    int req_sg_cnt, rsp_sg_cnt;
    int rval = (DRIVER_ERROR << 16);
    uint16_t nextlid = 0;

    if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
        rport = bsg_job->rport;
        fcport = *(fc_port_t **) rport->dd_data;
        host = rport_to_shost(rport);
        vha = shost_priv(host);
        ha = vha->hw;
        type = "FC_BSG_RPT_ELS";
    } else {
        host = bsg_job->shost;
        vha = shost_priv(host);
        ha = vha->hw;
        type = "FC_BSG_HST_ELS_NOLOGIN";
    }

    /* pass through is supported only for ISP 4Gb or higher */
    if (!IS_FWI2_CAPABLE(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x7001,
            "ELS passthru not supported for ISP23xx based adapters.\n");
        rval = -EPERM;
        goto done;
    }

    /* Multiple SG's are not supported for ELS requests */
    if (bsg_job->request_payload.sg_cnt > 1 ||
        bsg_job->reply_payload.sg_cnt > 1) {
        ql_dbg(ql_dbg_user, vha, 0x7002,
            "Multiple SG's are not supported for ELS requests, "
            "request_sg_cnt=%x reply_sg_cnt=%x.\n",
            bsg_job->request_payload.sg_cnt,
            bsg_job->reply_payload.sg_cnt);
        rval = -EPERM;
        goto done;
    }

    /* ELS request for rport */
    if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
        /* make sure the rport is logged in,
         * if not perform fabric login
         */
        if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
            ql_dbg(ql_dbg_user, vha, 0x7003,
                "Failed to login port %06X for ELS passthru.\n",
                fcport->d_id.b24);
            rval = -EIO;
            goto done;
        }
    } else {
        /* Allocate a dummy fcport structure, since functions
         * preparing the IOCB and mailbox command retrieves port
         * specific information from fcport structure. For Host based
         * ELS commands there will be no fcport structure allocated
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
            rval = -ENOMEM;
            goto done;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->d_id.b.al_pa =
            bsg_job->request->rqst_data.h_els.port_id[0];
        fcport->d_id.b.area =
            bsg_job->request->rqst_data.h_els.port_id[1];
        fcport->d_id.b.domain =
            bsg_job->request->rqst_data.h_els.port_id[2];
        fcport->loop_id =
            (fcport->d_id.b.al_pa == 0xFD) ?
            NPH_FABRIC_CONTROLLER : NPH_F_PORT;
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
        rval = -EIO;
        goto done;
    }

    req_sg_cnt =
        dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!req_sg_cnt) {
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    if (!rsp_sg_cnt) {
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x7008,
            "dma mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
            "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    sp->type =
        (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
        SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
    sp->name =
        (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
        "bsg_els_rpt" : "bsg_els_hst");
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->done = qla2x00_bsg_job_done;

    ql_dbg(ql_dbg_user, vha, 0x700a,
        "bsg rqst type: %s els type: %x - loop-id=%x "
        "portid=%-2x%02x%02x.\n", type,
        bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x700e,
            "qla2x00_start_sp failed = %d\n", rval);
        mempool_free(sp, ha->srb_mempool);
        rval = -EIO;
        goto done_unmap_sg;
    }
    return rval;

done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    goto done_free_fcport;

done_free_fcport:
    if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
        kfree(fcport);
done:
    return rval;
}
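
/*
 * Number of IOCBs needed for a CT pass-through: the command IOCB holds
 * two data segment descriptors; each continuation IOCB holds five more.
 */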
inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 2) {
        iocbs += (dsds - 2) / 5;
        if ((dsds - 2) % 5)
            iocbs++;
    }
    return iocbs;
}
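
/* Handle a CT pass-through request to the SNS or the management server. */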
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
    srb_t *sp;
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = (DRIVER_ERROR << 16);
    int req_sg_cnt, rsp_sg_cnt;
    uint16_t loop_id;
    struct fc_port *fcport;
    char *type = "FC_BSG_HST_CT";

    req_sg_cnt =
        dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!req_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x700f,
            "dma_map_sg return %d for request\n", req_sg_cnt);
        rval = -ENOMEM;
        goto done;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    if (!rsp_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7010,
            "dma_map_sg return %d for reply\n", rsp_sg_cnt);
        rval = -ENOMEM;
        goto done;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x7011,
            "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
            "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7012,
            "Host is not online.\n");
        rval = -EIO;
        goto done_unmap_sg;
    }

    loop_id =
        (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
            >> 24;
    switch (loop_id) {
    case 0xFC:
        loop_id = cpu_to_le16(NPH_SNS);
        break;
    case 0xFA:
        loop_id = vha->mgmt_svr_loop_id;
        break;
    default:
        ql_dbg(ql_dbg_user, vha, 0x7013,
            "Unknown loop id: %x.\n", loop_id);
        rval = -EINVAL;
        goto done_unmap_sg;
    }

    /* Allocate a dummy fcport structure, since functions preparing the
     * IOCB and mailbox command retrieves port specific information
     * from fcport structure. For Host based ELS commands there will be
     * no fcport structure allocated
     */
    fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
    if (!fcport) {
        ql_log(ql_log_warn, vha, 0x7014,
            "Failed to allocate fcport.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    /* Initialize all required fields of fcport */
    fcport->vha = vha;
    fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
    fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
    fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
    fcport->loop_id = loop_id;

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        ql_log(ql_log_warn, vha, 0x7015,
            "qla2x00_get_sp failed.\n");
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    sp->type = SRB_CT_CMD;
    sp->name = "bsg_ct";
    sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->done = qla2x00_bsg_job_done;

    ql_dbg(ql_dbg_user, vha, 0x7016,
        "bsg rqst type: %s ct type: %x - "
        "loop-id=%x portid=%02x%02x%02x.\n", type,
        (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
        fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
        fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7017,
            "qla2x00_start_sp failed=%d.\n", rval);
        mempool_free(sp, ha->srb_mempool);
        rval = -EIO;
        goto done_free_fcport;
    }
    return rval;

done_free_fcport:
    kfree(fcport);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
    return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
    int ret = 0;
    int rval = 0;
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
        goto done_set_internal;

    if (mode == INTERNAL_LOOPBACK)
        new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
    else if (mode == EXTERNAL_LOOPBACK)
        new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
    ql_dbg(ql_dbg_user, vha, 0x70be,
        "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

    memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

    ha->notify_dcbx_comp = 1;
    ret = qla81xx_set_port_config(vha, new_config);
    if (ret != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7021,
            "set port config failed.\n");
        ha->notify_dcbx_comp = 0;
        rval = -EINVAL;
        goto done_set_internal;
    }

    /* Wait for DCBX complete event */
    if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
        ql_dbg(ql_dbg_user, vha, 0x7022,
            "State change notification not received.\n");
        rval = -EINVAL;
    } else {
        if (ha->flags.idc_compl_status) {
            ql_dbg(ql_dbg_user, vha, 0x70c3,
                "Bad status in IDC Completion AEN\n");
            rval = -EINVAL;
            ha->flags.idc_compl_status = 0;
        } else
            ql_dbg(ql_dbg_user, vha, 0x7023,
                "State change received.\n");
    }

    ha->notify_dcbx_comp = 0;

done_set_internal:
    return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait)
{
    int ret = 0;
    int rval = 0;
    uint16_t new_config[4];
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
        goto done_reset_internal;

    memset(new_config, 0, sizeof(new_config));
    if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
        ENABLE_INTERNAL_LOOPBACK ||
        (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
        ENABLE_EXTERNAL_LOOPBACK) {
        new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
        ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
            (new_config[0] & INTERNAL_LOOPBACK_MASK));
        memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

        ha->notify_dcbx_comp = wait;
        ret = qla81xx_set_port_config(vha, new_config);
        if (ret != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x7025,
                "Set port config failed.\n");
            ha->notify_dcbx_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        }

        /* Wait for DCBX complete event */
        if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
            (20 * HZ))) {
            ql_dbg(ql_dbg_user, vha, 0x7026,
                "State change notification not received.\n");
            ha->notify_dcbx_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        } else
            ql_dbg(ql_dbg_user, vha, 0x7027,
                "State change received.\n");

        ha->notify_dcbx_comp = 0;
    }
done_reset_internal:
    return rval;
}
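
/*
 * Vendor diagnostic handler: run an ECHO test when the topology allows
 * it; otherwise configure the requested loopback mode, run the loopback
 * test, and restore the original port configuration afterwards.
 */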
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval;
    uint8_t command_sent;
    char *type;
    struct msg_echo_lb elreq;
    uint16_t response[MAILBOX_REGISTER_COUNT];
    uint16_t config[4], new_config[4];
    uint8_t *fw_sts_ptr;
    uint8_t *req_data = NULL;
    dma_addr_t req_data_dma;
    uint32_t req_data_len;
    uint8_t *rsp_data = NULL;
    dma_addr_t rsp_data_dma;
    uint32_t rsp_data_len;

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
        return -EIO;
    }

    elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
        DMA_TO_DEVICE);

    if (!elreq.req_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x701a,
            "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
        return -ENOMEM;
    }

    elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
        DMA_FROM_DEVICE);

    if (!elreq.rsp_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x701b,
            "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
        rval = -ENOMEM;
        goto done_unmap_req_sg;
    }

    if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x701c,
            "dma mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt: %x "
            "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
            bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
            bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
    req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
        &req_data_dma, GFP_KERNEL);
    if (!req_data) {
        ql_log(ql_log_warn, vha, 0x701d,
            "dma alloc failed for req_data.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
        &rsp_data_dma, GFP_KERNEL);
    if (!rsp_data) {
        ql_log(ql_log_warn, vha, 0x7004,
            "dma alloc failed for rsp_data.\n");
        rval = -ENOMEM;
        goto done_free_dma_req;
    }

    /* Copy the request buffer in req_data now */
    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, req_data, req_data_len);

    elreq.send_dma = req_data_dma;
    elreq.rcv_dma = rsp_data_dma;
    elreq.transfer_size = req_data_len;

    elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

    if (atomic_read(&vha->loop_state) == LOOP_READY &&
        (ha->current_topology == ISP_CFG_F ||
        ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
        le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
        && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
        elreq.options == EXTERNAL_LOOPBACK) {
        type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
        ql_dbg(ql_dbg_user, vha, 0x701e,
            "BSG request type: %s.\n", type);
        command_sent = INT_DEF_LB_ECHO_CMD;
        rval = qla2x00_echo_test(vha, &elreq, response);
    } else {
        if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
            memset(config, 0, sizeof(config));
            memset(new_config, 0, sizeof(new_config));
            if (qla81xx_get_port_config(vha, config)) {
                ql_log(ql_log_warn, vha, 0x701f,
                    "Get port config failed.\n");
                rval = -EPERM;
                goto done_free_dma_rsp;
            }

            if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
                ql_dbg(ql_dbg_user, vha, 0x70c4,
                    "Loopback operation already in "
                    "progress.\n");
                rval = -EAGAIN;
                goto done_free_dma_rsp;
            }

            ql_dbg(ql_dbg_user, vha, 0x70c0,
                "elreq.options=%04x\n", elreq.options);

            if (elreq.options == EXTERNAL_LOOPBACK) {
                if (IS_QLA8031(ha))
                    rval = qla81xx_set_loopback_mode(vha,
                        config, new_config, elreq.options);
                else
                    rval = qla81xx_reset_loopback_mode(vha,
                        config, 1);
            } else {
                rval = qla81xx_set_loopback_mode(vha, config,
                    new_config, elreq.options);
            }

            if (rval) {
                rval = -EPERM;
                goto done_free_dma_rsp;
            }

            type = "FC_BSG_HST_VENDOR_LOOPBACK";
            ql_dbg(ql_dbg_user, vha, 0x7028,
                "BSG request type: %s.\n", type);

            command_sent = INT_DEF_LB_LOOPBACK_CMD;
            rval = qla2x00_loopback_test(vha, &elreq, response);

            if (new_config[0]) {
                /* Revert back to original port config
                 * Also clear internal loopback
                 */
                qla81xx_reset_loopback_mode(vha,
                    new_config, 0);
            }

            if (response[0] == MBS_COMMAND_ERROR &&
                response[1] == MBS_LB_RESET) {
                ql_log(ql_log_warn, vha, 0x7029,
                    "MBX command error, Aborting ISP.\n");
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                qla2xxx_wake_dpc(vha);
                qla2x00_wait_for_chip_reset(vha);
                /* Also reset the MPI */
                if (IS_QLA81XX(ha)) {
                    if (qla81xx_restart_mpi_firmware(vha) !=
                        QLA_SUCCESS) {
                        ql_log(ql_log_warn, vha, 0x702a,
                            "MPI reset failed.\n");
                    }
                }

                rval = -EIO;
                goto done_free_dma_rsp;
            }
        } else {
            type = "FC_BSG_HST_VENDOR_LOOPBACK";
            ql_dbg(ql_dbg_user, vha, 0x702b,
                "BSG request type: %s.\n", type);
            command_sent = INT_DEF_LB_LOOPBACK_CMD;
            rval = qla2x00_loopback_test(vha, &elreq, response);
        }
    }

    if (rval) {
        ql_log(ql_log_warn, vha, 0x702c,
            "Vendor request %s failed.\n", type);

        rval = 0;
        bsg_job->reply->result = (DID_ERROR << 16);
        bsg_job->reply->reply_payload_rcv_len = 0;
    } else {
        ql_dbg(ql_dbg_user, vha, 0x702d,
            "Vendor request %s completed.\n", type);
        bsg_job->reply->result = (DID_OK << 16);
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, rsp_data,
            rsp_data_len);
    }

    bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
        sizeof(response) + sizeof(uint8_t);
    fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
        sizeof(struct fc_bsg_reply);
    memcpy(fw_sts_ptr, response, sizeof(response));
    fw_sts_ptr += sizeof(response);
    *fw_sts_ptr = command_sent;

done_free_dma_rsp:
    dma_free_coherent(&ha->pdev->dev, rsp_data_len,
        rsp_data, rsp_data_dma);
done_free_dma_req:
    dma_free_coherent(&ha->pdev->dev, req_data_len,
        req_data, req_data_dma);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!rval)
        bsg_job->job_done(bsg_job);
    return rval;
}
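
/*
 * Vendor command: reset the ISP84xx chip, optionally into diagnostic
 * firmware (A84_ISSUE_RESET_DIAG_FW).
 */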
static int
qla84xx_reset(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint32_t flag;

    if (!IS_QLA84XX(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

    rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7030,
            "Vendor request 84xx reset failed.\n");
        rval = (DID_ERROR << 16);
    } else {
        ql_dbg(ql_dbg_user, vha, 0x7031,
            "Vendor request 84xx reset completed.\n");
        bsg_job->reply->result = DID_OK;
        bsg_job->job_done(bsg_job);
    }

    return rval;
}
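
/*
 * Vendor command: download a firmware image to the ISP84xx via a
 * Verify Chip IOCB.
 */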
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    struct verify_chip_entry_84xx *mn = NULL;
    dma_addr_t mn_dma, fw_dma;
    void *fw_buf = NULL;
    int rval = 0;
    uint32_t sg_cnt;
    uint32_t data_len;
    uint16_t options;
    uint32_t flag;
    uint32_t fw_ver;

    if (!IS_QLA84XX(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x7032,
            "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7033,
            "dma_map_sg returned %d for request.\n", sg_cnt);
        return -ENOMEM;
    }

    if (sg_cnt != bsg_job->request_payload.sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7034,
            "DMA mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
            bsg_job->request_payload.sg_cnt, sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    data_len = bsg_job->request_payload.payload_len;
    fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
        &fw_dma, GFP_KERNEL);
    if (!fw_buf) {
        ql_log(ql_log_warn, vha, 0x7035,
            "DMA alloc failed for fw_buf.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, fw_buf, data_len);

    mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
    if (!mn) {
        ql_log(ql_log_warn, vha, 0x7036,
            "DMA alloc failed for fw buffer.\n");
        rval = -ENOMEM;
        goto done_free_fw_buf;
    }

    flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
    fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

    memset(mn, 0, sizeof(struct access_chip_84xx));
    mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
    mn->entry_count = 1;

    options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
    if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
        options |= VCO_DIAG_FW;

    mn->options = cpu_to_le16(options);
    mn->fw_ver = cpu_to_le32(fw_ver);
    mn->fw_size = cpu_to_le32(data_len);
    mn->fw_seq_size = cpu_to_le32(data_len);
    mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
    mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
    mn->dseg_length = cpu_to_le32(data_len);
    mn->data_seg_cnt = cpu_to_le16(1);

    rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7037,
            "Vendor request 84xx updatefw failed.\n");

        rval = (DID_ERROR << 16);
    } else {
        ql_dbg(ql_dbg_user, vha, 0x7038,
            "Vendor request 84xx updatefw completed.\n");

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_job->reply->result = DID_OK;
    }

    dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
    dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

    if (!rval)
        bsg_job->job_done(bsg_job);
    return rval;
}
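
/*
 * Vendor command: ISP84xx management access - read/write chip memory,
 * retrieve chip information, or change configuration parameters via an
 * Access Chip IOCB.
 */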
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    struct access_chip_84xx *mn = NULL;
    dma_addr_t mn_dma, mgmt_dma;
    void *mgmt_b = NULL;
    int rval = 0;
    struct qla_bsg_a84_mgmt *ql84_mgmt;
    uint32_t sg_cnt;
    uint32_t data_len = 0;
    uint32_t dma_direction = DMA_NONE;

    if (!IS_QLA84XX(ha)) {
        ql_log(ql_log_warn, vha, 0x703a,
            "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
        sizeof(struct fc_bsg_request));
    if (!ql84_mgmt) {
        ql_log(ql_log_warn, vha, 0x703b,
            "MGMT header not provided, exiting.\n");
        return -EINVAL;
    }

    mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
    if (!mn) {
        ql_log(ql_log_warn, vha, 0x703c,
            "DMA alloc failed for fw buffer.\n");
        return -ENOMEM;
    }

    memset(mn, 0, sizeof(struct access_chip_84xx));
    mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
    mn->entry_count = 1;

    switch (ql84_mgmt->mgmt.cmd) {
    case QLA84_MGMT_READ_MEM:
    case QLA84_MGMT_GET_INFO:
        sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!sg_cnt) {
            ql_log(ql_log_warn, vha, 0x703d,
                "dma_map_sg returned %d for reply.\n", sg_cnt);
            rval = -ENOMEM;
            goto exit_mgmt;
        }

        dma_direction = DMA_FROM_DEVICE;

        if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
            ql_log(ql_log_warn, vha, 0x703e,
                "DMA mapping resulted in different sg counts, "
                "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
                bsg_job->reply_payload.sg_cnt, sg_cnt);
            rval = -EAGAIN;
            goto done_unmap_sg;
        }

        data_len = bsg_job->reply_payload.payload_len;

        mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &mgmt_dma, GFP_KERNEL);
        if (!mgmt_b) {
            ql_log(ql_log_warn, vha, 0x703f,
                "DMA alloc failed for mgmt_b.\n");
            rval = -ENOMEM;
            goto done_unmap_sg;
        }

        if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
            mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
            mn->parameter1 =
                cpu_to_le32(
                ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

        } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
            mn->options = cpu_to_le16(ACO_REQUEST_INFO);
            mn->parameter1 =
                cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

            mn->parameter2 =
                cpu_to_le32(
                ql84_mgmt->mgmt.mgmtp.u.info.context);
        }
        break;

    case QLA84_MGMT_WRITE_MEM:
        sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (!sg_cnt) {
            ql_log(ql_log_warn, vha, 0x7040,
                "dma_map_sg returned %d.\n", sg_cnt);
            rval = -ENOMEM;
            goto exit_mgmt;
        }

        dma_direction = DMA_TO_DEVICE;

        if (sg_cnt != bsg_job->request_payload.sg_cnt) {
            ql_log(ql_log_warn, vha, 0x7041,
                "DMA mapping resulted in different sg counts, "
                "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
                bsg_job->request_payload.sg_cnt, sg_cnt);
            rval = -EAGAIN;
            goto done_unmap_sg;
        }

        data_len = bsg_job->request_payload.payload_len;
        mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &mgmt_dma, GFP_KERNEL);
        if (!mgmt_b) {
            ql_log(ql_log_warn, vha, 0x7042,
                "DMA alloc failed for mgmt_b.\n");
            rval = -ENOMEM;
            goto done_unmap_sg;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

        mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
        mn->parameter1 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
        break;

    case QLA84_MGMT_CHNG_CONFIG:
        mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
        mn->parameter1 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

        mn->parameter2 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

        mn->parameter3 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
        break;

    default:
        rval = -EIO;
        goto exit_mgmt;
    }

    if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
        mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
        mn->dseg_count = cpu_to_le16(1);
        mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
        mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
        mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
    }

    rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7043,
            "Vendor request 84xx mgmt failed.\n");

        rval = (DID_ERROR << 16);
    } else {
        ql_dbg(ql_dbg_user, vha, 0x7044,
            "Vendor request 84xx mgmt completed.\n");

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_job->reply->result = DID_OK;

        if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
            (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
            bsg_job->reply->reply_payload_rcv_len =
                bsg_job->reply_payload.payload_len;

            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, mgmt_b,
                data_len);
        }
    }

done_unmap_sg:
    if (mgmt_b)
        dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

    if (dma_direction == DMA_TO_DEVICE)
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    else if (dma_direction == DMA_FROM_DEVICE)
        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
    dma_pool_free(ha->s_dma_pool, mn, mn_dma);

    if (!rval)
        bsg_job->job_done(bsg_job);
    return rval;
}
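
/*
 * Vendor command: get or set the iiDMA speed of a logged-in target
 * port identified by WWPN.
 */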
static int
qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    int rval = 0;
    struct qla_port_param *port_param = NULL;
    fc_port_t *fcport = NULL;
    int found = 0;
    uint16_t mb[MAILBOX_REGISTER_COUNT];
    uint8_t *rsp_ptr = NULL;

    if (!IS_IIDMA_CAPABLE(vha->hw)) {
        ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
        return -EINVAL;
    }

    port_param = (struct qla_port_param *)((char *)bsg_job->request +
        sizeof(struct fc_bsg_request));
    if (!port_param) {
        ql_log(ql_log_warn, vha, 0x7047,
            "port_param header not provided.\n");
        return -EINVAL;
    }

    if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
        ql_log(ql_log_warn, vha, 0x7048,
            "Invalid destination type.\n");
        return -EINVAL;
    }

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->port_type != FCT_TARGET)
            continue;

        if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
            fcport->port_name, sizeof(fcport->port_name)))
            continue;

        found = 1;
        break;
    }

    /* The list cursor is never NULL after the loop, so track a match
     * explicitly instead of testing fcport itself.
     */
    if (!found) {
        ql_log(ql_log_warn, vha, 0x7049,
            "Failed to find port.\n");
        return -EINVAL;
    }

    if (atomic_read(&fcport->state) != FCS_ONLINE) {
        ql_log(ql_log_warn, vha, 0x704a,
            "Port is not online.\n");
        return -EINVAL;
    }

    if (fcport->flags & FCF_LOGIN_NEEDED) {
        ql_log(ql_log_warn, vha, 0x704b,
            "Remote port not logged in flags = 0x%x.\n", fcport->flags);
        return -EINVAL;
    }

    if (port_param->mode)
        rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
            port_param->speed, mb);
    else
        rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
            &port_param->speed, mb);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x704c,
            "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
            "%04x %x %04x %04x.\n", fcport->port_name[0],
            fcport->port_name[1], fcport->port_name[2],
            fcport->port_name[3], fcport->port_name[4],
            fcport->port_name[5], fcport->port_name[6],
            fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
        rval = (DID_ERROR << 16);
    } else {
        if (!port_param->mode) {
            bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
                sizeof(struct qla_port_param);

            rsp_ptr = ((uint8_t *)bsg_job->reply) +
                sizeof(struct fc_bsg_reply);

            memcpy(rsp_ptr, port_param,
                sizeof(struct qla_port_param));
        }

        bsg_job->reply->result = DID_OK;
        bsg_job->job_done(bsg_job);
    }

    return rval;
}
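
/*
 * Common setup for option-ROM read/update requests: validate the start
 * offset, clamp the region size to the option-ROM size, and allocate
 * the staging buffer.
 */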
static int
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
    uint32_t start = 0;
    int valid = 0;
    struct qla_hw_data *ha = vha->hw;

    if (unlikely(pci_channel_offline(ha->pdev)))
        return -EINVAL;

    start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
    if (start > ha->optrom_size) {
        ql_log(ql_log_warn, vha, 0x7055,
            "start %d > optrom_size %d.\n", start, ha->optrom_size);
        return -EINVAL;
    }

    if (ha->optrom_state != QLA_SWAITING) {
        ql_log(ql_log_info, vha, 0x7056,
            "optrom_state %d.\n", ha->optrom_state);
        return -EBUSY;
    }

    ha->optrom_region_start = start;
    ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
    if (is_update) {
        if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
            valid = 1;
        else if (start == (ha->flt_region_boot * 4) ||
            start == (ha->flt_region_fw * 4))
            valid = 1;
        else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
            IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
            valid = 1;
        if (!valid) {
            ql_log(ql_log_warn, vha, 0x7058,
                "Invalid start region 0x%x/0x%x.\n", start,
                bsg_job->request_payload.payload_len);
            return -EINVAL;
        }

        ha->optrom_region_size = start +
            bsg_job->request_payload.payload_len > ha->optrom_size ?
            ha->optrom_size - start :
            bsg_job->request_payload.payload_len;
        ha->optrom_state = QLA_SWRITING;
    } else {
        ha->optrom_region_size = start +
            bsg_job->reply_payload.payload_len > ha->optrom_size ?
            ha->optrom_size - start :
            bsg_job->reply_payload.payload_len;
        ha->optrom_state = QLA_SREADING;
    }

    ha->optrom_buffer = vmalloc(ha->optrom_region_size);
    if (!ha->optrom_buffer) {
        ql_log(ql_log_warn, vha, 0x7059,
            "Read: Unable to allocate memory for optrom retrieval "
            "(%x)\n", ha->optrom_region_size);

        ha->optrom_state = QLA_SWAITING;
        return -ENOMEM;
    }

    memset(ha->optrom_buffer, 0, ha->optrom_region_size);
    return 0;
}
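
/* Vendor command: read a region of the option ROM into the reply payload. */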
static int
qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;

    if (ha->flags.nic_core_reset_hdlr_active)
        return -EBUSY;

    rval = qla2x00_optrom_setup(bsg_job, vha, 0);
    if (rval)
        return rval;

    ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
        ha->optrom_region_size);

    bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
    bsg_job->reply->result = DID_OK;
    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;
    ha->optrom_state = QLA_SWAITING;
    bsg_job->job_done(bsg_job);
    return rval;
}
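
/* Vendor command: flash a region of the option ROM from the request payload. */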
static int
qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;

    rval = qla2x00_optrom_setup(bsg_job, vha, 1);
    if (rval)
        return rval;

    /* Set the isp82xx_no_md_cap not to capture minidump */
    ha->flags.isp82xx_no_md_cap = 1;

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
        ha->optrom_region_size);

    ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    bsg_job->reply->result = DID_OK;
    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;
    ha->optrom_state = QLA_SWAITING;
    bsg_job->job_done(bsg_job);
    return rval;
}
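
/*
 * Vendor command: write a caller-supplied list of FRU image version
 * fields via the SFP mailbox interface.
 */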
static int
qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_image_version_list *list = (void *)bsg;
    struct qla_image_version *image;
    uint32_t count;
    dma_addr_t sfp_dma;
    void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

    image = list->version;
    count = list->count;

    while (count--) {
        memcpy(sfp, &image->field_info, sizeof(image->field_info));
        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
            image->field_address.device, image->field_address.offset,
            sizeof(image->field_info), image->field_address.option);
        if (rval) {
            bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
                EXT_STATUS_MAILBOX;
            goto dealloc;
        }
        image++;
    }

    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_job->reply->result = DID_OK << 16;
    bsg_job->job_done(bsg_job);
    return 0;
}
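
/* Vendor command: read a FRU status register via the SFP mailbox interface. */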
static int
qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_status_reg *sr = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

    rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
        sr->field_address.device, sr->field_address.offset,
        sizeof(sr->status_reg), sr->field_address.option);
    sr->status_reg = *sfp;

    if (rval) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
    bsg_job->reply->result = DID_OK << 16;
    bsg_job->job_done(bsg_job);
    return 0;
}
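
/* Vendor command: write a FRU status register via the SFP mailbox interface. */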
static int
qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_status_reg *sr = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

    *sfp = sr->status_reg;
    rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
        sr->field_address.device, sr->field_address.offset,
        sizeof(sr->status_reg), sr->field_address.option);

    if (rval) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_job->reply->result = DID_OK << 16;
    bsg_job->job_done(bsg_job);
    return 0;
}
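
/* Vendor command: write a buffer to a device on the I2C bus. */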
static int
qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
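
/*
 * Handle QL_VND_READ_I2C: the inverse of the write path. Read i2c->length
 * bytes from the given I2C device/offset into DMA memory, copy them into
 * the descriptor's embedded buffer and return the whole descriptor in the
 * reply payload.
 */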
static int
qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);
	return 0;
}
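
/*
 * Handle QL_VND_DIAG_IO_CMD: issue a bidirectional diagnostic IOCB. The
 * command is only attempted on BIDI-capable, online adapters connected to
 * a switch in P2P mode. The port performs a fabric login to its own port
 * ID (done once and cached in self_login_loop_id) so it can act as the
 * target of the bidirectional command.
 */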
static int
qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;
	}
	mutex_unlock(&ha->selflogin_lock);

	/* Assign the self login loop id to fcport */
	vha->bidir_fcport.loop_id = vha->self_login_loop_id;
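	/*
	 * Map both payloads for DMA; each mapped segment becomes one dsd in
	 * the bidirectional IOCB. Lists that the IOMMU coalesced are not
	 * handled here, so differing pre-/post-mapping segment counts are
	 * treated as an error.
	 */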
	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}
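	/*
	 * Capture both payload lengths before comparing them; comparing
	 * first would test the zero-initialized locals and could never
	 * fail.
	 */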
	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}
	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Populate srb->ctx with bidir ctx */
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:
	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = 0;
	bsg_job->reply->result = (DID_OK) << 16;
	bsg_job->job_done(bsg_job);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}
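
/*
 * Dispatch a FC_BSG_HST_VENDOR request to the matching QL_VND_* handler;
 * unrecognized vendor commands fail with -ENOSYS.
 */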
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	default:
		return -ENOSYS;
	}
}
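
/*
 * Entry point for all bsg requests on this host. ELS requests addressed to
 * an rport resolve the vha through the rport; everything else uses the
 * shost directly. Requests are rejected with -EBUSY while an ISP reset is
 * active or pending.
 */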
int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
	int ret = -EINVAL;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_job->reply->reply_payload_rcv_len = 0;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
	}

	if (qla2x00_reset_active(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_job->request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);

	switch (bsg_job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}
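
/*
 * bsg timeout handler: walk every request queue's outstanding commands
 * looking for the SRB that owns this bsg job, drop the hardware lock
 * across the mailbox abort, then free the SRB (and, for host CT
 * passthrough, the temporary fcport allocated for it).
 */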
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				if (((sp->type == SRB_CT_CMD) ||
				    (sp->type == SRB_ELS_CMD_HST))
				    && (sp->u.bsg_job == bsg_job)) {
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command failed.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command success.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}