qla_iocb.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
	struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
static void qla25xx_set_que(srb_t *, struct rsp_que **);
  15. /**
  16. * qla2x00_get_cmd_direction() - Determine control_flag data direction.
  17. * @cmd: SCSI command
  18. *
  19. * Returns the proper CF_* direction based on CDB.
  20. */
  21. static inline uint16_t
  22. qla2x00_get_cmd_direction(srb_t *sp)
  23. {
  24. uint16_t cflags;
  25. cflags = 0;
  26. /* Set transfer direction */
  27. if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
  28. cflags = CF_WRITE;
  29. sp->fcport->vha->hw->qla_stats.output_bytes +=
  30. scsi_bufflen(sp->cmd);
  31. } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
  32. cflags = CF_READ;
  33. sp->fcport->vha->hw->qla_stats.input_bytes +=
  34. scsi_bufflen(sp->cmd);
  35. }
  36. return (cflags);
  37. }
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
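/*
 * Worked example for the sizing above: a Command Type 2 IOCB holds 3 DSDs
 * and each Continuation Type 0 IOCB holds 7 more, so dsds == 12 yields
 * 1 + (12 - 3) / 7 = 2 entries plus 1 for the remaining 2 DSDs, i.e. 3
 * request-ring entries in total.
 */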
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
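/*
 * Same arithmetic for the 64-bit case: 2 DSDs fit in the Command Type 3
 * IOCB and 5 in each Continuation Type 1 IOCB, so dsds == 12 needs
 * 1 + (12 - 2) / 5 = 3 entries, with no remainder term.
 */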
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
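/*
 * Layout note for the 64-bit routine above: each DSD occupies three
 * little-endian dwords on the ring -- address low, address high, then
 * segment length -- which is why cur_dsd advances by three per
 * scatter/gather element (versus two in the 32-bit routine).
 */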
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
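/*
 * Note on the handle scan used above (and repeated in qla24xx_start_scsi()
 * and qla2x00_alloc_iocbs() below): handles run from 1 to
 * MAX_OUTSTANDING_COMMANDS - 1, starting just past the last handle issued
 * and wrapping around; handle 0 is never assigned, so a zero host_scribble
 * can safely mean "no command outstanding".
 */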
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
				    RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_QLA82XX(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp82.req_q_out);
				else if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
					    ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && vha->flags.init_done)
			qla2x00_poll(rsp);

		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA82XX(ha)) {
		uint32_t dbval = 0x04 | (ha->portnum << 5);

		/* write, read and verify logic */
		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
		if (ql2xdbwr)
			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
		else {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
				WRT_REG_DWORD((unsigned long __iomem *)
				    ha->nxdb_wr_ptr, dbval);
				wmb();
			}
		}
	} else if (ha->mqenable) {
		/* Set chip new ring index. */
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
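/*
 * On the 24xx path the command IOCB itself carries a single DSD (see
 * qla24xx_build_scsi_iocbs() below), so e.g. dsds == 11 costs
 * 1 + (11 - 1) / 5 = 3 request-ring entries, with no remainder term.
 */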
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
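/*
 * Mapping example for the queue selection above: with cpu_affinity_enabled
 * set and, say, max_rsp_queues == 5, commands issued from CPUs 0..3 complete
 * on rsp_q_map[1]..rsp_q_map[4]; any other CPU (or affinity < 0) falls back
 * to the default response queue, rsp_q_map[0].
 */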
/* Generic Control-SRB manipulation functions. */
static void *
qla2x00_alloc_iocbs(srb_t *sp)
{
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable)
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	req->cnt -= req_cnt;

	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;
	sp->handle = handle;

queuing_error:
	return pkt;
}
static void
qla2x00_start_iocbs(srb_t *sp)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(sp);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index (82xx already handled above). */
		if (ha->mqenable) {
			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
			RD_REG_DWORD(&ioreg->hccr);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *lio = ctx->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *lio = ctx->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
	/* Implicit: mbx->mb10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	unsigned int lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *iocb = ctx->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}
static void
qla24xx_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
{
	uint16_t lun;
	uint8_t modif;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *iocb = ctx->u.iocb_cmd;
	struct req_que *req = vha->req;

	lun = iocb->u.marker.lun;
	modif = iocb->u.marker.modif;

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = modif;
	if (modif != MK_SYNC_ALL) {
		mrk->nport_handle = cpu_to_le16(fcport->loop_id);
		mrk->lun[1] = LSB(lun);
		mrk->lun[2] = MSB(lun);
		host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
		mrk->vp_index = vha->vp_idx;
		mrk->handle = MAKE_HANDLE(req->id, mrk->handle);
	}
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	struct srb_ctx *ctx = sp->ctx;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp);
	if (!pkt)
		goto done;

	rval = QLA_SUCCESS;
	switch (ctx->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		qla24xx_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_MARKER_CMD:
		qla24xx_marker_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
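/*
 * Usage sketch for qla2x00_start_sp() -- a hypothetical caller, for
 * illustration only (the real async login/logout paths live elsewhere in
 * the driver):
 *
 *	struct srb_ctx *ctx = sp->ctx;
 *
 *	ctx->type = SRB_LOGIN_CMD;
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		... free the srb and report the failure ...
 *
 * The switch above selects the FWI2 (24xx-style) or legacy mailbox IOCB
 * builder for the allocated packet, and qla2x00_start_iocbs() then rings
 * the request-queue doorbell.
 */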