qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(scsi_qla_host_t *);
static void qla2x00_isp_cmd(scsi_qla_host_t *);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB holding the SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }
        return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
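
/*
 * Worked example: the Command Type 2 IOCB carries the first three DSDs
 * and each Continuation Type 0 IOCB carries up to seven more, so a
 * 12-element scatter/gather list needs 1 + (12 - 3) / 7 = 2 IOCBs plus
 * one more for the remainder of two DSDs: three request-ring entries
 * in total.
 */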
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
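
/*
 * Worked example: here the Command Type 3 IOCB carries only the first
 * two (64-bit) DSDs and each Continuation Type 1 IOCB carries up to
 * five more, so the same 12-element list needs 1 + (12 - 2) / 5 = 3
 * request-ring entries, with no remainder.
 */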
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->hw->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
        cont_a64_entry_t *cont_pkt;
        struct req_que *req = vha->hw->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
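
/*
 * As used in this file, both helpers above assume the caller holds the
 * hardware lock and has already reserved the needed ring entries via
 * req->cnt accounting; they only advance ring_index/ring_ptr and stamp
 * the entry type, leaving the rest of the entry for the caller.
 */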
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
 * addressing capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
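
/*
 * In the IOCB memory handed to the ISP, each 32-bit DSD written above
 * is an (address, length) pair of two little-endian 32-bit words; the
 * 64-bit variant below extends this to an (address low, address high,
 * length) triple.
 */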
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
 * addressing capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
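
/*
 * LSD()/MSD() extract the low and high 32-bit halves of the
 * dma_addr_t, so the triple format above works unchanged whether the
 * platform's dma_addr_t is 32 or 64 bits wide.
 */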
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int ret, nseg;
        unsigned long flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        cmd_entry_t *cmd_pkt;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;

        /* Setup device pointers. */
        ret = 0;
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;
        req = ha->req;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
                        return (QLA_FUNCTION_FAILED);
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
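        /*
         * Free-slot accounting: req->cnt caches the number of free
         * ring entries and is refreshed from the chip's request-queue
         * out pointer only when the cached value looks too small. The
         * check below insists on two entries more than req_cnt,
         * presumably so the ring is never driven completely full
         * (in == out would then be ambiguous between full and empty).
         */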
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->vha = vha;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            ha->rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(vha);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_FUNCTION_FAILED);
}
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk24 = NULL;
        mrk = (mrk_entry_t *)qla2x00_req_pkt(base_vha);
        if (mrk == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
                    __func__, base_vha->host_no));

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_isp_cmd(base_vha);

        return (QLA_SUCCESS);
}
int
qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        int ret;
        unsigned long flags = 0;
        struct qla_hw_data *ha = vha->hw;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        ret = __qla2x00_marker(vha, loop_id, lun, type);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (ret);
}
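
/*
 * qla2x00_marker() is the locking wrapper: it takes the hardware lock
 * around __qla2x00_marker(), which touches the request ring through
 * qla2x00_req_pkt() and must therefore be entered with the lock held.
 */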
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ha->iobase;
        request_t *pkt = NULL;
        uint16_t cnt;
        uint32_t *dword_ptr;
        uint32_t timer;
        uint16_t req_cnt = 1;
        struct req_que *req = ha->req;

        /* Wait 1 second for slot. */
        for (timer = HZ; timer; timer--) {
                if ((req_cnt + 2) >= req->cnt) {
                        /* Calculate number of free request entries. */
                        if (IS_FWI2_CAPABLE(ha))
                                cnt = (uint16_t)RD_REG_DWORD(
                                    &reg->isp24.req_q_out);
                        else
                                cnt = qla2x00_debounce_register(
                                    ISP_REQ_Q_OUT(ha, &reg->isp));
                        if (req->ring_index < cnt)
                                req->cnt = cnt - req->ring_index;
                        else
                                req->cnt = req->length -
                                    (req->ring_index - cnt);
                }
                /* If room for request in request ring. */
                if ((req_cnt + 2) < req->cnt) {
                        req->cnt--;
                        pkt = req->ring_ptr;

                        /* Zero out packet. */
                        dword_ptr = (uint32_t *)pkt;
                        for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
                                *dword_ptr++ = 0;

                        /* Set system defined field. */
                        pkt->sys_define = (uint8_t)req->ring_index;

                        /* Set entry count. */
                        pkt->entry_count = 1;

                        break;
                }

                /* Release ring specific lock */
                spin_unlock_irq(&ha->hardware_lock);

                udelay(2);      /* 2 us */

                /* Check for pending interrupts. */
                /* During init we issue marker directly */
                if (!vha->marker_needed && vha->flags.init_done)
                        qla2x00_poll(ha->rsp);

                spin_lock_irq(&ha->hardware_lock);
        }
        if (!pkt) {
                DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
        }

        return (pkt);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ha->iobase;
        struct req_que *req = ha->req;

        DEBUG5(printk("%s(): IOCB data:\n", __func__));
        DEBUG5(qla2x00_dump_buffer(
            (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        /* Set chip new ring index. */
        if (IS_FWI2_CAPABLE(ha)) {
                WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
        } else {
                WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
                RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
        }
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}
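
/*
 * Worked example: the Command Type 7 IOCB embeds a single DSD and each
 * Continuation Type 1 IOCB adds up to five, so the 12-element list
 * from the earlier examples now needs 1 + (12 - 1) / 5 = 3 IOCBs plus
 * one more for the one-DSD remainder: four request-ring entries.
 */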
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int ret, nseg;
        unsigned long flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        struct cmd_type_7 *cmd_pkt;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_24xx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;

        /* Setup device pointers. */
        ret = 0;
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp24;
        cmd = sp->cmd;
        req = ha->req;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        req_cnt = qla24xx_calc_iocbs(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->vha = vha;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = handle;

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vp_idx;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_DWORD(&reg->req_q_in, req->ring_index);
        RD_REG_DWORD_RELAXED(&reg->req_q_in);           /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            ha->rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_FUNCTION_FAILED;
}