/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
    struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
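
/*
 * Editor's note, a worked example of the math above: for dsds == 12, the
 * Command Type 2 IOCB carries the first 3 DSDs and the remaining 9 need
 * ceil(9 / 7) == 2 Continuation Type 0 IOCBs, so
 * qla2x00_calc_iocbs_32(12) returns 3.
 */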
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
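
/*
 * Editor's note: the 64-bit variant of the same example. For dsds == 12,
 * the Command Type 3 IOCB carries the first 2 DSDs and the remaining 10
 * need ceil(10 / 5) == 2 Continuation Type 1 IOCBs, so
 * qla2x00_calc_iocbs_64(12) returns 3.
 */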
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);
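
	/*
	 * Editor's note: the search below starts one past the last handle
	 * issued and wraps within [1, MAX_OUTSTANDING_COMMANDS); handle 0 is
	 * skipped, presumably so a zero stored in host_scribble remains
	 * distinguishable from "no outstanding command".
	 */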
	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
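
	/*
	 * Editor's note, a worked example of the free-slot math below: with
	 * req->length == 128, ring_index == 120 and the firmware out pointer
	 * cnt == 10, the producer has wrapped past the consumer, so free
	 * space is 128 - (120 - 10) == 18 entries; with ring_index == 10 and
	 * cnt == 120 the out pointer is ahead, so free space is simply
	 * 120 - 10 == 110 entries.
	 */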
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *)mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
				    RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
					    ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
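
/*
 * Editor's note: same example as the 2xxx variants above. For dsds == 12,
 * the Command Type 7 IOCB carries only the first DSD and the remaining 11
 * need ceil(11 / 5) == 3 Continuation Type 1 IOCBs, so
 * qla24xx_calc_iocbs(12) returns 4.
 */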
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t)rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ql2xmultique_tag && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
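
/*
 * Editor's note: with ql2xmultique_tag set and, say, ha->max_rsp_queues == 4,
 * requests whose originating CPU is 0-2 are steered to response queues 1-3,
 * while all other CPUs (and the single-queue case) fall back to the default
 * response queue 0.
 */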