qla_iocb.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
		struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on the command's data direction.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

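/*
 * Added commentary: for DMA_NONE (and any other direction) cflags stays
 * 0, so neither CF_READ nor CF_WRITE is set and no byte counters are
 * updated.
 */
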
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

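/*
 * Worked example (illustrative): for dsds = 17, the Command Type 2
 * IOCB holds the first 3 DSDs and each Continuation Type 0 IOCB holds
 * 7 more, so iocbs = 1 + (17 - 3) / 7 = 1 + 2 = 3, with no remainder
 * and therefore no extra entry.
 */
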
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

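/*
 * Worked example (illustrative): for dsds = 12, the Command Type 3
 * IOCB holds the first 2 DSDs and each Continuation Type 1 IOCB holds
 * 5 more, so iocbs = 1 + (12 - 2) / 5 = 1 + 2 = 3.
 */
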
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @req: request queue
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @req: request queue
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

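/*
 * Added commentary: both prep helpers above advance the circular
 * request ring by one entry; when ring_index reaches req->length the
 * index wraps to 0 and ring_ptr is reset to the base of the ring.  The
 * same wrap pattern recurs in qla2x00_isp_cmd() and both start_scsi()
 * paths below.
 */
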
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	req = sp->que;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

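/*
 * Added commentary: in this 32-bit path each data segment descriptor
 * is a pair of little-endian 32-bit words, DMA address followed by
 * length, written consecutively at cur_dsd.
 */
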
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	req = sp->que;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

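/*
 * Added commentary: the 64-bit path emits three words per segment, the
 * low (LSD) and high (MSD) halves of the DMA address, then the length.
 * That is why only two DSDs fit in the Command Type 3 IOCB and five in
 * each Continuation Type 1 IOCB.
 */
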
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
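	/*
	 * Added commentary: req->cnt caches the number of free ring
	 * entries.  When it looks too small, the firmware's out-pointer
	 * is re-read and the free count recomputed modulo the ring
	 * length.  Worked example (illustrative): with req->length = 128,
	 * ring_index = 120 and cnt = 8, free = 128 - (120 - 8) = 16.
	 */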
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->vha = vha;
	sp->que = req;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

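/*
 * Added commentary: qla2x00_marker() is the locked wrapper around
 * __qla2x00_marker(); callers that already hold hardware_lock use the
 * latter directly.  A typical call, as issued by the start_scsi()
 * paths:
 *
 *	qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 *
 * which queues a marker synchronizing all targets and LUNs.
 */
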
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
				    RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
					    ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)req->ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);

		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

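/*
 * Added commentary: when no slot is free, the loop above drops
 * hardware_lock, waits 2 us, optionally polls the response queue so
 * completions can release ring entries, then reacquires the lock and
 * retries, up to HZ iterations before returning NULL.
 */
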
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

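/*
 * Worked example (illustrative): for dsds = 11, the command IOCB holds
 * the first DSD and each Continuation Type 1 IOCB holds 5 more, so
 * iocbs = 1 + (11 - 1) / 5 = 1 + 2 = 3.
 */
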
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	req = sp->que;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id;

	/* Setup device pointers. */
	ret = 0;
	que_id = vha->req_ques[0];

	req = ha->req_q_map[que_id];
	sp->que = req;

	if (req->rsp)
		rsp = req->rsp;
	else
		rsp = ha->rsp_q_map[que_id];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = ha->isp_ops->rd_req_reg(ha, req->id);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->vha = vha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	ha->isp_ops->wrt_req_reg(ha, req->id, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

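/*
 * Added commentary: compared with qla2x00_start_scsi(), this path
 * addresses the target by 24-bit N_Port ID and nport_handle rather
 * than target ID, byte-swaps the LUN and CDB into FCP wire order with
 * host_to_fcp_swap(), and rings the doorbell through the
 * isp_ops->wrt_req_reg() hook so multiqueue-capable chips can address
 * the proper register page.
 */
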
uint16_t
qla24xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
{
	device_reg_t __iomem *reg = (void *) ha->iobase;

	return RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
}

uint16_t
qla25xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
{
	device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;

	return RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
}

void
qla24xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
{
	device_reg_t __iomem *reg = (void *) ha->iobase;

	WRT_REG_DWORD(&reg->isp24.req_q_in, index);
	RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
}

void
qla25xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
{
	device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	WRT_REG_DWORD(&reg->isp25mq.req_q_in, index);
	RD_REG_DWORD(&ioreg->hccr);	/* PCI posting */
}
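
/*
 * Added commentary: on ISP25xx multiqueue hardware each queue pair has
 * its own register page, so the accessors above offset the mapped MQ
 * base by QLA_QUE_PAGE * id; the ISP24xx variants ignore @id and always
 * use the single register block at ha->iobase.
 */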