qla_iocb.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811
  1. /*
  2. * QLogic Fibre Channel HBA Driver
  3. * Copyright (c) 2003-2005 QLogic Corporation
  4. *
  5. * See LICENSE.qla2xxx for copyright and licensing details.
  6. */
  7. #include "qla_def.h"
  8. #include <linux/blkdev.h>
  9. #include <linux/delay.h>
  10. #include <scsi/scsi_tcq.h>
/* Local (file-scope) helper prototypes. */
static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
  16. /**
  17. * qla2x00_get_cmd_direction() - Determine control_flag data direction.
  18. * @cmd: SCSI command
  19. *
  20. * Returns the proper CF_* direction based on CDB.
  21. */
  22. static inline uint16_t
  23. qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
  24. {
  25. uint16_t cflags;
  26. cflags = 0;
  27. /* Set transfer direction */
  28. if (cmd->sc_data_direction == DMA_TO_DEVICE)
  29. cflags = CF_WRITE;
  30. else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
  31. cflags = CF_READ;
  32. return (cflags);
  33. }
  34. /**
  35. * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
  36. * Continuation Type 0 IOCBs to allocate.
  37. *
  38. * @dsds: number of data segment decriptors needed
  39. *
  40. * Returns the number of IOCB entries needed to store @dsds.
  41. */
  42. uint16_t
  43. qla2x00_calc_iocbs_32(uint16_t dsds)
  44. {
  45. uint16_t iocbs;
  46. iocbs = 1;
  47. if (dsds > 3) {
  48. iocbs += (dsds - 3) / 7;
  49. if ((dsds - 3) % 7)
  50. iocbs++;
  51. }
  52. return (iocbs);
  53. }
  54. /**
  55. * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
  56. * Continuation Type 1 IOCBs to allocate.
  57. *
  58. * @dsds: number of data segment decriptors needed
  59. *
  60. * Returns the number of IOCB entries needed to store @dsds.
  61. */
  62. uint16_t
  63. qla2x00_calc_iocbs_64(uint16_t dsds)
  64. {
  65. uint16_t iocbs;
  66. iocbs = 1;
  67. if (dsds > 2) {
  68. iocbs += (dsds - 2) / 5;
  69. if ((dsds - 2) % 5)
  70. iocbs++;
  71. }
  72. return (iocbs);
  73. }
  74. /**
  75. * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
  76. * @ha: HA context
  77. *
  78. * Returns a pointer to the Continuation Type 0 IOCB packet.
  79. */
  80. static inline cont_entry_t *
  81. qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
  82. {
  83. cont_entry_t *cont_pkt;
  84. /* Adjust ring index. */
  85. ha->req_ring_index++;
  86. if (ha->req_ring_index == ha->request_q_length) {
  87. ha->req_ring_index = 0;
  88. ha->request_ring_ptr = ha->request_ring;
  89. } else {
  90. ha->request_ring_ptr++;
  91. }
  92. cont_pkt = (cont_entry_t *)ha->request_ring_ptr;
  93. /* Load packet defaults. */
  94. *((uint32_t *)(&cont_pkt->entry_type)) =
  95. __constant_cpu_to_le32(CONTINUE_TYPE);
  96. return (cont_pkt);
  97. }
  98. /**
  99. * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
  100. * @ha: HA context
  101. *
  102. * Returns a pointer to the continuation type 1 IOCB packet.
  103. */
  104. static inline cont_a64_entry_t *
  105. qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
  106. {
  107. cont_a64_entry_t *cont_pkt;
  108. /* Adjust ring index. */
  109. ha->req_ring_index++;
  110. if (ha->req_ring_index == ha->request_q_length) {
  111. ha->req_ring_index = 0;
  112. ha->request_ring_ptr = ha->request_ring;
  113. } else {
  114. ha->request_ring_ptr++;
  115. }
  116. cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;
  117. /* Load packet defaults. */
  118. *((uint32_t *)(&cont_pkt->entry_type)) =
  119. __constant_cpu_to_le32(CONTINUE_A64_TYPE);
  120. return (cont_pkt);
  121. }
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 *
 * Each 32-bit DSD is a pair of 32-bit words: bus address, then length.
 * Segments beyond the three that fit in the command IOCB spill into
 * Continuation Type 0 IOCBs (seven DSDs each).
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer: report a zero byte count and skip DSD setup. */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
			/*
			 * NOTE(review): the 64-bit variants cast
			 * cont_pkt->dseg_0_address without '&' (array
			 * member); here it is taken with '&' -- confirm
			 * dseg_0_address is a scalar in cont_entry_t.
			 */
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		/* One DSD: 32-bit DMA address followed by segment length. */
		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 *
 * Each 64-bit DSD occupies three 32-bit words: low address half, high
 * address half, then length. Segments beyond the two that fit in the
 * command IOCB spill into Continuation Type 1 IOCBs (five DSDs each).
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer: report a zero byte count and skip DSD setup. */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		/* Split the 64-bit bus address into low/high 32-bit words. */
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Claims an outstanding-command handle, DMA-maps the command's
 * scatter-gather list, builds a Command Type 2/3 IOCB (plus any
 * continuation IOCBs) on the request ring, and rings the doorbell.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	/* Scan circularly starting after the last-used handle; slot 0 is
	 * never used. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		/* Cached count is stale; refresh from the chip's consumer
		 * index. */
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	/* Stash the handle so the completion path can find this command. */
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet (past the 8-byte header). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Make the IOCB contents visible before the doorbell write below. */
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	/* tot_dsds != 0 means the sg list was DMA-mapped above; undo it. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier (MK_SYNC_*)
 *
 * Can be called from both normal and interrupt context.
 * NOTE(review): consumes a request-ring slot via qla2x00_req_pkt(), whose
 * contract requires the hardware lock -- callers are expected to hold it
 * (the qla2x00_marker() wrapper does).
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	scsi_qla_host_t *pha = to_qla_parent(ha);

	mrk24 = NULL;
	/* Marker IOCBs go on the parent HA's request ring. */
	mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, ha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		/* Target/LUN-scoped markers need addressing information. */
		if (IS_FWI2_CAPABLE(ha)) {
			/* FWI2 (24xx) layout: N-port handle + FCP LUN. */
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = ha->vp_idx;
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	/* Make the marker contents visible before ringing the doorbell. */
	wmb();

	qla2x00_isp_cmd(pha);

	return (QLA_SUCCESS);
}
/**
 * qla2x00_marker() - Locked wrapper around __qla2x00_marker().
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier (MK_SYNC_*)
 *
 * Acquires the hardware lock around the unlocked __qla2x00_marker().
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ret = __qla2x00_marker(ha, loop_id, lun, type);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (ret);
}
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Busy-waits up to ~1 second (HZ iterations of 2us plus polling) for a
 * free request-ring slot, zeroes it, and returns it.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 * The lock is transiently dropped and reacquired while waiting, so any
 * ring state the caller cached may be stale on return.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= ha->req_q_cnt) {
			/* Calculate number of free request entries. */
			if (IS_FWI2_CAPABLE(ha))
				cnt = (uint16_t)RD_REG_DWORD(
				    &reg->isp24.req_q_out);
			else
				cnt = qla2x00_debounce_register(
				    ISP_REQ_Q_OUT(ha, &reg->isp));
			if (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt = ha->request_q_length -
				    (ha->req_ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < ha->req_q_cnt) {
			ha->req_q_cnt--;
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)ha->req_ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!ha->marker_needed)
			qla2x00_poll(ha);

		/*
		 * NOTE(review): lock is dropped with spin_unlock() (irqs left
		 * disabled) but reacquired with spin_lock_irq() -- assumes
		 * every caller entered with interrupts disabled; confirm.
		 */
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Advances the request-ring producer index past the just-built IOCB and
 * notifies the ISP by writing the new index to the chip's request-queue
 * in-pointer register.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		/* Wrap back to the start of the ring. */
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/* Set chip new ring index. */
	if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
		/* Read back to flush the posted PCI write. */
		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
	} else {
		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
	}
}
  485. /**
  486. * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
  487. * Continuation Type 1 IOCBs to allocate.
  488. *
  489. * @dsds: number of data segment decriptors needed
  490. *
  491. * Returns the number of IOCB entries needed to store @dsds.
  492. */
  493. static inline uint16_t
  494. qla24xx_calc_iocbs(uint16_t dsds)
  495. {
  496. uint16_t iocbs;
  497. iocbs = 1;
  498. if (dsds > 1) {
  499. iocbs += (dsds - 1) / 5;
  500. if ((dsds - 1) % 5)
  501. iocbs++;
  502. }
  503. return iocbs;
  504. }
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 *
 * Each DSD occupies three 32-bit words: low address half, high address
 * half, then length. Segments beyond the one that fits in the command
 * IOCB spill into Continuation Type 1 IOCBs (five DSDs each).
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer: report a zero byte count and skip DSD setup. */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	/* Set transfer direction -- Type 7 uses task-management flags
	 * rather than the CF_* control flags. */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		/* Split the 64-bit bus address into low/high 32-bit words. */
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * FWI2 (ISP24xx) variant: claims an outstanding-command handle, DMA-maps
 * the command's scatter-gather list, builds a Command Type 7 IOCB (plus
 * any continuation IOCBs) on the request ring, and rings the doorbell.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_24xx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp24;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return QLA_FUNCTION_FAILED;
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	/* Scan circularly starting after the last-used handle; slot 0 is
	 * never used. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		/* Cached count is stale; refresh from the chip's consumer
		 * index. */
		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	/* Stash the handle so the completion path can find this command. */
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet (past the 8-byte header). */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* LUN and CDB go out on the wire in FCP byte order. */
	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Make the IOCB contents visible before the doorbell write below. */
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
	RD_REG_DWORD_RELAXED(&reg->req_q_in);		/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	/* tot_dsds != 0 means the sg list was DMA-mapped above; undo it. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}