qla_iocb.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820
  1. /*
  2. * QLogic Fibre Channel HBA Driver
  3. * Copyright (c) 2003-2008 QLogic Corporation
  4. *
  5. * See LICENSE.qla2xxx for copyright and licensing details.
  6. */
  7. #include "qla_def.h"
  8. #include <linux/blkdev.h>
  9. #include <linux/delay.h>
  10. #include <scsi/scsi_tcq.h>
  11. static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
  12. static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
  13. /**
  14. * qla2x00_get_cmd_direction() - Determine control_flag data direction.
  15. * @sp: SRB carrying the SCSI command to examine
  16. *
  17. * Returns the proper CF_* direction based on CDB.
  18. */
  19. static inline uint16_t
  20. qla2x00_get_cmd_direction(srb_t *sp)
  21. {
  22. uint16_t cflags;
  23. cflags = 0;
  24. /* Set transfer direction */
  25. if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
  26. cflags = CF_WRITE;
  27. sp->fcport->ha->qla_stats.output_bytes +=
  28. scsi_bufflen(sp->cmd);
  29. } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
  30. cflags = CF_READ;
  31. sp->fcport->ha->qla_stats.input_bytes +=
  32. scsi_bufflen(sp->cmd);
  33. }
  34. return (cflags);
  35. }
  36. /**
  37. * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
  38. * Continuation Type 0 IOCBs to allocate.
  39. *
  40. * @dsds: number of data segment decriptors needed
  41. *
  42. * Returns the number of IOCB entries needed to store @dsds.
  43. */
  44. uint16_t
  45. qla2x00_calc_iocbs_32(uint16_t dsds)
  46. {
  47. uint16_t iocbs;
  48. iocbs = 1;
  49. if (dsds > 3) {
  50. iocbs += (dsds - 3) / 7;
  51. if ((dsds - 3) % 7)
  52. iocbs++;
  53. }
  54. return (iocbs);
  55. }
  56. /**
  57. * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
  58. * Continuation Type 1 IOCBs to allocate.
  59. *
  60. * @dsds: number of data segment decriptors needed
  61. *
  62. * Returns the number of IOCB entries needed to store @dsds.
  63. */
  64. uint16_t
  65. qla2x00_calc_iocbs_64(uint16_t dsds)
  66. {
  67. uint16_t iocbs;
  68. iocbs = 1;
  69. if (dsds > 2) {
  70. iocbs += (dsds - 2) / 5;
  71. if ((dsds - 2) % 5)
  72. iocbs++;
  73. }
  74. return (iocbs);
  75. }
  76. /**
  77. * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
  78. * @ha: HA context
  79. *
  80. * Returns a pointer to the Continuation Type 0 IOCB packet.
  81. */
  82. static inline cont_entry_t *
  83. qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
  84. {
  85. cont_entry_t *cont_pkt;
  86. /* Adjust ring index. */
  87. ha->req_ring_index++;
  88. if (ha->req_ring_index == ha->request_q_length) {
  89. ha->req_ring_index = 0;
  90. ha->request_ring_ptr = ha->request_ring;
  91. } else {
  92. ha->request_ring_ptr++;
  93. }
  94. cont_pkt = (cont_entry_t *)ha->request_ring_ptr;
  95. /* Load packet defaults. */
  96. *((uint32_t *)(&cont_pkt->entry_type)) =
  97. __constant_cpu_to_le32(CONTINUE_TYPE);
  98. return (cont_pkt);
  99. }
  100. /**
  101. * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
  102. * @ha: HA context
  103. *
  104. * Returns a pointer to the continuation type 1 IOCB packet.
  105. */
  106. static inline cont_a64_entry_t *
  107. qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
  108. {
  109. cont_a64_entry_t *cont_pkt;
  110. /* Adjust ring index. */
  111. ha->req_ring_index++;
  112. if (ha->req_ring_index == ha->request_q_length) {
  113. ha->req_ring_index = 0;
  114. ha->request_ring_ptr = ha->request_ring;
  115. } else {
  116. ha->request_ring_ptr++;
  117. }
  118. cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;
  119. /* Load packet defaults. */
  120. *((uint32_t *)(&cont_pkt->entry_type)) =
  121. __constant_cpu_to_le32(CONTINUE_A64_TYPE);
  122. return (cont_pkt);
  123. }
  124. /**
  125. * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
  126. * capable IOCB types.
  127. *
  128. * @sp: SRB command to process
  129. * @cmd_pkt: Command type 2 IOCB
  130. * @tot_dsds: Total number of segments to transfer
  131. */
  132. void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
  133. uint16_t tot_dsds)
  134. {
  135. uint16_t avail_dsds;
  136. uint32_t *cur_dsd;
  137. scsi_qla_host_t *ha;
  138. struct scsi_cmnd *cmd;
  139. struct scatterlist *sg;
  140. int i;
  141. cmd = sp->cmd;
  142. /* Update entry type to indicate Command Type 2 IOCB */
  143. *((uint32_t *)(&cmd_pkt->entry_type)) =
  144. __constant_cpu_to_le32(COMMAND_TYPE);
  145. /* No data transfer */
  146. if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
  147. cmd_pkt->byte_count = __constant_cpu_to_le32(0);
  148. return;
  149. }
  150. ha = sp->ha;
  151. cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
  152. /* Three DSDs are available in the Command Type 2 IOCB */
  153. avail_dsds = 3;
  154. cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
  155. /* Load data segments */
  156. scsi_for_each_sg(cmd, sg, tot_dsds, i) {
  157. cont_entry_t *cont_pkt;
  158. /* Allocate additional continuation packets? */
  159. if (avail_dsds == 0) {
  160. /*
  161. * Seven DSDs are available in the Continuation
  162. * Type 0 IOCB.
  163. */
  164. cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
  165. cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
  166. avail_dsds = 7;
  167. }
  168. *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
  169. *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
  170. avail_dsds--;
  171. }
  172. }
  173. /**
  174. * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
  175. * capable IOCB types.
  176. *
  177. * @sp: SRB command to process
  178. * @cmd_pkt: Command type 3 IOCB
  179. * @tot_dsds: Total number of segments to transfer
  180. */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer: zero the byte count and skip DSD setup. */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	/* Direction flag; also accounts bytes in the HBA statistics. */
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments: each 64-bit DSD is three 32-bit words
	 * (address low, address high, length). */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
  225. /**
  226. * qla2x00_start_scsi() - Send a SCSI command to the ISP
  227. * @sp: command to send to the ISP
  228. *
  229. * Returns non-zero if a failure occurred, else zero.
  230. */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required (issued outside the hardware lock). */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list; handle 0 is
	 * reserved, so the scan covers handles 1..MAX-1 starting just
	 * past the last handle issued. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!ha->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		/* Refresh the free-slot count from the chip's out pointer. */
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet (the first 8 bytes hold
	 * the entry header and the handle just written). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Make the IOCB visible in memory before ringing the doorbell. */
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	/* tot_dsds != 0 means the sg list was DMA-mapped; undo it. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
  342. /**
  343. * __qla2x00_marker() - Send a marker IOCB to the firmware (caller holds the hardware lock).
  344. * @ha: HA context
  345. * @loop_id: loop ID
  346. * @lun: LUN
  347. * @type: marker modifier
  348. *
  349. * Can be called from both normal and interrupt context.
  350. *
  351. * Returns non-zero if a failure occurred, else zero.
  352. */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	/* Markers are queued on the parent (physical) HA's ring. */
	scsi_qla_host_t *pha = to_qla_parent(ha);

	mrk24 = NULL;
	/* Grab a zeroed request-ring slot; qla2x00_req_pkt() requires
	 * the hardware lock, which our caller holds. */
	mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, ha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	/* MK_SYNC_ALL markers carry no target/LUN addressing. */
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			/* FWI2 (24xx) layout: nport handle + FCP LUN. */
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = ha->vp_idx;
		} else {
			/* Legacy layout: target ID + 16-bit LUN. */
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	/* Ensure the IOCB is in memory before ringing the doorbell. */
	wmb();

	qla2x00_isp_cmd(pha);

	return (QLA_SUCCESS);
}
  386. int
  387. qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
  388. uint8_t type)
  389. {
  390. int ret;
  391. unsigned long flags = 0;
  392. scsi_qla_host_t *pha = to_qla_parent(ha);
  393. spin_lock_irqsave(&pha->hardware_lock, flags);
  394. ret = __qla2x00_marker(ha, loop_id, lun, type);
  395. spin_unlock_irqrestore(&pha->hardware_lock, flags);
  396. return (ret);
  397. }
  398. /**
  399. * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
  400. * @ha: HA context
  401. *
  402. * Note: The caller must hold the hardware lock before calling this routine.
  403. *
  404. * Returns NULL if function failed, else, a pointer to the request packet.
  405. */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= ha->req_q_cnt) {
			/* Calculate number of free request entries. */
			if (IS_FWI2_CAPABLE(ha))
				cnt = (uint16_t)RD_REG_DWORD(
				    &reg->isp24.req_q_out);
			else
				cnt = qla2x00_debounce_register(
				    ISP_REQ_Q_OUT(ha, &reg->isp));
			if (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt = ha->request_q_length -
				    (ha->req_ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < ha->req_q_cnt) {
			ha->req_q_cnt--;
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)ha->req_ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		/* NOTE(review): the lock is dropped with spin_unlock() but
		 * re-taken with spin_lock_irq(); callers acquired it with
		 * spin_lock_irqsave() -- confirm the irq-state handling is
		 * intentional here. */
		spin_unlock(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!ha->marker_needed && !ha->flags.init_done)
			qla2x00_poll(ha);

		spin_lock_irq(&ha->hardware_lock);
	}
	/* NULL here means no slot freed up within the 1-second window. */
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
  459. /**
  460. * qla2x00_isp_cmd() - Modify the request ring pointer.
  461. * @ha: HA context
  462. *
  463. * Note: The caller must hold the hardware lock before calling this routine.
  464. */
  465. static void
  466. qla2x00_isp_cmd(scsi_qla_host_t *ha)
  467. {
  468. device_reg_t __iomem *reg = ha->iobase;
  469. DEBUG5(printk("%s(): IOCB data:\n", __func__));
  470. DEBUG5(qla2x00_dump_buffer(
  471. (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));
  472. /* Adjust ring index. */
  473. ha->req_ring_index++;
  474. if (ha->req_ring_index == ha->request_q_length) {
  475. ha->req_ring_index = 0;
  476. ha->request_ring_ptr = ha->request_ring;
  477. } else
  478. ha->request_ring_ptr++;
  479. /* Set chip new ring index. */
  480. if (IS_FWI2_CAPABLE(ha)) {
  481. WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
  482. RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
  483. } else {
  484. WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
  485. RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
  486. }
  487. }
  488. /**
  489. * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
  490. * Continuation Type 1 IOCBs to allocate.
  491. *
  492. * @dsds: number of data segment decriptors needed
  493. *
  494. * Returns the number of IOCB entries needed to store @dsds.
  495. */
  496. static inline uint16_t
  497. qla24xx_calc_iocbs(uint16_t dsds)
  498. {
  499. uint16_t iocbs;
  500. iocbs = 1;
  501. if (dsds > 1) {
  502. iocbs += (dsds - 1) / 5;
  503. if ((dsds - 1) % 5)
  504. iocbs++;
  505. }
  506. return iocbs;
  507. }
  508. /**
  509. * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
  510. * IOCB types.
  511. *
  512. * @sp: SRB command to process
  513. * @cmd_pkt: Command type 3 IOCB
  514. * @tot_dsds: Total number of segments to transfer
  515. */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer: zero the byte count and skip DSD setup. */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	/* Set transfer direction via task-management flags and account
	 * the transfer length in the per-HBA byte statistics. */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->ha->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->ha->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB itself. */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments: each 64-bit DSD is three 32-bit words
	 * (address low, address high, length). */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
  572. /**
  573. * qla24xx_start_scsi() - Send a SCSI command to the ISP
  574. * @sp: command to send to the ISP
  575. *
  576. * Returns non-zero if a failure occurred, else zero.
  577. */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *ha, *pha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_24xx __iomem *reg;

	/* Setup device pointers; the hardware lock lives on the parent
	 * (physical) HA for virtual ports. */
	ret = 0;
	ha = sp->ha;
	pha = to_qla_parent(ha);
	reg = &ha->iobase->isp24;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required (issued outside the hardware lock). */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return QLA_FUNCTION_FAILED;
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&pha->hardware_lock, flags);

	/* Check for room in outstanding command list; handle 0 is
	 * reserved, so the scan covers handles 1..MAX-1 starting just
	 * past the last handle issued. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!ha->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		/* Refresh the free-slot count from the chip's out pointer. */
		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* LUN in FCP (big-endian) byte order. */
	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet (CDB in FCP byte order). */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Make the IOCB visible in memory before ringing the doorbell. */
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
	RD_REG_DWORD_RELAXED(&reg->req_q_in);	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(ha);

	spin_unlock_irqrestore(&pha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	/* tot_dsds != 0 means the sg list was DMA-mapped; undo it. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&pha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}