/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);

/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @cmd: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                cflags = CF_WRITE;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                cflags = CF_READ;
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
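
/*
 * Worked example for the 32-bit sizing above: a request with 17 DSDs
 * places 3 in the Command Type 2 IOCB, leaving 14 for Continuation
 * Type 0 IOCBs at 7 apiece -- 14 / 7 = 2 with no remainder, so
 * qla2x00_calc_iocbs_32(17) returns 1 + 2 = 3 ring entries.
 */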

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
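
/*
 * Worked example for the 64-bit sizing above: with 13 DSDs, 2 fit in
 * the Command Type 3 IOCB and the remaining 11 need ceil(11 / 5) = 3
 * Continuation Type 1 IOCBs, so qla2x00_calc_iocbs_64(13) returns 4.
 */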

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
{
        cont_entry_t *cont_pkt;

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else {
                ha->request_ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)ha->request_ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else {
                ha->request_ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
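
/*
 * Note on the "Load packet defaults" stores above: entry_type is the
 * first byte of the four-byte IOCB header (entry_type, entry_count,
 * sys_define, entry_status), so the single little-endian 32-bit store
 * of the type value sets the type and zeroes the other three header
 * bytes in one write.
 */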

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
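
        /* Each 32-bit DSD below is a two-word (address, length) pair. */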
        /* Load data segments */
        if (cmd->use_sg != 0) {
                struct scatterlist *cur_seg;
                struct scatterlist *end_seg;

                cur_seg = (struct scatterlist *)cmd->request_buffer;
                end_seg = cur_seg + tot_dsds;
                while (cur_seg < end_seg) {
                        cont_entry_t *cont_pkt;

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                /*
                                 * Seven DSDs are available in the Continuation
                                 * Type 0 IOCB.
                                 */
                                cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
                                cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                                avail_dsds = 7;
                        }

                        *cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        avail_dsds--;

                        cur_seg++;
                }
        } else {
                *cur_dsd++ = cpu_to_le32(sp->dma_handle);
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
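
        /*
         * Each 64-bit DSD below is three words: address low 32 bits,
         * address high 32 bits, then length.
         */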
        /* Load data segments */
        if (cmd->use_sg != 0) {
                struct scatterlist *cur_seg;
                struct scatterlist *end_seg;

                cur_seg = (struct scatterlist *)cmd->request_buffer;
                end_seg = cur_seg + tot_dsds;
                while (cur_seg < end_seg) {
                        dma_addr_t sle_dma;
                        cont_a64_entry_t *cont_pkt;

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                /*
                                 * Five DSDs are available in the Continuation
                                 * Type 1 IOCB.
                                 */
                                cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
                                cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                                avail_dsds = 5;
                        }

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        avail_dsds--;

                        cur_seg++;
                }
        } else {
                *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int ret;
        unsigned long flags;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        cmd_entry_t *cmd_pkt;
        struct scatterlist *sg;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_2xxx __iomem *reg;

        /* Setup device pointers. */
        ret = 0;
        ha = sp->ha;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (ha->marker_needed != 0) {
                if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                ha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = ha->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (ha->outstanding_cmds[handle] == 0)
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (cmd->use_sg) {
                sg = (struct scatterlist *) cmd->request_buffer;
                tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
                if (tot_dsds == 0)
                        goto queuing_error;
        } else if (cmd->request_bufflen) {
                dma_addr_t req_dma;

                req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
                    cmd->request_bufflen, cmd->sc_data_direction);
                if (dma_mapping_error(req_dma))
                        goto queuing_error;

                sp->dma_handle = req_dma;
                tot_dsds = 1;
        }

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
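
        /*
         * The "+ 2" cushion below keeps at least two ring entries free so
         * the request-in index never catches up with the out index, which
         * would make a full ring indistinguishable from an empty one.
         */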
  306. if (ha->req_q_cnt < (req_cnt + 2)) {
  307. cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
  308. if (ha->req_ring_index < cnt)
  309. ha->req_q_cnt = cnt - ha->req_ring_index;
  310. else
  311. ha->req_q_cnt = ha->request_q_length -
  312. (ha->req_ring_index - cnt);
  313. }
  314. if (ha->req_q_cnt < (req_cnt + 2))
  315. goto queuing_error;
  316. /* Build command packet */
  317. ha->current_outstanding_cmd = handle;
  318. ha->outstanding_cmds[handle] = sp;
  319. sp->ha = ha;
  320. sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
  321. ha->req_q_cnt -= req_cnt;
  322. cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
  323. cmd_pkt->handle = handle;
  324. /* Zero out remaining portion of packet. */
  325. clr_ptr = (uint32_t *)cmd_pkt + 2;
  326. memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
  327. cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
  328. /* Set target ID and LUN number*/
  329. SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
  330. cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
  331. /* Update tagged queuing modifier */
  332. cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
  333. /* Load SCSI command packet. */
  334. memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
  335. cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
  336. /* Build IOCB segments */
  337. ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
  338. /* Set total data segment count. */
  339. cmd_pkt->entry_count = (uint8_t)req_cnt;
  340. wmb();
  341. /* Adjust ring index. */
  342. ha->req_ring_index++;
  343. if (ha->req_ring_index == ha->request_q_length) {
  344. ha->req_ring_index = 0;
  345. ha->request_ring_ptr = ha->request_ring;
  346. } else
  347. ha->request_ring_ptr++;
  348. sp->flags |= SRB_DMA_VALID;
  349. /* Set chip new ring index. */
  350. WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
  351. RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
  352. /* Manage unprocessed RIO/ZIO commands in response queue. */
  353. if (ha->flags.process_response_queue &&
  354. ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
  355. qla2x00_process_response_queue(ha);
  356. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  357. return (QLA_SUCCESS);
  358. queuing_error:
  359. if (cmd->use_sg && tot_dsds) {
  360. sg = (struct scatterlist *) cmd->request_buffer;
  361. pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
  362. cmd->sc_data_direction);
  363. } else if (tot_dsds) {
  364. pci_unmap_single(ha->pdev, sp->dma_handle,
  365. cmd->request_bufflen, cmd->sc_data_direction);
  366. }
  367. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  368. return (QLA_FUNCTION_FAILED);
  369. }

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;

        mrk24 = NULL;
        mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
        if (mrk == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
                    __func__, ha->host_no));

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_isp_cmd(ha);

        return (QLA_SUCCESS);
}
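
/*
 * qla2x00_marker() is the locked wrapper: it takes hardware_lock and
 * calls __qla2x00_marker(), which expects the lock to be held already.
 */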
int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        ret = __qla2x00_marker(ha, loop_id, lun, type);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (ret);
}

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
        device_reg_t __iomem *reg = ha->iobase;
        request_t *pkt = NULL;
        uint16_t cnt;
        uint32_t *dword_ptr;
        uint32_t timer;
        uint16_t req_cnt = 1;

        /* Wait 1 second for slot. */
        for (timer = HZ; timer; timer--) {
                if ((req_cnt + 2) >= ha->req_q_cnt) {
                        /* Calculate number of free request entries. */
                        if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
                                cnt = (uint16_t)RD_REG_DWORD(
                                    &reg->isp24.req_q_out);
                        else
                                cnt = qla2x00_debounce_register(
                                    ISP_REQ_Q_OUT(ha, &reg->isp));
                        if (ha->req_ring_index < cnt)
                                ha->req_q_cnt = cnt - ha->req_ring_index;
                        else
                                ha->req_q_cnt = ha->request_q_length -
                                    (ha->req_ring_index - cnt);
                }
                /* If room for request in request ring. */
                if ((req_cnt + 2) < ha->req_q_cnt) {
                        ha->req_q_cnt--;
                        pkt = ha->request_ring_ptr;

                        /* Zero out packet. */
                        dword_ptr = (uint32_t *)pkt;
                        for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
                                *dword_ptr++ = 0;

                        /* Set system defined field. */
                        pkt->sys_define = (uint8_t)ha->req_ring_index;

                        /* Set entry count. */
                        pkt->entry_count = 1;

                        break;
                }

                /* Release ring specific lock */
                spin_unlock(&ha->hardware_lock);

                udelay(2);      /* 2 us */

                /* Check for pending interrupts. */
                /* During init we issue marker directly */
                if (!ha->marker_needed)
                        qla2x00_poll(ha);

                spin_lock_irq(&ha->hardware_lock);
        }
        if (!pkt) {
                DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
        }

        return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
        device_reg_t __iomem *reg = ha->iobase;

        DEBUG5(printk("%s(): IOCB data:\n", __func__));
        DEBUG5(qla2x00_dump_buffer(
            (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        /* Set chip new ring index. */
        if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
                WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
                RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
        } else {
                WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
                RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
        }
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}
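
/*
 * Worked example for the 24xx sizing above: with 11 DSDs, 1 fits in the
 * Command Type 7 IOCB and the remaining 10 need 10 / 5 = 2 Continuation
 * Type 1 IOCBs, so qla24xx_calc_iocbs(11) returns 3.
 */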

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        if (cmd->use_sg != 0) {
                struct scatterlist *cur_seg;
                struct scatterlist *end_seg;

                cur_seg = (struct scatterlist *)cmd->request_buffer;
                end_seg = cur_seg + tot_dsds;
                while (cur_seg < end_seg) {
                        dma_addr_t sle_dma;
                        cont_a64_entry_t *cont_pkt;

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                /*
                                 * Five DSDs are available in the Continuation
                                 * Type 1 IOCB.
                                 */
                                cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
                                cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                                avail_dsds = 5;
                        }

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        avail_dsds--;

                        cur_seg++;
                }
        } else {
                *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int ret;
        unsigned long flags;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        struct cmd_type_7 *cmd_pkt;
        struct scatterlist *sg;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_24xx __iomem *reg;

        /* Setup device pointers. */
        ret = 0;
        ha = sp->ha;
        reg = &ha->iobase->isp24;
        cmd = sp->cmd;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (ha->marker_needed != 0) {
                if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
                        return QLA_FUNCTION_FAILED;
                }
                ha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = ha->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (ha->outstanding_cmds[handle] == 0)
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (cmd->use_sg) {
                sg = (struct scatterlist *) cmd->request_buffer;
                tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
                if (tot_dsds == 0)
                        goto queuing_error;
        } else if (cmd->request_bufflen) {
                dma_addr_t req_dma;

                req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
                    cmd->request_bufflen, cmd->sc_data_direction);
                if (dma_mapping_error(req_dma))
                        goto queuing_error;

                sp->dma_handle = req_dma;
                tot_dsds = 1;
        }

        req_cnt = qla24xx_calc_iocbs(tot_dsds);
        if (ha->req_q_cnt < (req_cnt + 2)) {
                cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
                if (ha->req_ring_index < cnt)
                        ha->req_q_cnt = cnt - ha->req_ring_index;
                else
                        ha->req_q_cnt = ha->request_q_length -
                            (ha->req_ring_index - cnt);
        }
        if (ha->req_q_cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet. */
        ha->current_outstanding_cmd = handle;
        ha->outstanding_cmds[handle] = sp;
        sp->ha = ha;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        ha->req_q_cnt -= req_cnt;

        cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
        cmd_pkt->handle = handle;

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
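
        /*
         * FCP payload fields travel big-endian on the wire; the LUN above
         * and the CDB below are byte-swapped into that order.
         */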
        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
        RD_REG_DWORD_RELAXED(&reg->req_q_in);           /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (ha->flags.process_response_queue &&
            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(ha);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        if (cmd->use_sg && tot_dsds) {
                sg = (struct scatterlist *) cmd->request_buffer;
                pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
        } else if (tot_dsds) {
                pci_unmap_single(ha->pdev, sp->dma_handle,
                    cmd->request_bufflen, cmd->sc_data_direction);
        }

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}