qla_iocb.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
static void qla2x00_isp_cmd(scsi_qla_host_t *ha);

/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @cmd: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cflags = CF_WRITE;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		cflags = CF_READ;
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
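
/*
 * Worked examples of the DSD-packing arithmetic above (illustrative
 * only): a Command Type 2 IOCB embeds three 32-bit DSDs and each
 * Continuation Type 0 IOCB carries seven more, so:
 *
 *	qla2x00_calc_iocbs_32(3)  == 1	(all DSDs fit in the command IOCB)
 *	qla2x00_calc_iocbs_32(10) == 2	(3 + 7)
 *	qla2x00_calc_iocbs_32(11) == 3	(3 + 7 + 1)
 *
 * The 64-bit variant packs two DSDs in the Command Type 3 IOCB and
 * five per Continuation Type 1 IOCB:
 *
 *	qla2x00_calc_iocbs_64(2) == 1
 *	qla2x00_calc_iocbs_64(7) == 2	(2 + 5)
 *	qla2x00_calc_iocbs_64(8) == 3	(2 + 5 + 1)
 */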

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
{
	cont_entry_t *cont_pkt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else {
		ha->request_ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)ha->request_ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else {
		ha->request_ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
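
/*
 * Note (illustrative): neither prep helper checks for ring space before
 * stepping the ring index; callers reserve the full set of entries up
 * front via qla2x00_calc_iocbs_32()/_64() -- see the req_cnt headroom
 * check in qla2x00_start_scsi() below.
 */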

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			cont_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Seven DSDs are available in the Continuation
				 * Type 0 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
				cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
				avail_dsds = 7;
			}

			*cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(sp->dma_handle);
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}
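
/*
 * Layout note (illustrative): each 32-bit DSD written above is an
 * (address, length) pair of little-endian 32-bit words, while the
 * 64-bit variant emits (address LSD, address MSD, length) triples --
 * which is why LSD()/MSD() split the dma_addr_t before the
 * cpu_to_le32() conversions.
 */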

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret;
	unsigned long flags;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	struct scatterlist *sg;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (cmd->use_sg) {
		sg = (struct scatterlist *) cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
		    cmd->request_bufflen, cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		sp->dma_handle = req_dma;
		tot_dsds = 1;
	}

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
	} else if (tot_dsds) {
		pci_unmap_single(ha->pdev, sp->dma_handle,
		    cmd->request_bufflen, cmd->sc_data_direction);
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
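
/*
 * Worked example of the free-entry math above (illustrative values):
 * with request_q_length == 128, req_ring_index == 120 and a hardware
 * out pointer cnt == 10, the in pointer has wrapped past the out
 * pointer, so req_q_cnt = 128 - (120 - 10) = 18 free entries.  The
 * "req_cnt + 2" headroom keeps the in pointer from ever catching the
 * out pointer, which would make a full ring look empty.
 */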

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, ha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(ha);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ret = __qla2x00_marker(ha, loop_id, lun, type);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (ret);
}
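
/*
 * Usage sketch (illustrative): qla2x00_marker() takes the hardware
 * lock itself, while __qla2x00_marker() is for callers already holding
 * it.  Both start_scsi() routines in this file issue a sync-all marker
 * this way before queuing commands:
 *
 *	if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
 *		return (QLA_FUNCTION_FAILED);
 */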

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= ha->req_q_cnt) {
			/* Calculate number of free request entries. */
			if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
				cnt = (uint16_t)RD_REG_DWORD(
				    &reg->isp24.req_q_out);
			else
				cnt = qla2x00_debounce_register(
				    ISP_REQ_Q_OUT(ha, &reg->isp));
			if (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt = ha->request_q_length -
				    (ha->req_ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < ha->req_q_cnt) {
			ha->req_q_cnt--;
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)ha->req_ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!ha->marker_needed)
			qla2x00_poll(ha);

		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
		WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
	} else {
		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
	}
}
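
/*
 * Note (illustrative): the register read-back following each
 * WRT_REG_* above flushes the posted PCI write so the updated
 * in-pointer reaches the ISP promptly, matching the explicit
 * "PCI Posting." read-backs in the start_scsi() routines.
 */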

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
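
/*
 * Illustrative examples: the Command Type 7 IOCB embeds a single DSD,
 * with five more per Continuation Type 1 IOCB, so:
 *
 *	qla24xx_calc_iocbs(1) == 1
 *	qla24xx_calc_iocbs(6) == 2	(1 + 5)
 *	qla24xx_calc_iocbs(7) == 3	(1 + 5 + 1)
 */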

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret;
	unsigned long flags;
	scsi_qla_host_t *ha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	struct scatterlist *sg;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_24xx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp24;
	cmd = sp->cmd;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return QLA_FUNCTION_FAILED;
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (cmd->use_sg) {
		sg = (struct scatterlist *) cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
		    cmd->request_bufflen, cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		sp->dma_handle = req_dma;
		tot_dsds = 1;
	}

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
	RD_REG_DWORD_RELAXED(&reg->req_q_in);	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
	} else if (tot_dsds) {
		pci_unmap_single(ha->pdev, sp->dma_handle,
		    cmd->request_bufflen, cmd->sc_data_direction);
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}