ql4_iocb.c

/*
 * QLogic iSCSI HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

#include <scsi/scsi_tcq.h>

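/**
 * qla4xxx_space_in_req_ring - checks for room on the request ring
 * @ha: Pointer to host adapter structure.
 * @req_cnt: Number of request-queue entries needed by the caller.
 *
 * Refreshes the cached free-entry count from the firmware-updated
 * shadow-register copy of the out pointer whenever the cache looks
 * too small, then reports whether @req_cnt entries (plus a small
 * safety cushion) are available.
 **/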
static int
qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
{
	uint16_t cnt;

	/* Calculate number of free request entries. */
	if ((req_cnt + 2) >= ha->req_q_count) {
		cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
					  (ha->request_in - cnt);
	}

	/* Check if room for request in request ring. */
	if ((req_cnt + 2) < ha->req_q_count)
		return 1;
	else
		return 0;
}

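/**
 * qla4xxx_advance_req_ring_ptr - advances the request in pointer
 * @ha: Pointer to host adapter structure.
 *
 * Bumps request_in together with the matching request_ptr entry
 * pointer, wrapping both back to the start of the ring when
 * REQUEST_QUEUE_DEPTH - 1 is reached.
 **/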
static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
{
	/* Advance request queue pointer */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}
}

/**
 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *	- returns the current request_in pointer (if queue not full)
 *	- advances the request_in pointer
 *	- checks for queue full
 **/
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			       struct queue_entry **queue_entry)
{
	uint16_t req_cnt = 1;

	if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
		*queue_entry = ha->request_ptr;
		memset(*queue_entry, 0, sizeof(**queue_entry));

		qla4xxx_advance_req_ring_ptr(ha);
		ha->req_q_count -= req_cnt;
		return QLA_SUCCESS;
	}

	return QLA_ERROR;
}

/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker modifier
 *
 * This routine issues a marker IOCB.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
	struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);

	/* Make the IOCB contents globally visible before ringing the
	 * doorbell; the readl flushes the posted PCI write. */
	wmb();

	/* Tell ISP it's got a new I/O request */
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}

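/*
 * A typical caller (a minimal sketch; the real call sites live in the
 * driver's error-recovery paths) would issue a marker after a reset,
 * e.g.:
 *
 *	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
 *				     MM_LUN_RESET) != QLA_SUCCESS)
 *		return FAILED;
 */

/**
 * qla4xxx_alloc_cont_entry - consumes a ring entry for continuation DSDs
 * @ha: Pointer to host adapter structure.
 *
 * Takes the entry at request_ptr, advances the ring, and initializes
 * the entry header as a Continuation Type 1 IOCB. The caller must
 * already have verified (and accounted for) sufficient ring space.
 **/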
static struct continuation_t1_entry *
qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	qla4xxx_advance_req_ring_ptr(ha);

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont_entry;
}

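/**
 * qla4xxx_calc_request_entries - IOCBs needed for a given DSD count
 * @dsds: Number of data segment descriptors (scatter/gather entries).
 *
 * One command IOCB holds COMMAND_SEG descriptors and each continuation
 * IOCB holds CONTINUE_SEG more, so the result is one command entry
 * plus ceil((dsds - COMMAND_SEG) / CONTINUE_SEG) continuation entries.
 **/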
static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > COMMAND_SEG) {
		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
			iocbs++;
	}
	return iocbs;
}

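/**
 * qla4xxx_build_scsi_iocbs - fills in the data segment descriptors
 * @srb: Pointer to SCSI Request Block being built.
 * @cmd_entry: Pointer to the command IOCB at the head of the request.
 * @tot_dsds: Total number of mapped scatter/gather segments.
 *
 * Walks the command's scatter/gather list, filling the DSDs embedded
 * in the command entry first and then drawing continuation entries
 * from the ring as each entry's descriptor slots run out.
 **/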
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
				     struct command_t3_entry *cmd_entry,
				     uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = srb->cmd;
	ha = srb->ha;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) &cmd_entry->dataseg[0];

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			struct continuation_t1_entry *cont_entry;

			cont_entry = qla4xxx_alloc_cont_entry(ha);
			cur_dsd = (struct data_seg_a64 *)
				  &cont_entry->dataseg[0];
			avail_dsds = CONTINUE_SEG;
		}

		sle_dma = sg_dma_address(sg);
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		cur_dsd++;
	}
}

/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	int nseg;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint32_t index;
	char tag[2];

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	index = (uint32_t)cmd->request->tag;

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue.  If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request,
	 * retrieving garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	/* Calculate the number of request entries needed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		goto queuing_error;
	tot_dsds = nseg;

	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits iff there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (scsi_bufflen(cmd)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		ha->bytes_xfered += scsi_bufflen(cmd);
		if (ha->bytes_xfered & ~0xFFFFF) {
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;
	if (scsi_populate_tag_msg(cmd, tag))
		switch (tag[0]) {
		case MSG_HEAD_TAG:
			cmd_entry->control_flags |= CF_HEAD_TAG;
			break;
		case MSG_ORDERED_TAG:
			cmd_entry->control_flags |= CF_ORDERED_TAG;
			break;
		}

	qla4xxx_advance_req_ring_ptr(ha);
	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	wmb();

	srb->cmd->host_scribble = (unsigned char *)srb;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCB used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	/* Tell ISP it's got a new I/O request; the readl flushes the
	 * posted PCI write. */
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}