/* ql4_iocb.c */
  1. /*
  2. * QLogic iSCSI HBA Driver
  3. * Copyright (c) 2003-2006 QLogic Corporation
  4. *
  5. * See LICENSE.qla4xxx for copyright and licensing details.
  6. */
  7. #include "ql4_def.h"
  8. #include "ql4_glbl.h"
  9. #include "ql4_dbg.h"
  10. #include "ql4_inline.h"
  11. #include <scsi/scsi_tcq.h>
  12. /**
  13. * qla4xxx_get_req_pkt - returns a valid entry in request queue.
  14. * @ha: Pointer to host adapter structure.
  15. * @queue_entry: Pointer to pointer to queue entry structure
  16. *
  17. * This routine performs the following tasks:
  18. * - returns the current request_in pointer (if queue not full)
  19. * - advances the request_in pointer
  20. * - checks for queue full
  21. **/
  22. static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
  23. struct queue_entry **queue_entry)
  24. {
  25. uint16_t request_in;
  26. uint8_t status = QLA_SUCCESS;
  27. *queue_entry = ha->request_ptr;
  28. /* get the latest request_in and request_out index */
  29. request_in = ha->request_in;
  30. ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
  31. /* Advance request queue pointer and check for queue full */
  32. if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
  33. request_in = 0;
  34. ha->request_ptr = ha->request_ring;
  35. } else {
  36. request_in++;
  37. ha->request_ptr++;
  38. }
  39. /* request queue is full, try again later */
  40. if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
  41. /* restore request pointer */
  42. ha->request_ptr = *queue_entry;
  43. status = QLA_ERROR;
  44. } else {
  45. ha->request_in = request_in;
  46. memset(*queue_entry, 0, sizeof(**queue_entry));
  47. }
  48. return status;
  49. }
/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker modifier value written into the IOCB's modifier field
 *
 * This routine issues a marker IOCB under the hardware lock.
 * Returns QLA_SUCCESS, or QLA_ERROR when no request-queue entry is
 * available.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
	struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);

	/* Make sure the IOCB contents are visible before the doorbell. */
	wmb();

	/* Tell ISP it's got a new I/O request */
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);	/* flush the posted write */

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}
/**
 * qla4xxx_alloc_cont_entry - claim the next request-ring slot for a
 *	continuation IOCB
 * @ha: Pointer to host adapter structure.
 *
 * Takes the entry at the current producer position, advances the producer
 * index/pointer (wrapping at the end of the ring), and loads the
 * continuation-type header defaults.  Unlike qla4xxx_get_req_pkt() this
 * performs no queue-full check, so the caller must already have reserved
 * enough entries and must hold ha->hardware_lock.
 **/
static struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
	struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	/* Advance request queue pointer */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	/* NOTE(review): truncating a cpu_to_le16() value to uint8_t picks
	 * up different bytes on big- vs little-endian hosts; presumably
	 * the low byte of request_in is intended -- confirm on BE. */
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont_entry;
}
  106. static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
  107. {
  108. uint16_t iocbs;
  109. iocbs = 1;
  110. if (dsds > COMMAND_SEG) {
  111. iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
  112. if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
  113. iocbs++;
  114. }
  115. return iocbs;
  116. }
/**
 * qla4xxx_build_scsi_iocbs - fill in the data segment descriptors
 * @srb: SCSI request block being queued
 * @cmd_entry: command IOCB already claimed on the request ring
 * @tot_dsds: number of mapped scatter/gather entries to describe
 *
 * Walks the command's mapped scatter/gather list and writes a 64-bit
 * address/length descriptor for each entry: the first COMMAND_SEG
 * descriptors go into the command IOCB itself, the rest into
 * continuation IOCBs allocated on demand from the request ring.
 * Caller must hold ha->hardware_lock and have reserved enough ring
 * entries (see qla4xxx_calc_request_entries()).
 **/
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
	struct command_t3_entry *cmd_entry,
	uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = srb->cmd;
	ha = srb->ha;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Current IOCB is out of descriptor slots:
		 * allocate an additional continuation packet. */
		if (avail_dsds == 0) {
			struct continuation_t1_entry *cont_entry;

			cont_entry = qla4xxx_alloc_cont_entry(ha);
			cur_dsd =
				(struct data_seg_a64 *)
				&cont_entry->dataseg[0];
			avail_dsds = CONTINUE_SEG;
		}

		/* Split the 64-bit DMA address into low/high words. */
		sle_dma = sg_dma_address(sg);
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
		cur_dsd++;
	}
}
/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 *
 * Returns QLA_SUCCESS once the command has been handed to the firmware,
 * QLA_ERROR when DMA mapping fails, the ring is full, or the adapter is
 * offline.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	int nseg;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint16_t cnt;
	uint32_t index;
	char tag[2];

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* The block-layer tag doubles as the IOCB handle used to match
	 * the firmware completion back to this command. */
	index = (uint32_t)cmd->request->tag;

	/* Calculate the number of request entries needed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		goto queuing_error;
	tot_dsds = nseg;

	req_cnt = qla4xxx_calc_request_entries(tot_dsds);

	/* Cached free-entry count looks too small: recompute it from the
	 * firmware's consumer index in the shadow registers. */
	if (ha->req_q_count < (req_cnt + 2)) {
		cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
				(ha->request_in - cnt);
	}

	/* Still no room; the "+ 2" keeps a safety gap between the
	 * producer and consumer indices. */
	if (ha->req_q_count < (req_cnt + 2))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits iff there is data to be
	 * transferred, as the data direction bit is sometimes filled
	 * in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (scsi_bufflen(cmd)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		/* Fold traffic into the megabyte counter once 2^20
		 * bytes have accumulated. */
		ha->bytes_xfered += scsi_bufflen(cmd);
		if (ha->bytes_xfered & ~0xFFFFF){
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;
	if (scsi_populate_tag_msg(cmd, tag))
		switch (tag[0]) {
		case MSG_HEAD_TAG:
			cmd_entry->control_flags |= CF_HEAD_TAG;
			break;
		case MSG_ORDERED_TAG:
			cmd_entry->control_flags |= CF_ORDERED_TAG;
			break;
		}

	/* Advance request queue pointer */
	ha->request_in++;
	if (ha->request_in == REQUEST_QUEUE_DEPTH) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else
		ha->request_ptr++;

	/* May advance the ring further for continuation entries. */
	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	wmb();

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue.  If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request, retrieving
	 * garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		/* NOTE(review): request_in/request_ptr were already advanced
		 * above and are not rolled back on this path -- confirm this
		 * is acceptable (the adapter is offline/resetting anyway). */
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	/* Let the mid-layer completion path find the srb. */
	srb->cmd->host_scribble = (unsigned char *)srb;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCB used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	/* Debug print statements */
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);	/* flush the posted doorbell write */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}