ql4_iocb.c

/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include <scsi/scsi_tcq.h>

/**
 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *	- returns the current request_in pointer (if queue not full)
 *	- advances the request_in pointer
 *	- checks for queue full
 **/
int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			struct queue_entry **queue_entry)
{
	uint16_t request_in;
	uint8_t status = QLA_SUCCESS;

	*queue_entry = ha->request_ptr;

	/* get the latest request_in and request_out index */
	request_in = ha->request_in;
	ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);

	/* Advance request queue pointer and check for queue full */
	if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		request_in++;
		ha->request_ptr++;
	}

	/* request queue is full, try again later */
	if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
		/* restore request pointer */
		ha->request_ptr = *queue_entry;
		status = QLA_ERROR;
	} else {
		ha->request_in = request_in;
		memset(*queue_entry, 0, sizeof(**queue_entry));
	}

	return status;
}

/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 *
 * This routine issues a marker IOCB with an MM_LUN_RESET modifier.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry, int lun)
{
	struct marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
	int_to_scsilun(lun, &marker_entry->lun);
	wmb();

	/* Tell ISP it's got a new I/O request */
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}

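/**
 * qla4xxx_alloc_cont_entry - allocates a continuation packet
 * @ha: Pointer to host adapter structure.
 *
 * Hands out the current request queue entry as a continuation packet,
 * advances the request_in pointer (wrapping at REQUEST_QUEUE_DEPTH) and
 * loads the packet header defaults. No queue-full check is made here;
 * callers are expected to have reserved enough entries up front.
 **/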
struct continuation_t1_entry *qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	/* Advance request queue pointer */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont_entry;
}

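/**
 * qla4xxx_calc_request_entries - calculates the number of IOCBs needed
 * @dsds: number of data segment descriptors to be transferred
 *
 * The command IOCB itself holds up to COMMAND_SEG descriptors; each group
 * of up to CONTINUE_SEG additional descriptors costs one continuation IOCB.
 **/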
uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > COMMAND_SEG) {
		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
			iocbs++;
	}
	return iocbs;
}

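/**
 * qla4xxx_build_scsi_iocbs - loads data segments into the command IOCB
 * @srb: Pointer to SCSI Request Block
 * @cmd_entry: Pointer to the command IOCB being built
 * @tot_dsds: total number of data segment descriptors to load
 *
 * Fills in the 64-bit DMA address/length pairs for the command's data
 * buffer(s), allocating continuation IOCBs whenever the current entry
 * runs out of descriptor slots.
 **/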
void qla4xxx_build_scsi_iocbs(struct srb *srb,
			      struct command_t3_entry *cmd_entry,
			      uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;

	cmd = srb->cmd;
	ha = srb->ha;

	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) &cmd_entry->dataseg[0];

	/* Load data segments */
	if (cmd->use_sg) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t sle_dma;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				struct continuation_t1_entry *cont_entry;

				cont_entry = qla4xxx_alloc_cont_entry(ha);
				cur_dsd = (struct data_seg_a64 *)
					&cont_entry->dataseg[0];
				avail_dsds = CONTINUE_SEG;
			}

			sle_dma = sg_dma_address(cur_seg);
			cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
			cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
			cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_dsd++;
			cur_seg++;
		}
	} else {
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
		cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
	}
}

/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	struct scatterlist *sg = NULL;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint16_t cnt;
	uint32_t index;
	char tag[2];

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	/* Send marker(s) if needed. */
	if (ha->marker_needed == 1) {
		if (qla4xxx_send_marker_iocb(ha, ddb_entry,
					     cmd->device->lun) != QLA_SUCCESS)
			return QLA_ERROR;

		ha->marker_needed = 0;
	}
	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	index = (uint32_t)cmd->request->tag;

	/* Calculate the number of request entries needed. */
	if (cmd->use_sg) {
		sg = (struct scatterlist *)cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
				      cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
					 cmd->request_bufflen,
					 cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		srb->dma_handle = req_dma;
		tot_dsds = 1;
	}
	req_cnt = qla4xxx_calc_request_entries(tot_dsds);

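	/*
	 * Refresh the cached free-entry count from the firmware's shadow
	 * copy of the request-out pointer. The +2 below appears to be a
	 * safety margin so the in pointer never quite catches the out
	 * pointer.
	 */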
	if (ha->req_q_count < (req_cnt + 2)) {
		cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
				(ha->request_in - cnt);
	}

	if (ha->req_q_count < (req_cnt + 2))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
	cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits only when there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred. */
	cmd_entry->control_flags = CF_NO_DATA;
	if (cmd->request_bufflen) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;
	if (scsi_populate_tag_msg(cmd, tag))
		switch (tag[0]) {
		case MSG_HEAD_TAG:
			cmd_entry->control_flags |= CF_HEAD_TAG;
			break;
		case MSG_ORDERED_TAG:
			cmd_entry->control_flags |= CF_ORDERED_TAG;
			break;
		}

	/* Advance request queue pointer */
	ha->request_in++;
	if (ha->request_in == REQUEST_QUEUE_DEPTH) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else
		ha->request_ptr++;

	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	wmb();

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue.  If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request,
	 * retrieving garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	srb->cmd->host_scribble = (unsigned char *)srb;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCBs used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	/* Tell ISP it's got a new I/O request */
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
			     cmd->sc_data_direction);
	} else if (tot_dsds)
		pci_unmap_single(ha->pdev, srb->dma_handle,
				 cmd->request_bufflen,
				 cmd->sc_data_direction);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_ERROR;
}