ql4_iocb.c

/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

#include <scsi/scsi_tcq.h>

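/**
 * qla4xxx_space_in_req_ring - checks for adequate free space in request ring
 * @ha: Pointer to host adapter structure.
 * @req_cnt: Number of request entries needed.
 *
 * The cached free count is refreshed from the firmware's shadow out-pointer
 * only when it looks too small; the extra two entries act as a cushion so
 * the in-pointer never catches the out-pointer.  Worked example (assuming
 * an illustrative REQUEST_QUEUE_DEPTH of 512): with request_in == 500 and
 * a shadow out index of 10, 512 - (500 - 10) = 22 entries are free, so a
 * request needing 20 or more entries would be refused.
 **/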
static int
qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
{
	uint16_t cnt;

	/* Calculate number of free request entries. */
	if ((req_cnt + 2) >= ha->req_q_count) {
		cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
					  (ha->request_in - cnt);
	}

	/* Check if room for request in request ring. */
	if ((req_cnt + 2) < ha->req_q_count)
		return 1;
	else
		return 0;
}

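/**
 * qla4xxx_advance_req_ring_ptr - advances the request in-pointer by one entry
 * @ha: Pointer to host adapter structure.
 *
 * Wraps both the index and the ring pointer back to the start of the ring
 * when the last entry is consumed.
 **/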
static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
{
	/* Advance request queue pointer */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}
}

/**
 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *	- returns the current request_in pointer (if queue not full)
 *	- advances the request_in pointer
 *	- checks for queue full
 **/
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			       struct queue_entry **queue_entry)
{
	uint16_t req_cnt = 1;

	if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
		*queue_entry = ha->request_ptr;
		memset(*queue_entry, 0, sizeof(**queue_entry));

		qla4xxx_advance_req_ring_ptr(ha);
		ha->req_q_count -= req_cnt;
		return QLA_SUCCESS;
	}

	return QLA_ERROR;
}

/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker identifier
 *
 * This routine issues a marker IOCB.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry, int lun,
			     uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);
	wmb();

	/* Tell ISP it's got a new I/O request */
	ha->isp_ops->queue_iocb(ha);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}

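/**
 * qla4xxx_alloc_cont_entry - claims a continuation IOCB from the ring
 * @ha: Pointer to host adapter structure.
 *
 * The caller is expected to have verified ring space already (see
 * qla4xxx_space_in_req_ring()); this routine only claims the current
 * entry and stamps the continuation-type header.
 **/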
static struct continuation_t1_entry *
qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	qla4xxx_advance_req_ring_ptr(ha);

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont_entry;
}

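/**
 * qla4xxx_calc_request_entries - returns # of IOCBs needed for a scatter list
 * @dsds: Number of data segment descriptors to map.
 *
 * One command IOCB carries up to COMMAND_SEG descriptors; each overflow
 * block of up to CONTINUE_SEG descriptors costs one continuation IOCB.
 * For example, if COMMAND_SEG were 4 and CONTINUE_SEG 7 (illustrative
 * values only), a 20-segment transfer would need 1 + ceil(16 / 7) = 4
 * IOCBs in total.
 **/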
static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > COMMAND_SEG) {
		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
			iocbs++;
	}
	return iocbs;
}

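/**
 * qla4xxx_build_scsi_iocbs - populates data segment descriptors for a command
 * @srb: Pointer to the SCSI request block being built.
 * @cmd_entry: Pointer to the command IOCB at the head of the chain.
 * @tot_dsds: Number of mapped scatter-gather entries to describe.
 *
 * Fills the command IOCB's inline descriptors first, then spills the
 * remainder into continuation IOCBs claimed straight off the ring.
 **/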
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
				     struct command_t3_entry *cmd_entry,
				     uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = srb->cmd;
	ha = srb->ha;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) &(cmd_entry->dataseg[0]);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			struct continuation_t1_entry *cont_entry;

			cont_entry = qla4xxx_alloc_cont_entry(ha);
			cur_dsd = (struct data_seg_a64 *)
				  &cont_entry->dataseg[0];
			avail_dsds = CONTINUE_SEG;
		}

		sle_dma = sg_dma_address(sg);
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		cur_dsd++;
	}
}

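/**
 * qla4_83xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * ISP83xx variant of the request-queue doorbell; the readback is
 * presumably there to flush the posted write before the hardware lock
 * is released.
 **/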
void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
	readl(&ha->qla4_83xx_reg->req_q_in);
}

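/**
 * qla4_83xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * ISP83xx variant of the response-queue doorbell; mirrors
 * qla4_82xx_complete_iocb() but targets the 83xx register block.
 **/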
void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
	readl(&ha->qla4_83xx_reg->rsp_q_out);
}

/**
 * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
{
	uint32_t dbval = 0;

	dbval = 0x14 | (ha->func_num << 5);
	dbval = dbval | (0 << 8) | (ha->request_in << 16);

	qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}

/**
 * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
	readl(&ha->qla4_82xx_reg->rsp_q_out);
}

/**
 * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);
}

/**
 * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);
}

/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	int nseg;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint32_t index;
	char tag[2];

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	index = (uint32_t)cmd->request->tag;

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue.  If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request, retrieving
	 * garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	/* Calculate the number of request entries needed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		goto queuing_error;
	tot_dsds = nseg;

	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits iff there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (scsi_bufflen(cmd)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		ha->bytes_xfered += scsi_bufflen(cmd);
		if (ha->bytes_xfered & ~0xFFFFF) {
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;
	if (scsi_populate_tag_msg(cmd, tag))
		switch (tag[0]) {
		case MSG_HEAD_TAG:
			cmd_entry->control_flags |= CF_HEAD_TAG;
			break;
		case MSG_ORDERED_TAG:
			cmd_entry->control_flags |= CF_ORDERED_TAG;
			break;
		}

	qla4xxx_advance_req_ring_ptr(ha);
	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	wmb();

	srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCB used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	ha->isp_ops->queue_iocb(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}

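/**
 * qla4xxx_send_passthru0 - issues a PASSTHRU0 IOCB for an iSCSI PDU
 * @task: Pointer to the iSCSI task to be sent to the firmware.
 *
 * Hands the raw PDU (header plus any immediate data) to the firmware in
 * a single passthrough IOCB and, when a response buffer has been set up,
 * asks the firmware to wait for the reply (PT_FLAG_WAIT_4_RESPONSE).
 * Returns QLA_SUCCESS if the IOCB was queued, else QLA_ERROR.
 **/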
int qla4xxx_send_passthru0(struct iscsi_task *task)
{
	struct passthru0 *passthru_iocb;
	struct iscsi_session *sess = task->conn->session;
	struct ddb_entry *ddb_entry = sess->dd_data;
	struct scsi_qla_host *ha = ddb_entry->ha;
	struct ql4_task_data *task_data = task->dd_data;
	uint16_t ctrl_flags = 0;
	unsigned long flags;
	int ret = QLA_ERROR;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	task_data->iocb_req_cnt = 1;
	/* Put the IOCB on the request queue */
	if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
		goto queuing_error;

	passthru_iocb = (struct passthru0 *) ha->request_ptr;

	memset(passthru_iocb, 0, sizeof(struct passthru0));
	passthru_iocb->hdr.entryType = ET_PASSTHRU0;
	passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
	passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
	passthru_iocb->handle = task->itt;
	passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);

	/* Setup the out & in DSDs */
	if (task_data->req_len) {
		memcpy((uint8_t *)task_data->req_buffer +
		       sizeof(struct iscsi_hdr), task->data, task->data_count);
		ctrl_flags |= PT_FLAG_SEND_BUFFER;
		passthru_iocb->out_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->req_dma));
		passthru_iocb->out_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->req_dma));
		passthru_iocb->out_dsd.count =
					cpu_to_le32(task->data_count +
						    sizeof(struct iscsi_hdr));
	}

	if (task_data->resp_len) {
		passthru_iocb->in_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.count =
					cpu_to_le32(task_data->resp_len);
	}

	ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
	passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);

	/* Update the request pointer */
	qla4xxx_advance_req_ring_ptr(ha);
	wmb();

	/* Track IOCB used */
	ha->iocb_cnt += task_data->iocb_req_cnt;
	ha->req_q_count -= task_data->iocb_req_cnt;
	ha->isp_ops->queue_iocb(ha);
	ret = QLA_SUCCESS;

queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return ret;
}

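/*
 * Allocate a zeroed mailbox request block (MRB) tied to this adapter.
 * Returns NULL on allocation failure.
 */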
static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
{
	struct mrb *mrb;

	mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
	if (!mrb)
		return mrb;

	mrb->ha = ha;
	return mrb;
}

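/**
 * qla4xxx_send_mbox_iocb - issues a mailbox IOCB built from an MRB
 * @ha: Pointer to host adapter structure.
 * @mrb: Pointer to the mailbox request block to send.
 * @in_mbox: Mailbox register values to copy into the IOCB.
 *
 * Index 0 of active_mrb_array is never used, so at most MAX_MRB - 1
 * mailbox IOCBs can be outstanding at once; the chosen index doubles
 * as the completion handle.
 **/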
static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
				  uint32_t *in_mbox)
{
	int rval = QLA_SUCCESS;
	uint32_t i;
	unsigned long flags;
	uint32_t index = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the mailbox IOCB */
	rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
	if (rval != QLA_SUCCESS)
		goto exit_mbox_iocb;

	index = ha->mrb_index;
	/* get valid mrb index */
	for (i = 0; i < MAX_MRB; i++) {
		index++;
		if (index == MAX_MRB)
			index = 1;
		if (ha->active_mrb_array[index] == NULL) {
			ha->mrb_index = index;
			break;
		}
	}

	mrb->iocb_cnt = 1;
	ha->active_mrb_array[index] = mrb;
	mrb->mbox->handle = index;
	mrb->mbox->hdr.entryType = ET_MBOX_CMD;
	mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
	memcpy(mrb->mbox->in_mbox, in_mbox, 32);
	mrb->mbox_cmd = in_mbox[0];
	wmb();

	ha->isp_ops->queue_iocb(ha);

exit_mbox_iocb:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}

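/**
 * qla4xxx_ping_iocb - issues a PING mailbox IOCB
 * @ha: Pointer to host adapter structure.
 * @options: Ping options (e.g. IPv4 vs. IPv6 addressing).
 * @payload_size: Size of the ping payload.
 * @pid: Process ID associated with this ping request (used to match the
 *	 completion back to its requester).
 * @ipaddr: Destination IP address (16 bytes, covering both IPv4 and IPv6).
 *
 * On success the MRB is intentionally left allocated; it is expected to
 * be freed later when the ping status IOCB completes.
 **/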
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
		      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
{
	uint32_t in_mbox[8];
	struct mrb *mrb = NULL;
	int rval = QLA_SUCCESS;

	memset(in_mbox, 0, sizeof(in_mbox));

	mrb = qla4xxx_get_new_mrb(ha);
	if (!mrb) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
				  __func__));
		rval = QLA_ERROR;
		goto exit_ping;
	}

	in_mbox[0] = MBOX_CMD_PING;
	in_mbox[1] = options;
	memcpy(&in_mbox[2], &ipaddr[0], 4);
	memcpy(&in_mbox[3], &ipaddr[4], 4);
	memcpy(&in_mbox[4], &ipaddr[8], 4);
	memcpy(&in_mbox[5], &ipaddr[12], 4);
	in_mbox[6] = payload_size;

	mrb->pid = pid;
	rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);
	if (rval != QLA_SUCCESS)
		goto exit_ping;

	return rval;

exit_ping:
	kfree(mrb);
	return rval;
}