qla_iocb.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
	struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
static void qla25xx_set_que(srb_t *, struct rsp_que **);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block holding the command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

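/*
 * Worked example (arithmetic only, derived from the function above): for
 * dsds = 10, the Command Type 2 IOCB carries the first 3 descriptors and
 * one Continuation Type 0 IOCB carries the remaining 7, so the result is
 * 1 + (10 - 3) / 7 = 2 IOCBs with no remainder.
 */
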
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

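/*
 * Worked example (arithmetic only): for dsds = 8, the Command Type 3 IOCB
 * carries 2 descriptors and the remaining 6 need Continuation Type 1 IOCBs
 * at 5 apiece, so 1 + (8 - 2) / 5 = 2, plus 1 for the remainder of 1,
 * giving 3 IOCBs in total.
 */
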
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

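/*
 * Note on the two helpers above: both advance req->ring_index and wrap it
 * back to the start of the ring once it reaches req->length, treating the
 * request queue as a circular buffer. They assume the caller (for example
 * qla2x00_start_scsi() below) already holds ha->hardware_lock while the
 * command is being built.
 */
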
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
 * capable IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

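/*
 * Layout example (derived from the loop above): a 12-segment transfer on
 * the 32-bit path places 3 DSDs in the Command Type 2 IOCB and then needs
 * two Continuation Type 0 IOCBs holding 7 and 2 DSDs respectively, which
 * matches qla2x00_calc_iocbs_32(12) = 3.
 */
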
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
 * capable IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

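/*
 * Layout example: the same 12-segment transfer on the 64-bit path places
 * 2 DSDs in the Command Type 3 IOCB and 5 in each of two Continuation
 * Type 1 IOCBs, matching qla2x00_calc_iocbs_64(12) = 3.
 */
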
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

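/*
 * Usage note (a hedged assumption about the surrounding driver, not shown
 * in this file): qla2x00_start_scsi() is typically reached through the
 * ha->isp_ops->start_scsi() method from the driver's queuecommand path,
 * once an srb_t has been built for the struct scsi_cmnd.
 */
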
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

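/*
 * Example invocation, mirroring the calls in the start_scsi routines: a
 * full-synchronization marker covering all targets and LUNs is queued with
 *
 *	qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 *
 * where loop_id and lun are unused for the MK_SYNC_ALL modifier, as the
 * type check in __qla2x00_marker() shows.
 */
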
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
				    RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
					    ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

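/*
 * Worked example (arithmetic only): for dsds = 7, the command IOCB carries
 * 1 descriptor and the remaining 6 need Continuation Type 1 IOCBs at 5
 * apiece, so 1 + (7 - 1) / 5 = 2, plus 1 for the remainder of 1, giving
 * 3 IOCBs in total.
 */
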
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

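/*
 * Layout example: an 11-segment transfer on the Type 7 path places 1 DSD
 * in the command IOCB and 5 in each of two Continuation Type 1 IOCBs,
 * matching qla24xx_calc_iocbs(11) = 3.
 */
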
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}

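/*
 * Selection example (derived from the test above): with cpu_affinity_enabled
 * set and max_rsp_queues = 4, a command issued from CPU 2 completes on
 * rsp_q_map[3], while CPU 3, which has no dedicated queue under that
 * configuration, falls back to the default rsp_q_map[0].
 */
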
/* Generic Control-SRB manipulation functions. */
static void *
qla2x00_alloc_iocbs(srb_t *sp)
{
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable)
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	req->cnt -= req_cnt;

	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;
	sp->handle = handle;

queuing_error:
	return pkt;
}

static void
qla2x00_start_iocbs(srb_t *sp)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
	} else {
		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
	}
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_logio *lio = sp->ctx;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_logio *lio = sp->ctx;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	struct srb_ctx *ctx = sp->ctx;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp);
	if (!pkt)
		goto done;

	rval = QLA_SUCCESS;
	switch (ctx->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}

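/*
 * Usage note (an assumption about callers elsewhere in the driver): a
 * control SRB whose ctx->type is SRB_LOGIN_CMD or SRB_LOGOUT_CMD is handed
 * to qla2x00_start_sp(), which selects the 24xx LOGINOUT IOCB or the
 * legacy mailbox-entry form based on IS_FWI2_CAPABLE(ha), then rings the
 * request-queue doorbell through qla2x00_start_iocbs().
 */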