lpfc_scsi.c

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
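
/*
 * Both waits are expressed in seconds; the abort and reset handlers below
 * multiply them by HZ when passing them to schedule_timeout().
 */
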
/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are set up in the BPL
 * and the BPL BDE is set up in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
	psb->scsi_hba = phba;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
						sizeof(struct fcp_rsp);
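
	/*
	 * The single pci-pool allocation carved up above is laid out as:
	 *
	 *   psb->data: [ struct fcp_cmnd | struct fcp_rsp | BPL entries ]
	 *
	 * with room in the BPL for cfg_sg_seg_cnt scatter-gather entries
	 * (cfg_sg_dma_buf_size, the pool element size, is presumed to be
	 * sized accordingly when the pool is created).
	 */
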
	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
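
	/*
	 * Note that the bde address words and the completed tus word are
	 * byte-swapped into the little-endian format the adapter DMAs from;
	 * on little-endian hosts the conversions above are no-ops.
	 */
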
	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

static void
lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
{
	struct lpfc_hba *phba = psb->scsi_hba;

	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
				psb->seg_cnt, psb->pCmd->sc_data_direction);
	} else {
		 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
						psb->pCmd->request_bufflen,
						psb->pCmd->sc_data_direction);
		 }
	}

	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
}
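
/*
 * Map the data buffer attached to the scsi command into bus addresses and
 * fill out the data bde entries of the BPL that lpfc_get_scsi_buf left for
 * queuecommand.  Returns 0 on success and 1 on any mapping failure, which
 * lpfc_queuecommand turns into SCSI_MLQUEUE_HOST_BUSY so the midlayer
 * retries the command later.
 */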
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int datadir = scsi_cmnd->sc_data_direction;
	int dma_error;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_cmnd->use_sg) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
						scsi_cmnd->use_sg, datadir);
		if (lpfc_cmd->seg_cnt == 0)
			return 1;

		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			dma_unmap_sg(&phba->pcidev->dev, sgel,
				     lpfc_cmd->seg_cnt, datadir);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 */
		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			sgel++;
			num_bde++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		physaddr = dma_map_single(&phba->pcidev->dev,
					  scsi_cmnd->request_buffer,
					  scsi_cmnd->request_bufflen,
					  datadir);
		dma_error = dma_mapping_error(physaddr);
		if (dma_error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0718 Unable to dma_map_single "
					"request_buffer: x%x\n",
					phba->brd_no, dma_error);
			return 1;
		}

		lpfc_cmd->nonsg_phys = physaddr;
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = 0;
		else
			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		num_bde = 1;
		bpl++;
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
	return 0;
}
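
/*
 * Interpret a failed FCP response.  The FCP_RSP payload arrives big-endian
 * off the wire, which is why every length and residual field examined here
 * passes through be32_to_cpu() before use; the outcome is folded into
 * cmnd->result for the midlayer.
 */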
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
	uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0730 FCP command failed: RSP "
			"Data: x%x x%x x%x x%x x%x x%x\n",
			phba->brd_no, resp_info, scsi_status,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}

	cmnd->resid = 0;
	if (resp_info & RESID_UNDER) {
		cmnd->resid = be32_to_cpu(fcprsp->rspResId);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n", phba->brd_no,
					cmnd->cmnd[0], cmnd->request_bufflen,
					cmnd->resid, cmnd->underflow);

			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x\n",
				phba->brd_no, cmnd->cmnd[0],
				cmnd->request_bufflen, cmnd->resid);
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		cmnd->resid = cmnd->request_bufflen;
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}
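
/*
 * Completion handler wired into every FCP command IOCB by
 * lpfc_scsi_prep_cmnd.  It maps the IOCB status onto a SCSI result,
 * returns the lpfc_scsi_buf to the free list under the host lock, and
 * hands the command back to the midlayer through scsi_done.
 */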
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long iflag;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
				"x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, cmd->cmnd[0], cmd->device->id,
				cmd->device->lun, lpfc_cmd->status,
				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(lpfc_cmd);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (pnode) {
			if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
				cmd->result = ScsiResult(DID_BUS_BUSY,
							 SAM_STAT_BUSY);
		} else {
			cmd->result = ScsiResult(DID_NO_CONNECT, 0);
		}
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
				"SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries, cmd->resid);
	}

	spin_lock_irqsave(phba->host->host_lock, iflag);
	lpfc_free_scsi_buf(lpfc_cmd);
	cmd->host_scribble = NULL;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);

	cmd->scsi_done(cmd);
}
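
/*
 * Fill in the FCP_CMND payload (LUN, CDB, tag attribute) and the IOCB
 * fields that depend on the transfer direction and the destination node;
 * the DMA side of the setup was already done by lpfc_scsi_prep_dma_buf.
 */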
static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Pick the IOCB command, read-check
	 * parameter, and FCP control bits to match the data transfer
	 * direction set up by lpfc_scsi_prep_dma_buf.
	 */
	if (scsi_cmnd->use_sg) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}
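
/*
 * Recast an allocated lpfc_scsi_buf as a task management command (LUN
 * reset, abort task set, or target reset).  Returns 1 on success and 0
 * when the target node is unavailable or not mapped, in which case the
 * callers report FAILED to the midlayer.
 */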
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
	struct lpfc_rport_data *rdata = scsi_dev->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	psli = &phba->sli;
	piocbq = &(lpfc_cmd->cur_iocbq);
	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	lpfc_cmd->rdata = rdata;

	switch (task_mgmt_cmd) {
	case FCP_LUN_RESET:
		/* Issue LUN Reset to TGT <num> LUN <num> */
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0703 Issue LUN Reset to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);
		break;
	case FCP_ABORT_TASK_SET:
		/* Issue Abort Task Set to TGT <num> LUN <num> */
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0701 Issue Abort Task Set to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);
		break;
	case FCP_TARGET_RESET:
		/* Issue Target Reset to TGT <num> */
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0702 Issue Target Reset to TGT %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, ndlp->nlp_rpi,
				ndlp->nlp_flag);
		break;
	}

	return 1;
}
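
/*
 * Send an FCP target reset to a single target and poll for its completion
 * (the LPFC_IO_POLL flag below).  Called from the bus reset handler with
 * the host lock held.
 */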
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp = NULL;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	int ret;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	lpfc_cmd->scsi_hba = phba;
	iocbq = &lpfc_cmd->cur_iocbq;
	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
	if (!iocbqrsp)
		return FAILED;
	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

	iocbq->iocb_flag |= LPFC_IO_POLL;
	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
		     &phba->sli.ring[phba->sli.fcp_ring],
		     iocbq, SLI_IOCB_HIGH_PRIORITY,
		     iocbqrsp,
		     lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		ret = FAILED;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
			(lpfc_cmd->result & IOERR_DRVR_MASK))
				lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the target.
	 * Unfortunately, some targets do not abide by this, forcing the
	 * driver to double check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    lpfc_cmd->pCmd->device->id,
			    lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);

	/* Return response IOCB to free list. */
	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
	return ret;
}
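
/*
 * Minimal completion handlers used on the abort path: they only put the
 * lpfc_scsi_buf back on the free list (and, for the aborted variant,
 * clear host_scribble) without completing the command to the midlayer.
 */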
static void
lpfc_scsi_cmd_iocb_cleanup(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			   struct lpfc_iocbq *pIocbOut)
{
	unsigned long iflag;
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	lpfc_free_scsi_buf(lpfc_cmd);
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static void
lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
				struct lpfc_iocbq *pIocbIn,
				struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *ml_cmd =
		((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;

	lpfc_scsi_cmd_iocb_cleanup(phba, pIocbIn, pIocbOut);
	ml_cmd->host_scribble = NULL;
}
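
/*
 * scsi_host_template info() entry point: the model description plus the
 * PCI location and port name, formatted into a static buffer.
 */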
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}
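
/*
 * queuecommand entry point.  A pre-allocated lpfc_scsi_buf is claimed from
 * the free list, the data buffer is mapped, and the finished IOCB is handed
 * to the SLI layer.  Commands to missing or unmapped nodes are failed back
 * immediately; resource and mapping shortfalls return
 * SCSI_MLQUEUE_HOST_BUSY so the midlayer retries later.
 */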
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *) cmnd->device->host->hostdata[0];
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	int err = 0;

	/*
	 * The target pointer is guaranteed not to be NULL because the driver
	 * only clears the device->hostdata field in lpfc_slave_destroy.  This
	 * approach guarantees no further IO calls on this target.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
		goto out_fail_command;
	}

	/*
	 * A Fibre Channel target is present and functioning only when the node
	 * state is MAPPED.  Any other state is a failure.
	 */
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
			cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
			goto out_fail_command;
		} else if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
			cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
			goto out_fail_command;
		}
		/*
		 * The device is most likely recovered and the driver
		 * needs a bit more time to finish.  Ask the midlayer
		 * to retry.
		 */
		goto out_host_busy;
	}

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL) {
		printk(KERN_WARNING "%s: No buffer available - list empty, "
		       "total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;
	return 0;

 out_host_busy_free_buf:
	lpfc_free_scsi_buf(lpfc_cmd);
	cmnd->host_scribble = NULL;
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}
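
/*
 * Abort a single command.  If the command is still on the pending txq it
 * is simply unlinked; otherwise an ABORT_XRI (or CLOSE_XRI when the link
 * is down) IOCB is issued and the handler polls, dropping the host lock
 * around each sleep, until the command's completion clears host_scribble
 * or the wait is exhausted.
 */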
static int
__lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *)cmnd->device->host->hostdata[0];
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_iocbq *abtsiocb = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	IOCB_t *cmd, *icmd;
	unsigned long snum;
	unsigned int id, lun;
	unsigned int loop_count = 0;
	int ret = IOCB_SUCCESS;

	/*
	 * If the host_scribble data area is NULL, then the driver has already
	 * completed this command, but the midlayer did not see the completion
	 * before the eh fired.  Just return SUCCESS.
	 */
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd)
		return SUCCESS;

	/* save these now since lpfc_cmd can be freed */
	id = lpfc_cmd->pCmd->device->id;
	lun = lpfc_cmd->pCmd->device->lun;
	snum = lpfc_cmd->pCmd->serial_number;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		cmd = &iocb->iocb;
		if (iocb->context1 != lpfc_cmd)
			continue;

		list_del_init(&iocb->list);
		pring->txq_cnt--;
		if (!iocb->iocb_cmpl) {
			list_add_tail(&iocb->list, lpfc_iocb_list);
		} else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
		}
		goto out;
	}

	list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
	if (abtsiocb == NULL)
		return FAILED;
	memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));

	/*
	 * The scsi command was not in the txq.  Check the txcmplq and if it is
	 * found, send an abort to the FW.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != lpfc_cmd)
			continue;

		iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
		cmd = &iocb->iocb;
		icmd = &abtsiocb->iocb;
		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
		icmd->un.acxri.abortContextTag = cmd->ulpContext;
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

		icmd->ulpLe = 1;
		icmd->ulpClass = cmd->ulpClass;
		if (phba->hba_state >= LPFC_LINK_UP)
			icmd->ulpCommand = CMD_ABORT_XRI_CN;
		else
			icmd->ulpCommand = CMD_CLOSE_XRI_CN;

		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
								IOCB_ERROR) {
			list_add_tail(&abtsiocb->list, lpfc_iocb_list);
			ret = IOCB_ERROR;
			break;
		}

		/* Wait for abort to complete */
		while (cmnd->host_scribble) {
			spin_unlock_irq(phba->host->host_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(LPFC_ABORT_WAIT * HZ);
			spin_lock_irq(phba->host->host_lock);
			if (++loop_count
			    > (2 * phba->cfg_nodev_tmo) / LPFC_ABORT_WAIT)
				break;
		}

		if (cmnd->host_scribble) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0748 abort handler timed "
					"out waiting for abort to "
					"complete. Data: "
					"x%x x%x x%x x%lx\n",
					phba->brd_no, ret, id, lun, snum);
			cmnd->host_scribble = NULL;
			iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
			ret = IOCB_ERROR;
		}
		break;
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI layer issued abort device "
			"Data: x%x x%x x%x x%lx\n",
			phba->brd_no, ret, id, lun, snum);

	return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	int rc;

	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_abort_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);

	return rc;
}
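
/*
 * LUN reset entry point.  Waits for the target to return to a MAPPED
 * state if necessary, sends an FCP LUN_RESET task management command, and
 * then flushes and polls away any I/O still outstanding against the LUN.
 */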
static int
__lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret = FAILED;
	int cnt, loopcnt;

	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or nodev timeout expires.
	 */
	while (1) {
		if (!pnode)
			break;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ / 2);
			spin_lock_irq(phba->host->host_lock);
		}
		if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
			break;
	}

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

	iocbq->iocb_flag |= LPFC_IO_POLL;
	iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;

	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
		     &phba->sli.ring[psli->fcp_ring],
		     iocbq, 0, iocbqrsp, 60);
	if (ret == IOCB_SUCCESS)
		ret = SUCCESS;

	lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
	lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
	if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
		if (lpfc_cmd->result & IOERR_DRVR_MASK)
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the target.
	 * Unfortunately, some targets do not abide by this, forcing the
	 * driver to double check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    cmnd->device->id, cmnd->device->lun, 0,
			    LPFC_CTX_LUN);

	loopcnt = 0;
	while ((cnt = lpfc_sli_sum_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN))) {
		spin_unlock_irq(phba->host->host_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(LPFC_RESET_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
	}

	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);

 out_free_scsi_buf:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
			"Data: x%x x%x x%x\n",
			phba->brd_no, lpfc_cmd->pCmd->device->id,
			lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
			lpfc_cmd->result);
	lpfc_free_scsi_buf(lpfc_cmd);
 out:
	return ret;
}

static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	int rc;

	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_reset_lun_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);

	return rc;
}

/*
 * Note: this function is entered with the host_lock held; the
 * lpfc_reset_bus_handler wrapper below acquires it before calling in.
 */
static int
__lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	unsigned int midlayer_id = 0;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	midlayer_id = cmnd->device->id;
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		/* Search the mapped list for this target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
			if ((i == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		lpfc_cmd->pCmd->device->id = i;
		lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0713 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
		}
	}

	cmnd->device->id = midlayer_id;
	loopcnt = 0;
	while ((cnt = lpfc_sli_sum_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST))) {
		spin_unlock_irq(phba->host->host_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(LPFC_RESET_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		/* flush all outstanding commands on the host */
		i = lpfc_sli_abort_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
				LPFC_CTX_HOST);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
		   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
		   phba->brd_no, cnt, i);
	}

	if (!err_count)
		ret = SUCCESS;

	lpfc_free_scsi_buf(lpfc_cmd);
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);
 out:
	return ret;
}

static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	int rc;

	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_reset_bus_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);

	return rc;
}
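
/*
 * slave_alloc entry point.  Fails with -ENXIO for target ids the driver
 * has not discovered; otherwise it stores the rport data in hostdata and
 * tops up the global lpfc_scsi_buf pool, bounded by cfg_hba_queue_depth.
 */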
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match = 0;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;
	struct list_head *listp;
	struct list_head *node_list[6];

	/*
	 * Store the target pointer in the scsi_device hostdata pointer
	 * provided the driver has already discovered the target id.
	 */

	/* Search the nlp lists other than unmap_list for this target ID */
	node_list[0] = &phba->fc_npr_list;
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_prli_list;
	node_list[3] = &phba->fc_reglogin_list;
	node_list[4] = &phba->fc_adisc_list;
	node_list[5] = &phba->fc_plogi_list;

	for (i = 0; i < 6 && !match; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;
		list_for_each_entry(ndlp, listp, nlp_listp) {
			if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
	}
	if (!match)
		return -ENXIO;

	sdev->hostdata = ndlp->rport->dd_data;

	/*
	 * Populate the cmds_per_lun count of scsi_bufs into this host's
	 * globally available list of scsi buffers.  Don't allocate more
	 * than the HBA limit conveyed to the midlayer via the host
	 * structure.  Note that this list of scsi bufs exists for the
	 * lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = LPFC_CMD_PER_LUN;
	if (total >= phba->cfg_hba_queue_depth) {
		printk(KERN_WARNING "%s, At config limitation of "
		       "%d allocated scsi_bufs\n", __FUNCTION__, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_get_scsi_buf(phba);
		if (!scsi_buf) {
			printk(KERN_ERR "%s, failed to allocate "
			       "scsi_buf\n", __FUNCTION__);
			break;
		}

		spin_lock_irqsave(phba->host->host_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(phba->host->host_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_reset_lun_handler,
	.eh_bus_reset_handler	= lpfc_reset_bus_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
	.max_sectors		= 0xFFFF,
};