lpfc_scsi.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
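
/*
 * LPFC_RESET_WAIT and LPFC_ABORT_WAIT are the per-iteration sleep
 * intervals, in seconds, for the polling loops in the error handlers
 * below: each loop sleeps via schedule_timeout(LPFC_xxx_WAIT * HZ) and
 * gives up after roughly 2 * phba->cfg_nodev_tmo seconds.
 */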

static inline void
lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
{
        fcmd->fcpLunLsl = 0;
        fcmd->fcpLunMsl = swab16((uint16_t)lun);
}
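
/*
 * Encoding sketch (an interpretation of the swab16 above): the 16-bit
 * LUN is byte-swapped into fcpLunMsl, the most-significant word of the
 * 8-byte FCP LUN field, and fcpLunLsl is zeroed.  Only single-level
 * LUNs that fit in 16 bits are representable through this helper.
 */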

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
        struct lpfc_scsi_buf *psb;
        struct ulp_bde64 *bpl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys;

        psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
        if (!psb)
                return NULL;
        memset(psb, 0, sizeof (struct lpfc_scsi_buf));
        psb->scsi_hba = phba;

        /*
         * Get memory from the pci pool to map the virt space to pci bus
         * space for an I/O.  The DMA buffer includes space for the struct
         * fcp_cmnd, struct fcp_rsp and the number of bde's necessary to
         * support the sg_tablesize.
         */
        psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
                                   &psb->dma_handle);
        if (!psb->data) {
                kfree(psb);
                return NULL;
        }

        /* Initialize virtual ptrs to dma_buf region. */
        memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
        psb->fcp_cmnd = psb->data;
        psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
        psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
                       sizeof(struct fcp_rsp);

        /* Initialize local short-hand pointers. */
        bpl = psb->fcp_bpl;
        pdma_phys = psb->dma_handle;

        /*
         * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are
         * sg list bdes.  Initialize the first two and leave the rest for
         * queuecommand.
         */
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
        bpl->tus.f.bdeFlags = BUFF_USE_CMND;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

        /* Setup the physical region for the FCP RSP */
        pdma_phys += sizeof (struct fcp_cmnd);
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
        bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
        bpl->tus.w = le32_to_cpu(bpl->tus.w);

        /*
         * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
         * initialize it with all known data now.
         */
        pdma_phys += (sizeof (struct fcp_rsp));
        iocb = &psb->cur_iocbq.iocb;
        iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
        iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
        iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
        iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
        iocb->ulpBdeCount = 1;
        iocb->ulpClass = CLASS3;

        return psb;
}
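
/*
 * Layout of the single pci-pool allocation built above (sizes taken from
 * the pointer arithmetic on psb->data):
 *
 *   psb->data: [ struct fcp_cmnd | struct fcp_rsp | BPL entries ... ]
 *
 * The embedded IOCB's BDL points at the BPL region, whose first two
 * entries always describe the fcp_cmnd and fcp_rsp; queuecommand fills
 * in the data bdes behind them.
 */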

static void
lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
{
        struct lpfc_hba *phba = psb->scsi_hba;

        /*
         * There are only two special cases to consider.  (1) the scsi
         * command requested scatter-gather usage or (2) the scsi command
         * allocated a request buffer, but did not request use_sg.  There
         * is a third case, but it does not require resource deallocation.
         */
        if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
                dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
                             psb->seg_cnt, psb->pCmd->sc_data_direction);
        } else {
                if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
                        dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
                                         psb->pCmd->request_bufflen,
                                         psb->pCmd->sc_data_direction);
                }
        }

        list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        dma_addr_t physaddr;
        uint32_t i, num_bde = 0;
        int datadir = scsi_cmnd->sc_data_direction;
        int dma_error;

        /*
         * There are three possibilities here - use scatter-gather segment,
         * use the single mapping, or neither.  Start the lpfc command prep
         * by bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the
         * first data bde entry.
         */
        bpl += 2;
        if (scsi_cmnd->use_sg) {
                /*
                 * The driver stores the segment count returned from
                 * dma_map_sg because this is a count of dma-mappings used
                 * to map the use_sg pages.  They are not guaranteed to be
                 * the same for those architectures that implement an IOMMU.
                 */
                sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
                lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
                                               scsi_cmnd->use_sg, datadir);
                if (lpfc_cmd->seg_cnt == 0)
                        return 1;

                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
                        printk(KERN_ERR "%s: Too many sg segments from "
                               "dma_map_sg. Config %d, seg_cnt %d",
                               __FUNCTION__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
                        dma_unmap_sg(&phba->pcidev->dev, sgel,
                                     lpfc_cmd->seg_cnt, datadir);
                        return 1;
                }

                /*
                 * The driver established a maximum scatter-gather segment
                 * count during probe that limits the number of sg elements
                 * in any single scsi command.  Just run through the seg_cnt
                 * and format the bde's.
                 */
                for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
                        physaddr = sg_dma_address(sgel);
                        bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                        bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                        bpl->tus.f.bdeSize = sg_dma_len(sgel);
                        if (datadir == DMA_TO_DEVICE)
                                bpl->tus.f.bdeFlags = 0;
                        else
                                bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                        bpl->tus.w = le32_to_cpu(bpl->tus.w);
                        bpl++;
                        sgel++;
                        num_bde++;
                }
        } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
                physaddr = dma_map_single(&phba->pcidev->dev,
                                          scsi_cmnd->request_buffer,
                                          scsi_cmnd->request_bufflen,
                                          datadir);
                dma_error = dma_mapping_error(physaddr);
                if (dma_error) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "%d:0718 Unable to dma_map_single "
                                        "request_buffer: x%x\n",
                                        phba->brd_no, dma_error);
                        return 1;
                }

                lpfc_cmd->nonsg_phys = physaddr;
                bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
                if (datadir == DMA_TO_DEVICE)
                        bpl->tus.f.bdeFlags = 0;
                else
                        bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                bpl->tus.w = le32_to_cpu(bpl->tus.w);
                num_bde = 1;
                bpl++;
        }

        /*
         * Finish initializing those IOCB fields that are dependent on the
         * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
         * reinitialized since all iocb memory resources are used many times
         * for transmit, receive, and continuation bpl's.
         */
        iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb_cmd->un.fcpi64.bdl.bdeSize +=
                (num_bde * sizeof (struct ulp_bde64));
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;
        fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
        return 0;
}
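
/*
 * BPL sizing sketch: the bdl.bdeSize set above is always
 * (2 + num_bde) * sizeof(struct ulp_bde64) -- two bdes for fcp_cmnd and
 * fcp_rsp plus one per mapped data segment.  With the usual 12-byte
 * ulp_bde64 (an assumption about the struct layout), a 4-segment I/O
 * would give bdeSize = 6 * 12 = 72 bytes.
 */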

static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
        struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
        struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
        uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
        uint32_t resp_info = fcprsp->rspStatus2;
        uint32_t scsi_status = fcprsp->rspStatus3;
        uint32_t host_status = DID_OK;
        uint32_t rsplen = 0;

        /*
         * If this is a task management command, there is no
         * scsi packet associated with this lpfc_cmd.  The driver
         * consumes it.
         */
        if (fcpcmd->fcpCntl2) {
                scsi_status = 0;
                goto out;
        }

        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                        "%d:0730 FCP command failed: RSP "
                        "Data: x%x x%x x%x x%x x%x x%x\n",
                        phba->brd_no, resp_info, scsi_status,
                        be32_to_cpu(fcprsp->rspResId),
                        be32_to_cpu(fcprsp->rspSnsLen),
                        be32_to_cpu(fcprsp->rspRspLen),
                        fcprsp->rspInfo3);

        if (resp_info & RSP_LEN_VALID) {
                rsplen = be32_to_cpu(fcprsp->rspRspLen);
                if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
                    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
                        host_status = DID_ERROR;
                        goto out;
                }
        }

        if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
                uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
                if (snslen > SCSI_SENSE_BUFFERSIZE)
                        snslen = SCSI_SENSE_BUFFERSIZE;

                memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
        }

        cmnd->resid = 0;
        if (resp_info & RESID_UNDER) {
                cmnd->resid = be32_to_cpu(fcprsp->rspResId);

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0716 FCP Read Underrun, expected %d, "
                                "residual %d Data: x%x x%x x%x\n",
                                phba->brd_no,
                                be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
                                fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

                /*
                 * The cmnd->underflow is the minimum number of bytes that
                 * must be transferred for this command.  Provided a sense
                 * condition is not present, make sure the actual amount
                 * transferred is at least the underflow value or fail.
                 */
                if (!(resp_info & SNS_LEN_VALID) &&
                    (scsi_status == SAM_STAT_GOOD) &&
                    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                        "%d:0717 FCP command x%x residual "
                                        "underrun converted to error "
                                        "Data: x%x x%x x%x\n", phba->brd_no,
                                        cmnd->cmnd[0], cmnd->request_bufflen,
                                        cmnd->resid, cmnd->underflow);

                        host_status = DID_ERROR;
                }
        } else if (resp_info & RESID_OVER) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0720 FCP command x%x residual "
                                "overrun error. Data: x%x x%x \n",
                                phba->brd_no, cmnd->cmnd[0],
                                cmnd->request_bufflen, cmnd->resid);
                host_status = DID_ERROR;

        /*
         * Check SLI validation that all the transfer was actually done
         * (fcpi_parm should be zero).  Apply check only to reads.
         */
        } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
                   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0734 FCP Read Check Error Data: "
                                "x%x x%x x%x x%x\n", phba->brd_no,
                                be32_to_cpu(fcpcmd->fcpDl),
                                be32_to_cpu(fcprsp->rspResId),
                                fcpi_parm, cmnd->cmnd[0]);
                host_status = DID_ERROR;
                cmnd->resid = cmnd->request_bufflen;
        }

out:
        cmnd->result = ScsiResult(host_status, scsi_status);
}
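
/*
 * Worked example of the underrun-to-error conversion above (illustrative
 * numbers only): for a read with request_bufflen 4096, underflow 4096,
 * good SCSI status, no sense data, and rspResId 2048, the bytes actually
 * moved are 4096 - 2048 = 2048 < underflow, so host_status becomes
 * DID_ERROR even though the target reported success.
 */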

static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                        struct lpfc_iocbq *pIocbOut)
{
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) pIocbIn->context1;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        unsigned long iflag;

        lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

        if (lpfc_cmd->status) {
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                else if (lpfc_cmd->status >= IOSTAT_CNT)
                        lpfc_cmd->status = IOSTAT_DEFAULT;

                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0729 FCP cmd x%x failed <%d/%d> status: "
                                "x%x result: x%x Data: x%x x%x\n",
                                phba->brd_no, cmd->cmnd[0], cmd->device->id,
                                cmd->device->lun, lpfc_cmd->status,
                                lpfc_cmd->result, pIocbOut->iocb.ulpContext,
                                lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

                switch (lpfc_cmd->status) {
                case IOSTAT_FCP_RSP_ERROR:
                        /* Call FCP RSP handler to determine result */
                        lpfc_handle_fcp_err(lpfc_cmd);
                        break;
                case IOSTAT_NPORT_BSY:
                case IOSTAT_FABRIC_BSY:
                        cmd->result = ScsiResult(DID_BUS_BUSY, 0);
                        break;
                default:
                        cmd->result = ScsiResult(DID_ERROR, 0);
                        break;
                }

                if (pnode) {
                        if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
                                cmd->result = ScsiResult(DID_BUS_BUSY,
                                                         SAM_STAT_BUSY);
                } else {
                        cmd->result = ScsiResult(DID_NO_CONNECT, 0);
                }
        } else {
                cmd->result = ScsiResult(DID_OK, 0);
        }

        if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
                uint32_t *lp = (uint32_t *)cmd->sense_buffer;

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
                                "SNS x%x x%x Data: x%x x%x\n",
                                phba->brd_no, cmd->device->id,
                                cmd->device->lun, cmd, cmd->result,
                                *lp, *(lp + 3), cmd->retries, cmd->resid);
        }

        spin_lock_irqsave(phba->host->host_lock, iflag);
        lpfc_free_scsi_buf(lpfc_cmd);
        cmd->host_scribble = NULL;
        spin_unlock_irqrestore(phba->host->host_lock, iflag);

        cmd->scsi_done(cmd);
}
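
/*
 * Locking note for the completion above: only the scsi buffer free and
 * the host_scribble clear are done under host_lock; cmd->scsi_done() is
 * deliberately called after the lock is dropped, so the midlayer callback
 * never runs with this driver's host lock held.
 */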

static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
                    struct lpfc_nodelist *pnode)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
        int datadir = scsi_cmnd->sc_data_direction;

        lpfc_cmd->fcp_rsp->rspSnsLen = 0;
        /* clear task management bits */
        lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

        lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);

        memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

        if (scsi_cmnd->device->tagged_supported) {
                switch (scsi_cmnd->tag) {
                case HEAD_OF_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
                        break;
                case ORDERED_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = ORDERED_Q;
                        break;
                default:
                        fcp_cmnd->fcpCntl1 = SIMPLE_Q;
                        break;
                }
        } else
                fcp_cmnd->fcpCntl1 = 0;

        /*
         * There are three possibilities here - use scatter-gather segment,
         * use the single mapping, or neither.  Pick the IOCB command type
         * and the read/write controls to match.
         */
        if (scsi_cmnd->use_sg) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm =
                                scsi_cmnd->request_bufflen;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm =
                                scsi_cmnd->request_bufflen;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else {
                iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
                iocb_cmd->un.fcpi.fcpi_parm = 0;
                iocb_cmd->ulpPU = 0;
                fcp_cmnd->fcpCntl3 = 0;
                phba->fc4ControlRequests++;
        }

        /*
         * Finish initializing those IOCB fields that are independent
         * of the scsi_cmnd request_buffer
         */
        piocbq->iocb.ulpContext = pnode->nlp_rpi;
        if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
                piocbq->iocb.ulpFCP2Rcvy = 1;

        piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
        piocbq->context1 = lpfc_cmd;
        piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}
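
/*
 * Summary of the IOCB setup above, per data direction:
 *
 *   write:   CMD_FCP_IWRITE64_CR, fcpCntl3 = WRITE_DATA, fcpi_parm = 0
 *   read:    CMD_FCP_IREAD64_CR,  fcpCntl3 = READ_DATA,
 *            fcpi_parm = request_bufflen, ulpPU = PARM_READ_CHECK
 *   no data: CMD_FCP_ICMND64_CR,  fcpCntl3 = 0
 *
 * On reads fcpi_parm carries the expected transfer length so the SLI
 * read-check in lpfc_handle_fcp_err() can spot short transfers.
 */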

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
                             struct lpfc_scsi_buf *lpfc_cmd,
                             uint8_t task_mgmt_cmd)
{
        struct lpfc_sli *psli;
        struct lpfc_iocbq *piocbq;
        IOCB_t *piocb;
        struct fcp_cmnd *fcp_cmnd;
        struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
        struct lpfc_rport_data *rdata = scsi_dev->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;

        if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
                return 0;
        }

        psli = &phba->sli;
        piocbq = &(lpfc_cmd->cur_iocbq);
        piocb = &piocbq->iocb;

        fcp_cmnd = lpfc_cmd->fcp_cmnd;
        lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
        fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

        piocb->ulpCommand = CMD_FCP_ICMND64_CR;

        piocb->ulpContext = ndlp->nlp_rpi;
        if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
                piocb->ulpFCP2Rcvy = 1;
        }
        piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

        /* ulpTimeout is only one byte */
        if (lpfc_cmd->timeout > 0xff) {
                /*
                 * Do not timeout the command at the firmware level.
                 * The driver will provide the timeout mechanism.
                 */
                piocb->ulpTimeout = 0;
        } else {
                piocb->ulpTimeout = lpfc_cmd->timeout;
        }

        lpfc_cmd->rdata = rdata;

        switch (task_mgmt_cmd) {
        case FCP_LUN_RESET:
                /* Issue LUN Reset to TGT <num> LUN <num> */
                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0703 Issue LUN Reset to TGT %d LUN %d "
                                "Data: x%x x%x\n",
                                phba->brd_no, scsi_dev->id, scsi_dev->lun,
                                ndlp->nlp_rpi, ndlp->nlp_flag);
                break;
        case FCP_ABORT_TASK_SET:
                /* Issue Abort Task Set to TGT <num> LUN <num> */
                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0701 Issue Abort Task Set to TGT %d "
                                "LUN %d Data: x%x x%x\n",
                                phba->brd_no, scsi_dev->id, scsi_dev->lun,
                                ndlp->nlp_rpi, ndlp->nlp_flag);
                break;
        case FCP_TARGET_RESET:
                /* Issue Target Reset to TGT <num> */
                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0702 Issue Target Reset to TGT %d "
                                "Data: x%x x%x\n",
                                phba->brd_no, scsi_dev->id, ndlp->nlp_rpi,
                                ndlp->nlp_flag);
                break;
        }

        return 1;
}
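
/*
 * Return convention: 1 when the task management IOCB was built, 0 when
 * the node is missing or not MAPPED (callers translate 0 into FAILED).
 * Because ulpTimeout is a single byte, timeouts above 255 seconds are
 * pushed down as 0 and left to the driver to enforce.
 */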

static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
{
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *iocbqrsp = NULL;
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        int ret;

        ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
        if (!ret)
                return FAILED;

        lpfc_cmd->scsi_hba = phba;
        iocbq = &lpfc_cmd->cur_iocbq;

        list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
        if (!iocbqrsp)
                return FAILED;
        memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

        iocbq->iocb_flag |= LPFC_IO_POLL;
        ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
                     &phba->sli.ring[phba->sli.fcp_ring],
                     iocbq, SLI_IOCB_HIGH_PRIORITY,
                     iocbqrsp,
                     lpfc_cmd->timeout);
        if (ret != IOCB_SUCCESS) {
                lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                ret = FAILED;
        } else {
                ret = SUCCESS;
                lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
                lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
        }

        /*
         * All outstanding txcmplq I/Os should have been aborted by the
         * target.  Unfortunately, some targets do not abide by this
         * forcing the driver to double check.
         */
        lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                            lpfc_cmd->pCmd->device->id,
                            lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);

        /* Return response IOCB to free list. */
        list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
        return ret;
}

static void
lpfc_scsi_cmd_iocb_cleanup(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                           struct lpfc_iocbq *pIocbOut)
{
        unsigned long iflag;
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) pIocbIn->context1;

        spin_lock_irqsave(phba->host->host_lock, iflag);
        lpfc_free_scsi_buf(lpfc_cmd);
        spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static void
lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
                                struct lpfc_iocbq *pIocbIn,
                                struct lpfc_iocbq *pIocbOut)
{
        struct scsi_cmnd *ml_cmd =
                ((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;

        lpfc_scsi_cmd_iocb_cleanup(phba, pIocbIn, pIocbOut);
        ml_cmd->host_scribble = NULL;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
        struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
        int len;
        static char lpfcinfobuf[384];

        memset(lpfcinfobuf, 0, 384);
        if (phba && phba->pcidev) {
                strncpy(lpfcinfobuf, phba->ModelDesc, 256);
                len = strlen(lpfcinfobuf);
                snprintf(lpfcinfobuf + len,
                         384 - len,
                         " on PCI bus %02x device %02x irq %d",
                         phba->pcidev->bus->number,
                         phba->pcidev->devfn,
                         phba->pcidev->irq);
                len = strlen(lpfcinfobuf);
                if (phba->Port[0]) {
                        snprintf(lpfcinfobuf + len,
                                 384 - len,
                                 " port %s",
                                 phba->Port);
                }
        }
        return lpfcinfobuf;
}

static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
        struct lpfc_hba *phba =
                (struct lpfc_hba *) cmnd->device->host->hostdata[0];
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct lpfc_scsi_buf *lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
        int err = 0;

        /*
         * The target pointer is guaranteed not to be NULL because the
         * driver only clears the device->hostdata field in
         * lpfc_slave_destroy.  This approach guarantees no further IO
         * calls on this target.
         */
        if (!ndlp) {
                cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
                goto out_fail_command;
        }

        /*
         * A Fibre Channel target is present and functioning only when the
         * node state is MAPPED.  Any other state is a failure.
         */
        if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
                if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
                    (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
                        cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
                        goto out_fail_command;
                } else if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
                        cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
                        goto out_fail_command;
                }
                /*
                 * The device is most likely recovered and the driver
                 * needs a bit more time to finish.  Ask the midlayer
                 * to retry.
                 */
                goto out_host_busy;
        }

        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        if (lpfc_cmd == NULL) {
                printk(KERN_WARNING "%s: No buffer available - list empty, "
                       "total count %d\n", __FUNCTION__,
                       phba->total_scsi_bufs);
                goto out_host_busy;
        }

        /*
         * Store the midlayer's command structure for the completion phase
         * and complete the command initialization.
         */
        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->rdata = rdata;
        lpfc_cmd->timeout = 0;
        cmnd->host_scribble = (unsigned char *)lpfc_cmd;
        cmnd->scsi_done = done;

        err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
        if (err)
                goto out_host_busy_free_buf;

        lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

        err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
                                  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
        if (err)
                goto out_host_busy_free_buf;
        return 0;

out_host_busy_free_buf:
        lpfc_free_scsi_buf(lpfc_cmd);
        cmnd->host_scribble = NULL;
out_host_busy:
        return SCSI_MLQUEUE_HOST_BUSY;

out_fail_command:
        done(cmnd);
        return 0;
}
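
/*
 * Dispatch sketch for queuecommand: take a preallocated lpfc_scsi_buf,
 * map the data (lpfc_scsi_prep_dma_buf), fill in the FCP CMND and IOCB
 * (lpfc_scsi_prep_cmnd), then ring the FCP ring.  Any failure after the
 * buffer is claimed unwinds to SCSI_MLQUEUE_HOST_BUSY so the midlayer
 * requeues the command rather than failing it.
 */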

static int
__lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
        struct lpfc_hba *phba =
                (struct lpfc_hba *)cmnd->device->host->hostdata[0];
        struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        struct lpfc_iocbq *iocb, *next_iocb;
        struct lpfc_iocbq *abtsiocb = NULL;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        IOCB_t *cmd, *icmd;
        unsigned long snum;
        unsigned int id, lun;
        unsigned int loop_count = 0;
        int ret = IOCB_SUCCESS;

        /*
         * If the host_scribble data area is NULL, then the driver has
         * already completed this command, but the midlayer did not see the
         * completion before the eh fired.  Just return SUCCESS.
         */
        lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
        if (!lpfc_cmd)
                return SUCCESS;

        /* save these now since lpfc_cmd can be freed */
        id = lpfc_cmd->pCmd->device->id;
        lun = lpfc_cmd->pCmd->device->lun;
        snum = lpfc_cmd->pCmd->serial_number;

        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
                cmd = &iocb->iocb;
                if (iocb->context1 != lpfc_cmd)
                        continue;

                list_del_init(&iocb->list);
                pring->txq_cnt--;
                if (!iocb->iocb_cmpl) {
                        list_add_tail(&iocb->list, lpfc_iocb_list);
                } else {
                        cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
                }

                goto out;
        }

        list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
        if (abtsiocb == NULL)
                return FAILED;

        memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));

        /*
         * The scsi command was not in the txq.  Check the txcmplq and if it
         * is found, send an abort to the FW.
         */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
                if (iocb->context1 != lpfc_cmd)
                        continue;

                iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
                cmd = &iocb->iocb;
                icmd = &abtsiocb->iocb;
                icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
                icmd->un.acxri.abortContextTag = cmd->ulpContext;
                icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

                icmd->ulpLe = 1;
                icmd->ulpClass = cmd->ulpClass;
                if (phba->hba_state >= LPFC_LINK_UP)
                        icmd->ulpCommand = CMD_ABORT_XRI_CN;
                else
                        icmd->ulpCommand = CMD_CLOSE_XRI_CN;

                abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
                if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
                                                                IOCB_ERROR) {
                        list_add_tail(&abtsiocb->list, lpfc_iocb_list);
                        ret = IOCB_ERROR;
                        break;
                }

                /* Wait for abort to complete */
                while (cmnd->host_scribble) {
                        spin_unlock_irq(phba->host->host_lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(LPFC_ABORT_WAIT * HZ);
                        spin_lock_irq(phba->host->host_lock);
                        if (++loop_count
                            > (2 * phba->cfg_nodev_tmo) / LPFC_ABORT_WAIT)
                                break;
                }

                if (cmnd->host_scribble) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "%d:0748 abort handler timed "
                                        "out waiting for abort to "
                                        "complete. Data: "
                                        "x%x x%x x%x x%lx\n",
                                        phba->brd_no, ret, id, lun, snum);
                        cmnd->host_scribble = NULL;
                        iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
                        ret = IOCB_ERROR;
                }

                break;
        }

out:
        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                        "%d:0749 SCSI layer issued abort device "
                        "Data: x%x x%x x%x x%lx\n",
                        phba->brd_no, ret, id, lun, snum);

        return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
        int rc;

        spin_lock_irq(cmnd->device->host->host_lock);
        rc = __lpfc_abort_handler(cmnd);
        spin_unlock_irq(cmnd->device->host->host_lock);

        return rc;
}
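
/*
 * Abort strategy, in order: if the command is still on the txq it never
 * reached the adapter, so it is completed locally with IOERR_SLI_ABORTED;
 * if it is on the txcmplq, an ABTS (or CMD_CLOSE_XRI_CN when the link is
 * not yet up) is issued and the handler polls host_scribble, dropping
 * host_lock for each LPFC_ABORT_WAIT-second sleep, until the completion
 * clears it or roughly 2 * cfg_nodev_tmo seconds elapse.
 */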

static int
__lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_scsi_buf *lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        int ret = FAILED;
        int cnt, loopcnt;

        /*
         * If target is not in a MAPPED state, delay the reset until
         * target is rediscovered or nodev timeout expires.
         */
        while (1) {
                if (!pnode)
                        break;

                if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
                        spin_unlock_irq(phba->host->host_lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(HZ / 2);
                        spin_lock_irq(phba->host->host_lock);
                }
                if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
                        break;
        }

        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        if (lpfc_cmd == NULL)
                goto out;

        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->timeout = 60;
        lpfc_cmd->scsi_hba = phba;

        ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
        if (!ret)
                goto out_free_scsi_buf;

        iocbq = &lpfc_cmd->cur_iocbq;

        /* get a buffer for this IOCB command response */
        list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
        if (iocbqrsp == NULL)
                goto out_free_scsi_buf;

        memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

        iocbq->iocb_flag |= LPFC_IO_POLL;
        iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;

        ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
                     &phba->sli.ring[psli->fcp_ring],
                     iocbq, 0, iocbqrsp, 60);
        if (ret == IOCB_SUCCESS)
                ret = SUCCESS;

        lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
        lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
        if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
                if (lpfc_cmd->result & IOERR_DRVR_MASK)
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;

        /*
         * All outstanding txcmplq I/Os should have been aborted by the
         * target.  Unfortunately, some targets do not abide by this
         * forcing the driver to double check.
         */
        lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                            cmnd->device->id, cmnd->device->lun, 0,
                            LPFC_CTX_LUN);

        loopcnt = 0;
        while ((cnt = lpfc_sli_sum_iocb(phba,
                                        &phba->sli.ring[phba->sli.fcp_ring],
                                        cmnd->device->id, cmnd->device->lun,
                                        LPFC_CTX_LUN))) {
                spin_unlock_irq(phba->host->host_lock);
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(LPFC_RESET_WAIT * HZ);
                spin_lock_irq(phba->host->host_lock);

                if (++loopcnt
                    > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
                        break;
        }

        if (cnt) {
                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                        "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
                        phba->brd_no, cnt);
        }

        list_add_tail(&iocbqrsp->list, lpfc_iocb_list);

out_free_scsi_buf:
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0713 SCSI layer issued LUN reset (%d, %d) "
                        "Data: x%x x%x x%x\n",
                        phba->brd_no, lpfc_cmd->pCmd->device->id,
                        lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
                        lpfc_cmd->result);
        lpfc_free_scsi_buf(lpfc_cmd);

out:
        return ret;
}

static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
        int rc;

        spin_lock_irq(cmnd->device->host->host_lock);
        rc = __lpfc_reset_lun_handler(cmnd);
        spin_unlock_irq(cmnd->device->host->host_lock);

        return rc;
}

/*
 * Note: midlayer calls this function with the host_lock held
 */
static int
__lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
        struct lpfc_nodelist *ndlp = NULL;
        int match;
        int ret = FAILED, i, err_count = 0;
        int cnt, loopcnt;
        unsigned int midlayer_id = 0;
        struct lpfc_scsi_buf *lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;

        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        if (lpfc_cmd == NULL)
                goto out;

        /* The lpfc_cmd storage is reused.  Set all loop invariants. */
        lpfc_cmd->timeout = 60;
        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->scsi_hba = phba;

        /*
         * Since the driver manages a single bus device, reset all
         * targets known to the driver.  Should any target reset
         * fail, this routine returns failure to the midlayer.
         */
        midlayer_id = cmnd->device->id;
        for (i = 0; i < MAX_FCP_TARGET; i++) {
                /* Search the mapped list for this target ID */
                match = 0;
                list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
                        if ((i == ndlp->nlp_sid) && ndlp->rport) {
                                match = 1;
                                break;
                        }
                }
                if (!match)
                        continue;

                lpfc_cmd->pCmd->device->id = i;
                lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
                ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
                if (ret != SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0713 Bus Reset on target %d failed\n",
                                phba->brd_no, i);
                        err_count++;
                }
        }

        cmnd->device->id = midlayer_id;
        loopcnt = 0;
        while ((cnt = lpfc_sli_sum_iocb(phba,
                                        &phba->sli.ring[phba->sli.fcp_ring],
                                        0, 0, LPFC_CTX_HOST))) {
                spin_unlock_irq(phba->host->host_lock);
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(LPFC_RESET_WAIT * HZ);
                spin_lock_irq(phba->host->host_lock);

                if (++loopcnt
                    > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
                        break;
        }

        if (cnt) {
                /* flush all outstanding commands on the host */
                i = lpfc_sli_abort_iocb(phba,
                                &phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
                                LPFC_CTX_HOST);

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
                   phba->brd_no, cnt, i);
        }

        if (!err_count)
                ret = SUCCESS;

        lpfc_free_scsi_buf(lpfc_cmd);
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
                        phba->brd_no, ret);
out:
        return ret;
}

static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
        int rc;

        spin_lock_irq(cmnd->device->host->host_lock);
        rc = __lpfc_reset_bus_handler(cmnd);
        spin_unlock_irq(cmnd->device->host->host_lock);

        return rc;
}

static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
        struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
        struct lpfc_nodelist *ndlp = NULL;
        int match = 0;
        struct lpfc_scsi_buf *scsi_buf = NULL;
        uint32_t total = 0, i;
        uint32_t num_to_alloc = 0;
        unsigned long flags;
        struct list_head *listp;
        struct list_head *node_list[6];

        /*
         * Store the target pointer in the scsi_device hostdata pointer
         * provided the driver has already discovered the target id.
         */

        /* Search the nlp lists other than unmap_list for this target ID */
        node_list[0] = &phba->fc_npr_list;
        node_list[1] = &phba->fc_nlpmap_list;
        node_list[2] = &phba->fc_prli_list;
        node_list[3] = &phba->fc_reglogin_list;
        node_list[4] = &phba->fc_adisc_list;
        node_list[5] = &phba->fc_plogi_list;

        for (i = 0; i < 6 && !match; i++) {
                listp = node_list[i];
                if (list_empty(listp))
                        continue;
                list_for_each_entry(ndlp, listp, nlp_listp) {
                        if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
                                match = 1;
                                break;
                        }
                }
        }

        if (!match)
                return -ENXIO;

        sdev->hostdata = ndlp->rport->dd_data;

        /*
         * Populate the cmds_per_lun count scsi_bufs into this host's
         * globally available list of scsi buffers.  Don't allocate more
         * than the HBA limit conveyed to the midlayer via the host
         * structure.  Note that this list of scsi bufs exists for the
         * lifetime of the driver.
         */
        total = phba->total_scsi_bufs;
        num_to_alloc = LPFC_CMD_PER_LUN;
        if (total >= phba->cfg_hba_queue_depth) {
                printk(KERN_WARNING "%s, At config limitation of "
                       "%d allocated scsi_bufs\n", __FUNCTION__, total);
                return 0;
        } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }

        for (i = 0; i < num_to_alloc; i++) {
                scsi_buf = lpfc_get_scsi_buf(phba);
                if (!scsi_buf) {
                        printk(KERN_ERR "%s, failed to allocate "
                               "scsi_buf\n", __FUNCTION__);
                        break;
                }

                spin_lock_irqsave(phba->host->host_lock, flags);
                phba->total_scsi_bufs++;
                list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
                spin_unlock_irqrestore(phba->host->host_lock, flags);
        }
        return 0;
}
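
/*
 * Allocation math for the loop above: each new device grows the shared
 * buffer pool by LPFC_CMD_PER_LUN, capped at cfg_hba_queue_depth.  For
 * example (illustrative numbers), with LPFC_CMD_PER_LUN = 3, a depth of
 * 128 and 126 buffers already allocated, only 2 more are created.
 */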

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
        struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
        struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
        else
                scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

        /*
         * Initialize the fc transport attributes for the target
         * containing this scsi device.  Also note that the driver's
         * target pointer is stored in the starget_data for the
         * driver's sysfs entry point functions.
         */
        rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

        return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
        sdev->hostdata = NULL;
        return;
}

struct scsi_host_template lpfc_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler= lpfc_reset_lun_handler,
        .eh_bus_reset_handler   = lpfc_reset_bus_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .this_id                = -1,
        .sg_tablesize           = LPFC_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_host_attrs,
        .max_sectors            = 0xFFFF,
};