lpfc_scsi.c

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.emulex.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
 * contains information to build the IOCB. The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
	psb->scsi_hba = phba;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree (psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
	 * list bdes. Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

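/*
 * Remove and return the first lpfc_scsi_buf on the HBA's free list, or
 * NULL if the list is empty. The list is protected by scsi_buf_list_lock.
 */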
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

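/*
 * Unmap any DMA mappings held by this lpfc_scsi_buf and return it to the
 * HBA's free list.
 */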
static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	unsigned long iflag = 0;

	/*
	 * There are only two special cases to consider. (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg. There is a third
	 * case, but it does not require resource deallocation.
	 */
	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
				psb->seg_cnt, psb->pCmd->sc_data_direction);
	} else {
		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
					 psb->pCmd->request_bufflen,
					 psb->pCmd->sc_data_direction);
		}
	}

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

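/*
 * Map the data buffer of the scsi command attached to lpfc_cmd for DMA and
 * build the corresponding data BDEs in the buffer's BPL. Returns 0 on
 * success, 1 if the mapping fails or returns too many segments.
 */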
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int datadir = scsi_cmnd->sc_data_direction;
	int dma_error;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_cmnd->use_sg) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
						scsi_cmnd->use_sg, datadir);
		if (lpfc_cmd->seg_cnt == 0)
			return 1;

		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg. Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			dma_unmap_sg(&phba->pcidev->dev, sgel,
				     lpfc_cmd->seg_cnt, datadir);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements in
		 * any single scsi command. Just run through the seg_cnt and
		 * format the bde's.
		 */
		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			sgel++;
			num_bde++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		physaddr = dma_map_single(&phba->pcidev->dev,
					  scsi_cmnd->request_buffer,
					  scsi_cmnd->request_bufflen,
					  datadir);
		dma_error = dma_mapping_error(physaddr);
		if (dma_error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0718 Unable to dma_map_single "
					"request_buffer: x%x\n",
					phba->brd_no, dma_error);
			return 1;
		}

		lpfc_cmd->nonsg_phys = physaddr;
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = 0;
		else
			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		num_bde = 1;
		bpl++;
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
	return 0;
}

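/*
 * Translate a failed FCP response into a SCSI midlayer result, copying any
 * sense data and handling residual underrun/overrun conditions.
 */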
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
	uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd. The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0730 FCP command failed: RSP "
			"Data: x%x x%x x%x x%x x%x x%x\n",
			phba->brd_no, resp_info, scsi_status,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}

	cmnd->resid = 0;
	if (resp_info & RESID_UNDER) {
		cmnd->resid = be32_to_cpu(fcprsp->rspResId);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command. Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n", phba->brd_no,
					cmnd->cmnd[0], cmnd->request_bufflen,
					cmnd->resid, cmnd->underflow);

			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x \n",
				phba->brd_no, cmnd->cmnd[0],
				cmnd->request_bufflen, cmnd->resid);
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		cmnd->resid = cmnd->request_bufflen;
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

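/*
 * Completion handler for FCP command IOCBs. Maps the IOCB completion status
 * to a SCSI result, completes the command via scsi_done(), and adjusts the
 * LUN queue depth after a queue-full or a quiet ramp-up interval.
 */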
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
				"x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, cmd->cmnd[0], cmd->device->id,
				cmd->device->lun, lpfc_cmd->status,
				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(lpfc_cmd);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL )
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
				"SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries, cmd->resid);
	}

	result = cmd->result;
	sdev = cmd->device;
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result && pnode != NULL &&
	    ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full. If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0711 detected queue full - lun queue depth "
				" adjusted to %d.\n", phba->brd_no, depth);
		}
	}

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

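/*
 * Fill in the FCP_CMND payload and the remaining IOCB fields (command type,
 * read/write parameters, RPI, class, timeout) for a SCSI command destined
 * to the given node.
 */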
static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_cmnd->use_sg) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}

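/*
 * Build an FCP task management IOCB (e.g. LUN or target reset) in lpfc_cmd.
 * Returns 1 on success, 0 if the node is not in a mapped state.
 */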
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	psli = &phba->sli;
	piocbq = &(lpfc_cmd->cur_iocbq);
	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return (1);
}

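/*
 * Issue an FCP target reset to tgt_id and wait for its completion.
 * Returns SUCCESS or FAILED for the SCSI error handler.
 */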
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
		    unsigned tgt_id, struct lpfc_rport_data *rdata)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	lpfc_cmd->scsi_hba = phba;
	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0702 Issue Target Reset to TGT %d "
			"Data: x%x x%x\n",
			phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
			rdata->pnode->nlp_flag);

	ret = lpfc_sli_issue_iocb_wait(phba,
			&phba->sli.ring[phba->sli.fcp_ring],
			iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		ret = FAILED;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf,0,384);
	if (phba && phba->pcidev){
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

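/*
 * Helpers for polled FCP ring operation: re-arm the poll timer while
 * commands remain on the txcmplq, and service the ring when the timer fires.
 */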
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long iflag;

	spin_lock_irqsave(phba->host->host_lock, iflag);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring (phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

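/*
 * queuecommand entry point: allocate an lpfc_scsi_buf, map the command's
 * data buffer, build the FCP IOCB and post it to the FCP ring. Returns
 * SCSI_MLQUEUE_HOST_BUSY when resources are exhausted.
 */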
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *) cmnd->device->host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}

	lpfc_cmd = lpfc_get_scsi_buf (phba);
	if (lpfc_cmd == NULL) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0707 driver's buffer pool is empty, "
				"IO busied\n", phba->brd_no);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

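/*
 * SCSI error-handler abort entry point: issue an ABORT_XRI/CLOSE_XRI for
 * the outstanding IOCB and wait for the command to complete.
 */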
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	unsigned int loop_count = 0;
	int ret = SUCCESS;

	spin_lock_irq(shost->host_lock);

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in the txq and it is in flight because
	 * the pCmd is still pointing at the SCSI command we have to abort.
	 * There is no need to search the txcmplq. Just send an abort to the
	 * FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (phba->hba_state >= LPFC_LINK_UP)
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring (phba);

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd)
	{
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring (phba);

		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);
		if (++loop_count
		    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0748 abort handler timed out waiting for "
				"abort to complete: ret %#x, ID %d, LUN %d, "
				"snum %#lx\n",
				phba->brd_no, ret, cmnd->device->id,
				cmnd->device->lun, cmnd->serial_number);
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI Layer I/O Abort Request "
			"Status x%x ID %d LUN %d snum %#lx\n",
			phba->brd_no, ret, cmnd->device->id,
			cmnd->device->lun, cmnd->serial_number);

	spin_unlock_irq(shost->host_lock);

	return ret;
}

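/*
 * SCSI error-handler device (LUN) reset entry point: send an FCP LUN reset
 * task management command and flush any I/O still outstanding for the LUN.
 */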
static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int cnt, loopcnt;

	spin_lock_irq(shost->host_lock);
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or nodev timeout expires.
	 */
	while ( 1 ) {
		if (!pnode)
			break;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			spin_lock_irq(phba->host->host_lock);
		}
		if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf (phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0703 Issue LUN Reset to TGT %d LUN %d "
			"Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
			cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);

	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret == IOCB_SUCCESS)
		ret = SUCCESS;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	lpfc_release_scsi_buf(phba, lpfc_cmd);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(phba,
				    &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    0, LPFC_CTX_LUN);
	loopcnt = 0;
	while(cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					cmnd->device->id, cmnd->device->lun,
					LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
		ret = FAILED;
	}

 out_free_scsi_buf:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
			"Data: x%x x%x x%x\n",
			phba->brd_no, cmnd->device->id, cmnd->device->lun,
			ret, cmd_status, cmd_result);

 out:
	spin_unlock_irq(shost->host_lock);
	return ret;
}

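/*
 * SCSI error-handler bus reset entry point: issue a target reset to every
 * mapped target and flush any I/O still outstanding on the host.
 */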
static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf * lpfc_cmd;

	spin_lock_irq(shost->host_lock);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused. Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver. Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		/* Search the mapped list for this target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
			if ((i == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba,
					  i, ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0713 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
		}
	}

	if (err_count == 0)
		ret = SUCCESS;

	lpfc_release_scsi_buf(phba, lpfc_cmd);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets. Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while(cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
			phba->brd_no, cnt, i);
		ret = FAILED;
	}

	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);

 out:
	spin_unlock_irq(shost->host_lock);
	return ret;
}

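/*
 * slave_alloc entry point: bind the scsi_device to its remote port data and
 * grow the global scsi buffer pool, bounded by cfg_hba_queue_depth.
 */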
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers. Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure. The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra. This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = phba->cfg_lun_queue_depth + 2;
	if (total >= phba->cfg_hba_queue_depth) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0704 At limitation of %d preallocated "
				"command buffers\n", phba->brd_no, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0705 Allocation request of %d command "
				"buffers will exceed max of %d. Reducing "
				"allocation request to %d.\n", phba->brd_no,
				num_to_alloc, phba->cfg_hba_queue_depth,
				(phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(phba);
		if (!scsi_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0706 Failed to allocate command "
					"buffer\n", phba->brd_no);
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device. Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_reset_lun_handler,
	.eh_bus_reset_handler	= lpfc_reset_bus_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
	.max_sectors		= 0xFFFF,
};