lpfc_scsi.c

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are set up in the BPL
 * and the BPL BDE is set up in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *psb;
        struct ulp_bde64 *bpl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys;
        uint16_t iotag;

        psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
        if (!psb)
                return NULL;
        memset(psb, 0, sizeof (struct lpfc_scsi_buf));

        /*
         * Get memory from the pci pool to map the virt space to pci bus space
         * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
         * struct fcp_rsp and the number of bde's necessary to support the
         * sg_tablesize.
         */
        psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
                                   &psb->dma_handle);
        if (!psb->data) {
                kfree(psb);
                return NULL;
        }

        /* Initialize virtual ptrs to dma_buf region. */
        memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

        /* Allocate iotag for psb->cur_iocbq. */
        iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
        if (iotag == 0) {
                pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
                              psb->data, psb->dma_handle);
                kfree(psb);
                return NULL;
        }
        psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

        psb->fcp_cmnd = psb->data;
        psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
        psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
                        sizeof(struct fcp_rsp);

        /* Initialize local short-hand pointers. */
        bpl = psb->fcp_bpl;
        pdma_phys = psb->dma_handle;

        /*
         * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
         * list bdes.  Initialize the first two and leave the rest for
         * queuecommand.
         */
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
        bpl->tus.f.bdeFlags = BUFF_USE_CMND;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

        /* Setup the physical region for the FCP RSP */
        pdma_phys += sizeof (struct fcp_cmnd);
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
        bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
        bpl->tus.w = le32_to_cpu(bpl->tus.w);

        /*
         * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
         * initialize it with all known data now.
         */
        pdma_phys += (sizeof (struct fcp_rsp));
        iocb = &psb->cur_iocbq.iocb;
        iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
        iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
        iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
        iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
        iocb->ulpBdeCount = 1;
        iocb->ulpClass = CLASS3;

        return psb;
}

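/*
 * Take the first scsi buffer off the driver's free list, clear its
 * per-command fields (seg_cnt, nonsg_phys), and return it; returns NULL
 * when the list is empty.
 */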
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
        struct lpfc_scsi_buf * lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        if (lpfc_cmd) {
                lpfc_cmd->seg_cnt = 0;
                lpfc_cmd->nonsg_phys = 0;
        }
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
        return lpfc_cmd;
}

static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
        psb->pCmd = NULL;
        list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
        spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

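/*
 * DMA-map the data buffer(s) of a midlayer SCSI command and build the
 * corresponding data BDEs in the buffer's BPL.  Returns 0 on success and
 * 1 if the mapping fails or exceeds the configured segment limit.
 */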
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        dma_addr_t physaddr;
        uint32_t i, num_bde = 0;
        int datadir = scsi_cmnd->sc_data_direction;
        int dma_error;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        bpl += 2;
        if (scsi_cmnd->use_sg) {
                /*
                 * The driver stores the segment count returned from pci_map_sg
                 * because this is a count of dma-mappings used to map the
                 * use_sg pages.  They are not guaranteed to be the same for
                 * those architectures that implement an IOMMU.
                 */
                sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
                lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
                                               scsi_cmnd->use_sg, datadir);
                if (lpfc_cmd->seg_cnt == 0)
                        return 1;

                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
                        printk(KERN_ERR "%s: Too many sg segments from "
                               "dma_map_sg. Config %d, seg_cnt %d",
                               __FUNCTION__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
                        dma_unmap_sg(&phba->pcidev->dev, sgel,
                                     lpfc_cmd->seg_cnt, datadir);
                        return 1;
                }

                /*
                 * The driver established a maximum scatter-gather segment
                 * count during probe that limits the number of sg elements
                 * in any single scsi command.  Just run through the seg_cnt
                 * and format the bde's.
                 */
                for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
                        physaddr = sg_dma_address(sgel);
                        bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                        bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                        bpl->tus.f.bdeSize = sg_dma_len(sgel);
                        if (datadir == DMA_TO_DEVICE)
                                bpl->tus.f.bdeFlags = 0;
                        else
                                bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                        bpl->tus.w = le32_to_cpu(bpl->tus.w);
                        bpl++;
                        sgel++;
                        num_bde++;
                }
        } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
                physaddr = dma_map_single(&phba->pcidev->dev,
                                          scsi_cmnd->request_buffer,
                                          scsi_cmnd->request_bufflen,
                                          datadir);
                dma_error = dma_mapping_error(physaddr);
                if (dma_error) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "%d:0718 Unable to dma_map_single "
                                        "request_buffer: x%x\n",
                                        phba->brd_no, dma_error);
                        return 1;
                }

                lpfc_cmd->nonsg_phys = physaddr;
                bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
                if (datadir == DMA_TO_DEVICE)
                        bpl->tus.f.bdeFlags = 0;
                else
                        bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                bpl->tus.w = le32_to_cpu(bpl->tus.w);
                num_bde = 1;
                bpl++;
        }

        /*
         * Finish initializing those IOCB fields that are dependent on the
         * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
         * reinitialized since all iocb memory resources are used many times
         * for transmit, receive, and continuation bpl's.
         */
        iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb_cmd->un.fcpi64.bdl.bdeSize +=
                (num_bde * sizeof (struct ulp_bde64));
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;
        fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
        return 0;
}

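/*
 * Undo the DMA mappings created by lpfc_scsi_prep_dma_buf, using either
 * dma_unmap_sg or dma_unmap_single depending on how the command's data
 * buffer was mapped.
 */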
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
        /*
         * There are only two special cases to consider.  (1) the scsi command
         * requested scatter-gather usage or (2) the scsi command allocated
         * a request buffer, but did not request use_sg.  There is a third
         * case, but it does not require resource deallocation.
         */
        if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
                dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
                             psb->seg_cnt, psb->pCmd->sc_data_direction);
        } else {
                if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
                        dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
                                         psb->pCmd->request_bufflen,
                                         psb->pCmd->sc_data_direction);
                }
        }
}

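/*
 * Interpret the FCP response for a command that completed with
 * IOSTAT_FCP_RSP_ERROR: copy back sense data, handle residual
 * underruns/overruns and read check errors, and set cmnd->result to the
 * appropriate host and SCSI status.
 */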
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                    struct lpfc_iocbq *rsp_iocb)
{
        struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
        struct lpfc_hba *phba = vport->phba;
        uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
        uint32_t resp_info = fcprsp->rspStatus2;
        uint32_t scsi_status = fcprsp->rspStatus3;
        uint32_t *lp;
        uint32_t host_status = DID_OK;
        uint32_t rsplen = 0;
        uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

        /*
         * If this is a task management command, there is no
         * scsi packet associated with this lpfc_cmd.  The driver
         * consumes it.
         */
        if (fcpcmd->fcpCntl2) {
                scsi_status = 0;
                goto out;
        }

        if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
                uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
                if (snslen > SCSI_SENSE_BUFFERSIZE)
                        snslen = SCSI_SENSE_BUFFERSIZE;

                if (resp_info & RSP_LEN_VALID)
                        rsplen = be32_to_cpu(fcprsp->rspRspLen);
                memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
        }
        lp = (uint32_t *)cmnd->sense_buffer;

        if (!scsi_status && (resp_info & RESID_UNDER))
                logit = LOG_FCP;

        lpfc_printf_log(phba, KERN_WARNING, logit,
                        "%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
                        "Data: x%x x%x x%x x%x x%x\n",
                        phba->brd_no, cmnd->cmnd[0], scsi_status,
                        be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
                        be32_to_cpu(fcprsp->rspResId),
                        be32_to_cpu(fcprsp->rspSnsLen),
                        be32_to_cpu(fcprsp->rspRspLen),
                        fcprsp->rspInfo3);

        if (resp_info & RSP_LEN_VALID) {
                rsplen = be32_to_cpu(fcprsp->rspRspLen);
                if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
                    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
                        host_status = DID_ERROR;
                        goto out;
                }
        }

        cmnd->resid = 0;
        if (resp_info & RESID_UNDER) {
                cmnd->resid = be32_to_cpu(fcprsp->rspResId);

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0716 FCP Read Underrun, expected %d, "
                                "residual %d Data: x%x x%x x%x\n", phba->brd_no,
                                be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
                                fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

                /*
                 * If there is an underrun, check whether the underrun
                 * reported by the storage array matches the underrun
                 * reported by the HBA.  If they differ, a frame was dropped.
                 */
                if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
                    fcpi_parm &&
                    (cmnd->resid != fcpi_parm)) {
                        lpfc_printf_log(phba, KERN_WARNING,
                                        LOG_FCP | LOG_FCP_ERROR,
                                        "%d:0735 FCP Read Check Error and Underrun "
                                        "Data: x%x x%x x%x x%x\n", phba->brd_no,
                                        be32_to_cpu(fcpcmd->fcpDl),
                                        cmnd->resid,
                                        fcpi_parm, cmnd->cmnd[0]);
                        cmnd->resid = cmnd->request_bufflen;
                        host_status = DID_ERROR;
                }

                /*
                 * The cmnd->underflow is the minimum number of bytes that must
                 * be transferred for this command.  Provided a sense condition
                 * is not present, make sure the actual amount transferred is
                 * at least the underflow value or fail.
                 */
                if (!(resp_info & SNS_LEN_VALID) &&
                    (scsi_status == SAM_STAT_GOOD) &&
                    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                        "%d:0717 FCP command x%x residual "
                                        "underrun converted to error "
                                        "Data: x%x x%x x%x\n", phba->brd_no,
                                        cmnd->cmnd[0], cmnd->request_bufflen,
                                        cmnd->resid, cmnd->underflow);
                        host_status = DID_ERROR;
                }
        } else if (resp_info & RESID_OVER) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0720 FCP command x%x residual "
                                "overrun error. Data: x%x x%x \n",
                                phba->brd_no, cmnd->cmnd[0],
                                cmnd->request_bufflen, cmnd->resid);
                host_status = DID_ERROR;

        /*
         * Check SLI validation that all the transfer was actually done
         * (fcpi_parm should be zero).  Apply check only to reads.
         */
        } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
                   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
                                "%d:0734 FCP Read Check Error Data: "
                                "x%x x%x x%x x%x\n", phba->brd_no,
                                be32_to_cpu(fcpcmd->fcpDl),
                                be32_to_cpu(fcprsp->rspResId),
                                fcpi_parm, cmnd->cmnd[0]);
                host_status = DID_ERROR;
                cmnd->resid = cmnd->request_bufflen;
        }

 out:
        cmnd->result = ScsiResult(host_status, scsi_status);
}

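/*
 * IOCB completion handler for FCP commands.  Translates the IOCB status
 * into a midlayer result, logs failures, unmaps the data buffer and calls
 * scsi_done.  When ring polling is not enabled it then adjusts the lun
 * queue depth: ramping it up after a quiet period of successful
 * completions, or backing it off when a target reports a queue full.
 */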
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                        struct lpfc_iocbq *pIocbOut)
{
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) pIocbIn->context1;
        struct lpfc_vport *vport = pIocbIn->vport;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        int result;
        struct scsi_device *sdev, *tmp_sdev;
        int depth = 0;

        lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

        if (lpfc_cmd->status) {
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                else if (lpfc_cmd->status >= IOSTAT_CNT)
                        lpfc_cmd->status = IOSTAT_DEFAULT;

                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0729 FCP cmd x%x failed <%d/%d> status: "
                                "x%x result: x%x Data: x%x x%x\n",
                                phba->brd_no, cmd->cmnd[0], cmd->device->id,
                                cmd->device->lun, lpfc_cmd->status,
                                lpfc_cmd->result, pIocbOut->iocb.ulpContext,
                                lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

                switch (lpfc_cmd->status) {
                case IOSTAT_FCP_RSP_ERROR:
                        /* Call FCP RSP handler to determine result */
                        lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
                        break;
                case IOSTAT_NPORT_BSY:
                case IOSTAT_FABRIC_BSY:
                        cmd->result = ScsiResult(DID_BUS_BUSY, 0);
                        break;
                default:
                        cmd->result = ScsiResult(DID_ERROR, 0);
                        break;
                }

                if ((pnode == NULL)
                    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
                        cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
        } else {
                cmd->result = ScsiResult(DID_OK, 0);
        }

        if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
                uint32_t *lp = (uint32_t *)cmd->sense_buffer;

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
                                "SNS x%x x%x Data: x%x x%x\n",
                                phba->brd_no, cmd->device->id,
                                cmd->device->lun, cmd, cmd->result,
                                *lp, *(lp + 3), cmd->retries, cmd->resid);
        }

        result = cmd->result;
        sdev = cmd->device;
        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
        cmd->scsi_done(cmd);

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_release_scsi_buf(phba, lpfc_cmd);
                return;
        }

        if (!result && pnode != NULL &&
            ((jiffies - pnode->last_ramp_up_time) >
             LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
            ((jiffies - pnode->last_q_full_time) >
             LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
            (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
                shost_for_each_device(tmp_sdev, sdev->host) {
                        if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
                                if (tmp_sdev->id != sdev->id)
                                        continue;
                                if (tmp_sdev->ordered_tags)
                                        scsi_adjust_queue_depth(tmp_sdev,
                                                MSG_ORDERED_TAG,
                                                tmp_sdev->queue_depth+1);
                                else
                                        scsi_adjust_queue_depth(tmp_sdev,
                                                MSG_SIMPLE_TAG,
                                                tmp_sdev->queue_depth+1);

                                pnode->last_ramp_up_time = jiffies;
                        }
                }
        }

        /*
         * Check for queue full.  If the lun is reporting queue full, then
         * back off the lun queue depth to prevent target overloads.
         */
        if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
                pnode->last_q_full_time = jiffies;

                shost_for_each_device(tmp_sdev, sdev->host) {
                        if (tmp_sdev->id != sdev->id)
                                continue;
                        depth = scsi_track_queue_full(tmp_sdev,
                                        tmp_sdev->queue_depth - 1);
                }
                /*
                 * The queue depth cannot be lowered any more.
                 * Modify the returned error code to store
                 * the final depth value set by
                 * scsi_track_queue_full.
                 */
                if (depth == -1)
                        depth = sdev->host->cmd_per_lun;

                if (depth) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0711 detected queue full - lun queue depth "
                                " adjusted to %d.\n", phba->brd_no, depth);
                }
        }

        lpfc_release_scsi_buf(phba, lpfc_cmd);
}

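/*
 * Fill in the FCP CMND and the remaining IOCB fields for a normal SCSI
 * command: LUN, CDB, task attribute, the read/write/control command type,
 * and the remote port context, then hook up the completion routine.
 */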
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                    struct lpfc_nodelist *pnode)
{
        struct lpfc_hba *phba = vport->phba;
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
        int datadir = scsi_cmnd->sc_data_direction;

        lpfc_cmd->fcp_rsp->rspSnsLen = 0;
        /* clear task management bits */
        lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

        int_to_scsilun(lpfc_cmd->pCmd->device->lun,
                       &lpfc_cmd->fcp_cmnd->fcp_lun);

        memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

        if (scsi_cmnd->device->tagged_supported) {
                switch (scsi_cmnd->tag) {
                case HEAD_OF_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
                        break;
                case ORDERED_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = ORDERED_Q;
                        break;
                default:
                        fcp_cmnd->fcpCntl1 = SIMPLE_Q;
                        break;
                }
        } else
                fcp_cmnd->fcpCntl1 = 0;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        if (scsi_cmnd->use_sg) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm =
                                scsi_cmnd->request_bufflen;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm =
                                scsi_cmnd->request_bufflen;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else {
                iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
                iocb_cmd->un.fcpi.fcpi_parm = 0;
                iocb_cmd->ulpPU = 0;
                fcp_cmnd->fcpCntl3 = 0;
                phba->fc4ControlRequests++;
        }

        /*
         * Finish initializing those IOCB fields that are independent
         * of the scsi_cmnd request_buffer
         */
        piocbq->iocb.ulpContext = pnode->nlp_rpi;
        if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
                piocbq->iocb.ulpFCP2Rcvy = 1;

        piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
        piocbq->context1 = lpfc_cmd;
        piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
        piocbq->vport = vport;
}

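/*
 * Build an FCP task management IOCB (for example FCP_TARGET_RESET) for
 * the given LUN.  Returns 1 when the IOCB was prepared and 0 when the
 * target node is not in a MAPPED state.
 */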
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
                             struct lpfc_scsi_buf *lpfc_cmd,
                             unsigned int lun,
                             uint8_t task_mgmt_cmd)
{
        struct lpfc_iocbq *piocbq;
        IOCB_t *piocb;
        struct fcp_cmnd *fcp_cmnd;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;

        if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
                return 0;
        }

        piocbq = &(lpfc_cmd->cur_iocbq);
        piocbq->vport = vport;

        piocb = &piocbq->iocb;

        fcp_cmnd = lpfc_cmd->fcp_cmnd;
        int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
        fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

        piocb->ulpCommand = CMD_FCP_ICMND64_CR;

        piocb->ulpContext = ndlp->nlp_rpi;
        if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
                piocb->ulpFCP2Rcvy = 1;
        }
        piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

        /* ulpTimeout is only one byte */
        if (lpfc_cmd->timeout > 0xff) {
                /*
                 * Do not timeout the command at the firmware level.
                 * The driver will provide the timeout mechanism.
                 */
                piocb->ulpTimeout = 0;
        } else {
                piocb->ulpTimeout = lpfc_cmd->timeout;
        }

        return 1;
}

static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
                      struct lpfc_iocbq *cmdiocbq,
                      struct lpfc_iocbq *rspiocbq)
{
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) cmdiocbq->context1;
        if (lpfc_cmd)
                lpfc_release_scsi_buf(phba, lpfc_cmd);
        return;
}

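/*
 * Issue an FCP target reset to the given target and wait for it to
 * complete.  On a timed-out IOCB the default task-management completion
 * routine is installed so the scsi buffer is released when the response
 * finally arrives.
 */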
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
                    unsigned tgt_id, unsigned int lun,
                    struct lpfc_rport_data *rdata)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *iocbqrsp;
        int ret;

        if (!rdata->pnode)
                return FAILED;

        lpfc_cmd->rdata = rdata;
        ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
                                           FCP_TARGET_RESET);
        if (!ret)
                return FAILED;

        iocbq = &lpfc_cmd->cur_iocbq;
        iocbqrsp = lpfc_sli_get_iocbq(phba);

        if (!iocbqrsp)
                return FAILED;

        /* Issue Target Reset to TGT <num> */
        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                        "%d:0702 Issue Target Reset to TGT %d "
                        "Data: x%x x%x\n",
                        phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
                        rdata->pnode->nlp_flag);

        ret = lpfc_sli_issue_iocb_wait(phba,
                                       &phba->sli.ring[phba->sli.fcp_ring],
                                       iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (ret != IOCB_SUCCESS) {
                if (ret == IOCB_TIMEDOUT)
                        iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
                lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
        } else {
                ret = SUCCESS;
                lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
                lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
        }

        lpfc_sli_release_iocbq(phba, iocbqrsp);
        return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
        struct lpfc_hba *phba = vport->phba;
        int len;
        static char lpfcinfobuf[384];

        memset(lpfcinfobuf, 0, 384);
        if (phba && phba->pcidev) {
                strncpy(lpfcinfobuf, phba->ModelDesc, 256);
                len = strlen(lpfcinfobuf);
                snprintf(lpfcinfobuf + len,
                         384-len,
                         " on PCI bus %02x device %02x irq %d",
                         phba->pcidev->bus->number,
                         phba->pcidev->devfn,
                         phba->pcidev->irq);
                len = strlen(lpfcinfobuf);
                if (phba->Port[0]) {
                        snprintf(lpfcinfobuf + len,
                                 384-len,
                                 " port %s",
                                 phba->Port);
                }
        }
        return lpfcinfobuf;
}

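/*
 * FCP ring polling support.  When interrupt-less polling is enabled, the
 * poll timer is re-armed whenever commands remain on the FCP txcmplq, and
 * the timeout handler walks the ring looking for completions.
 */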
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
        unsigned long poll_tmo_expires =
                (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

        if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
                mod_timer(&phba->fcp_poll_timer,
                          poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
        lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
        struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring (phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }
}

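/*
 * queuecommand entry point called by the SCSI midlayer.  Validates the
 * remote port, grabs a driver scsi buffer, maps the data buffer, builds
 * the FCP IOCB, and issues it on the FCP ring; returns
 * SCSI_MLQUEUE_HOST_BUSY when no buffer is available or the issue fails.
 */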
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
        int err;

        err = fc_remote_port_chkready(rport);
        if (err) {
                cmnd->result = err;
                goto out_fail_command;
        }

        /*
         * Catch race where our node has transitioned, but the
         * transport is still transitioning.
         */
        if (!ndlp) {
                cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
                goto out_fail_command;
        }

        lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL) {
                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0707 driver's buffer pool is empty, "
                                "IO busied\n", phba->brd_no);
                goto out_host_busy;
        }

        /*
         * Store the midlayer's command structure for the completion phase
         * and complete the command initialization.
         */
        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->rdata = rdata;
        lpfc_cmd->timeout = 0;
        cmnd->host_scribble = (unsigned char *)lpfc_cmd;
        cmnd->scsi_done = done;

        err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
        if (err)
                goto out_host_busy_free_buf;

        lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

        err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
                                  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
        if (err)
                goto out_host_busy_free_buf;

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring(phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }

        return 0;

 out_host_busy_free_buf:
        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
        lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
        return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
        done(cmnd);
        return 0;
}

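/*
 * Hold off SCSI error handling while the remote port associated with the
 * failed command is in the BLOCKED state, polling once per second until
 * the transport unblocks it.
 */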
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

        spin_lock_irq(shost->host_lock);
        while (rport->port_state == FC_PORTSTATE_BLOCKED) {
                spin_unlock_irq(shost->host_lock);
                msleep(1000);
                spin_lock_irq(shost->host_lock);
        }
        spin_unlock_irq(shost->host_lock);
        return;
}

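/*
 * eh_abort_handler entry point.  Sends an ABTS (or a close when the link
 * is down) for the IOCB carrying the failed SCSI command, then waits for
 * the abort to complete before reporting the result to the midlayer.
 */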
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        struct lpfc_iocbq *iocb;
        struct lpfc_iocbq *abtsiocb;
        struct lpfc_scsi_buf *lpfc_cmd;
        IOCB_t *cmd, *icmd;
        unsigned int loop_count = 0;
        int ret = SUCCESS;

        lpfc_block_error_handler(cmnd);
        lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
        BUG_ON(!lpfc_cmd);

        /*
         * If pCmd field of the corresponding lpfc_scsi_buf structure
         * points to a different SCSI command, then the driver has
         * already completed this command, but the midlayer did not
         * see the completion before the eh fired.  Just return
         * SUCCESS.
         */
        iocb = &lpfc_cmd->cur_iocbq;
        if (lpfc_cmd->pCmd != cmnd)
                goto out;

        BUG_ON(iocb->context1 != lpfc_cmd);

        abtsiocb = lpfc_sli_get_iocbq(phba);
        if (abtsiocb == NULL) {
                ret = FAILED;
                goto out;
        }

        /*
         * The scsi command cannot be in the txq; it is in flight because
         * pCmd is still pointing at the SCSI command we have to abort.
         * There is no need to search the txcmplq.  Just send an abort to
         * the FW.
         */
        cmd = &iocb->iocb;
        icmd = &abtsiocb->iocb;
        icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
        icmd->un.acxri.abortContextTag = cmd->ulpContext;
        icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

        icmd->ulpLe = 1;
        icmd->ulpClass = cmd->ulpClass;
        if (lpfc_is_link_up(phba))
                icmd->ulpCommand = CMD_ABORT_XRI_CN;
        else
                icmd->ulpCommand = CMD_CLOSE_XRI_CN;

        abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
        abtsiocb->vport = vport;
        if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
                lpfc_sli_release_iocbq(phba, abtsiocb);
                ret = FAILED;
                goto out;
        }

        if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                lpfc_sli_poll_fcp_ring (phba);

        /* Wait for abort to complete */
        while (lpfc_cmd->pCmd == cmnd) {
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_sli_poll_fcp_ring (phba);

                schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
                if (++loop_count
                    > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
                        break;
        }

        if (lpfc_cmd->pCmd == cmnd) {
                ret = FAILED;
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                "%d:0748 abort handler timed out waiting for "
                                "abort to complete: ret %#x, ID %d, LUN %d, "
                                "snum %#lx\n",
                                phba->brd_no, ret, cmnd->device->id,
                                cmnd->device->lun, cmnd->serial_number);
        }

 out:
        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                        "%d:0749 SCSI Layer I/O Abort Request "
                        "Status x%x ID %d LUN %d snum %#lx\n",
                        phba->brd_no, ret, cmnd->device->id,
                        cmnd->device->lun, cmnd->serial_number);

        return ret;
}

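/*
 * eh_device_reset_handler entry point.  Waits for the target to reach the
 * MAPPED state (or for the devloss timeout), issues an FCP target reset,
 * and then flushes any I/O still outstanding on the LUN before returning.
 */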
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_iocbq *iocbq, *iocbqrsp;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        uint32_t cmd_result = 0, cmd_status = 0;
        int ret = FAILED;
        int iocb_status = IOCB_SUCCESS;
        int cnt, loopcnt;

        lpfc_block_error_handler(cmnd);
        loopcnt = 0;
        /*
         * If target is not in a MAPPED state, delay the reset until
         * target is rediscovered or devloss timeout expires.
         */
        while (1) {
                if (!pnode)
                        goto out;

                if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        loopcnt++;
                        rdata = cmnd->device->hostdata;
                        if (!rdata ||
                            (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                                "%d:0721 LUN Reset rport failure:"
                                                " cnt x%x rdata x%p\n",
                                                phba->brd_no, loopcnt, rdata);
                                goto out;
                        }
                        pnode = rdata->pnode;
                        if (!pnode)
                                goto out;
                }
                if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
                        break;
        }

        lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
                goto out;

        lpfc_cmd->timeout = 60;
        lpfc_cmd->rdata = rdata;

        ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
                                           FCP_TARGET_RESET);
        if (!ret)
                goto out_free_scsi_buf;

        iocbq = &lpfc_cmd->cur_iocbq;

        /* get a buffer for this IOCB command response */
        iocbqrsp = lpfc_sli_get_iocbq(phba);
        if (iocbqrsp == NULL)
                goto out_free_scsi_buf;

        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                        "%d:0703 Issue target reset to TGT %d LUN %d rpi x%x "
                        "nlp_flag x%x\n", phba->brd_no, cmnd->device->id,
                        cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);

        iocb_status = lpfc_sli_issue_iocb_wait(phba,
                                &phba->sli.ring[phba->sli.fcp_ring],
                                iocbq, iocbqrsp, lpfc_cmd->timeout);

        if (iocb_status == IOCB_TIMEDOUT)
                iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

        if (iocb_status == IOCB_SUCCESS)
                ret = SUCCESS;
        else
                ret = iocb_status;

        cmd_result = iocbqrsp->iocb.un.ulpWord[4];
        cmd_status = iocbqrsp->iocb.ulpStatus;

        lpfc_sli_release_iocbq(phba, iocbqrsp);

        /*
         * All outstanding txcmplq I/Os should have been aborted by the device.
         * Unfortunately, some targets do not abide by this forcing the driver
         * to double check.
         */
        cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                                cmnd->device->id, cmnd->device->lun,
                                LPFC_CTX_LUN);
        if (cnt)
                lpfc_sli_abort_iocb(phba,
                                    &phba->sli.ring[phba->sli.fcp_ring],
                                    cmnd->device->id, cmnd->device->lun,
                                    0, LPFC_CTX_LUN);
        loopcnt = 0;
        while (cnt) {
                schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

                if (++loopcnt
                    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
                        break;

                cnt = lpfc_sli_sum_iocb(phba,
                                        &phba->sli.ring[phba->sli.fcp_ring],
                                        cmnd->device->id, cmnd->device->lun,
                                        LPFC_CTX_LUN);
        }

        if (cnt) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0719 device reset I/O flush failure: cnt x%x\n",
                        phba->brd_no, cnt);
                ret = FAILED;
        }

 out_free_scsi_buf:
        if (iocb_status != IOCB_TIMEDOUT) {
                lpfc_release_scsi_buf(phba, lpfc_cmd);
        }
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0713 SCSI layer issued device reset (%d, %d) "
                        "return x%x status x%x result x%x\n",
                        phba->brd_no, cmnd->device->id, cmnd->device->lun,
                        ret, cmd_status, cmd_result);

 out:
        return ret;
}

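/*
 * eh_bus_reset_handler entry point.  Issues a target reset to every
 * mapped target known to the driver and then flushes any I/O that the
 * targets failed to abort on their own.
 */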
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL;
        int match;
        int ret = FAILED, i, err_count = 0;
        int cnt, loopcnt;
        struct lpfc_scsi_buf * lpfc_cmd;

        lpfc_block_error_handler(cmnd);

        lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
                goto out;

        /* The lpfc_cmd storage is reused.  Set all loop invariants. */
        lpfc_cmd->timeout = 60;

        /*
         * Since the driver manages a single bus device, reset all
         * targets known to the driver.  Should any target reset
         * fail, this routine returns failure to the midlayer.
         */
        for (i = 0; i < LPFC_MAX_TARGET; i++) {
                /* Search for mapped node by target ID */
                match = 0;
                spin_lock_irq(shost->host_lock);
                list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
                            i == ndlp->nlp_sid &&
                            ndlp->rport) {
                                match = 1;
                                break;
                        }
                }
                spin_unlock_irq(shost->host_lock);
                if (!match)
                        continue;

                ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
                                          cmnd->device->lun,
                                          ndlp->rport->dd_data);
                if (ret != SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                "%d:0700 Bus Reset on target %d failed\n",
                                phba->brd_no, i);
                        err_count++;
                        break;
                }
        }

        if (ret != IOCB_TIMEDOUT)
                lpfc_release_scsi_buf(phba, lpfc_cmd);

        if (err_count == 0)
                ret = SUCCESS;
        else
                ret = FAILED;

        /*
         * All outstanding txcmplq I/Os should have been aborted by
         * the targets.  Unfortunately, some targets do not abide by
         * this forcing the driver to double check.
         */
        cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                                0, 0, LPFC_CTX_HOST);
        if (cnt)
                lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                                    0, 0, 0, LPFC_CTX_HOST);
        loopcnt = 0;
        while (cnt) {
                schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

                if (++loopcnt
                    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
                        break;

                cnt = lpfc_sli_sum_iocb(phba,
                                        &phba->sli.ring[phba->sli.fcp_ring],
                                        0, 0, LPFC_CTX_HOST);
        }

        if (cnt) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
                   phba->brd_no, cnt, i);
                ret = FAILED;
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
                        phba->brd_no, ret);
 out:
        return ret;
}

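/*
 * slave_alloc entry point.  Verifies the remote port is usable, stores
 * the rport data in the scsi_device, and pre-allocates scsi buffers for
 * the new LUN up to the HBA queue depth limit.
 */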
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *scsi_buf = NULL;
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
        uint32_t total = 0, i;
        uint32_t num_to_alloc = 0;
        unsigned long flags;

        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;

        sdev->hostdata = rport->dd_data;

        /*
         * Populate the cmds_per_lun count of scsi_bufs into this host's
         * globally available list of scsi buffers.  Don't allocate more
         * than the HBA limit conveyed to the midlayer via the host
         * structure.  The formula accounts for the lun_queue_depth +
         * error handlers + 1 extra.  This list of scsi bufs exists for
         * the lifetime of the driver.
         */
        total = phba->total_scsi_bufs;
        num_to_alloc = phba->cfg_lun_queue_depth + 2;
        if (total >= phba->cfg_hba_queue_depth) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0704 At limitation of %d preallocated "
                                "command buffers\n", phba->brd_no, total);
                return 0;
        } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0705 Allocation request of %d command "
                                "buffers will exceed max of %d.  Reducing "
                                "allocation request to %d.\n", phba->brd_no,
                                num_to_alloc, phba->cfg_hba_queue_depth,
                                (phba->cfg_hba_queue_depth - total));
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }

        for (i = 0; i < num_to_alloc; i++) {
                scsi_buf = lpfc_new_scsi_buf(vport);
                if (!scsi_buf) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "%d:0706 Failed to allocate command "
                                        "buffer\n", phba->brd_no);
                        break;
                }

                spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
                phba->total_scsi_bufs++;
                list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
                spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
        }
        return 0;
}

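/*
 * slave_configure entry point.  Sets the lun queue depth (with or without
 * tagged queuing), propagates the devloss timeout to the fc transport,
 * and kicks the FCP ring poller when polling mode is enabled.
 */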
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
        else
                scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

        /*
         * Initialize the fc transport attributes for the target
         * containing this scsi device.  Also note that the driver's
         * target pointer is stored in the starget_data for the
         * driver's sysfs entry point functions.
         */
        rport->dev_loss_tmo = phba->cfg_devloss_tmo;

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring(phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }

        return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
        sdev->hostdata = NULL;
        return;
}

struct scsi_host_template lpfc_template = {
        .module = THIS_MODULE,
        .name = LPFC_DRIVER_NAME,
        .info = lpfc_info,
        .queuecommand = lpfc_queuecommand,
        .eh_abort_handler = lpfc_abort_handler,
        .eh_device_reset_handler = lpfc_device_reset_handler,
        .eh_bus_reset_handler = lpfc_bus_reset_handler,
        .slave_alloc = lpfc_slave_alloc,
        .slave_configure = lpfc_slave_configure,
        .slave_destroy = lpfc_slave_destroy,
        .scan_finished = lpfc_scan_finished,
        .scan_start = lpfc_scan_start,
        .this_id = -1,
        .sg_tablesize = LPFC_SG_SEG_CNT,
        .cmd_per_lun = LPFC_CMD_PER_LUN,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = lpfc_hba_attrs,
        .max_sectors = 0xFFFF,
};