lpfc_scsi.c

  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2004-2008 Emulex. All rights reserved. *
  5. * EMULEX and SLI are trademarks of Emulex. *
  6. * www.emulex.com *
  7. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  8. * *
  9. * This program is free software; you can redistribute it and/or *
  10. * modify it under the terms of version 2 of the GNU General *
  11. * Public License as published by the Free Software Foundation. *
  12. * This program is distributed in the hope that it will be useful. *
  13. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  14. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  15. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  16. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  17. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  18. * more details, a copy of which can be found in the file COPYING *
  19. * included with this package. *
  20. *******************************************************************/
  21. #include <linux/pci.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/delay.h>
  24. #include <scsi/scsi.h>
  25. #include <scsi/scsi_device.h>
  26. #include <scsi/scsi_host.h>
  27. #include <scsi/scsi_tcq.h>
  28. #include <scsi/scsi_transport_fc.h>
  29. #include "lpfc_version.h"
  30. #include "lpfc_hw.h"
  31. #include "lpfc_sli.h"
  32. #include "lpfc_disc.h"
  33. #include "lpfc_scsi.h"
  34. #include "lpfc.h"
  35. #include "lpfc_logmsg.h"
  36. #include "lpfc_crtn.h"
  37. #include "lpfc_vport.h"
  38. #define LPFC_RESET_WAIT 2
  39. #define LPFC_ABORT_WAIT 2
  40. /*
  41. * This function is called with no lock held when there is a resource
  42. * error in the driver or in the firmware.
  43. */
  44. void
  45. lpfc_adjust_queue_depth(struct lpfc_hba *phba)
  46. {
  47. unsigned long flags;
  48. spin_lock_irqsave(&phba->hbalock, flags);
  49. atomic_inc(&phba->num_rsrc_err);
  50. phba->last_rsrc_error_time = jiffies;
  51. if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
  52. spin_unlock_irqrestore(&phba->hbalock, flags);
  53. return;
  54. }
  55. phba->last_ramp_down_time = jiffies;
  56. spin_unlock_irqrestore(&phba->hbalock, flags);
  57. spin_lock_irqsave(&phba->pport->work_port_lock, flags);
  58. if ((phba->pport->work_port_events &
  59. WORKER_RAMP_DOWN_QUEUE) == 0) {
  60. phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
  61. }
  62. spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
  63. spin_lock_irqsave(&phba->hbalock, flags);
  64. if (phba->work_wait)
  65. wake_up(phba->work_wait);
  66. spin_unlock_irqrestore(&phba->hbalock, flags);
  67. return;
  68. }
  69. /*
  70. * This function is called with no lock held when there is a successful
  71. * SCSI command completion.
  72. */
  73. static inline void
  74. lpfc_rampup_queue_depth(struct lpfc_vport *vport,
  75. struct scsi_device *sdev)
  76. {
  77. unsigned long flags;
  78. struct lpfc_hba *phba = vport->phba;
  79. atomic_inc(&phba->num_cmd_success);
  80. if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
  81. return;
  82. spin_lock_irqsave(&phba->hbalock, flags);
  83. if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
  84. ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
  85. spin_unlock_irqrestore(&phba->hbalock, flags);
  86. return;
  87. }
  88. phba->last_ramp_up_time = jiffies;
  89. spin_unlock_irqrestore(&phba->hbalock, flags);
  90. spin_lock_irqsave(&phba->pport->work_port_lock, flags);
  91. if ((phba->pport->work_port_events &
  92. WORKER_RAMP_UP_QUEUE) == 0) {
  93. phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
  94. }
  95. spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
  96. spin_lock_irqsave(&phba->hbalock, flags);
  97. if (phba->work_wait)
  98. wake_up(phba->work_wait);
  99. spin_unlock_irqrestore(&phba->hbalock, flags);
  100. }
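/*
 * Worker for WORKER_RAMP_DOWN_QUEUE. Scales every scsi_device queue depth
 * down in proportion to the resource-error rate seen since the last
 * adjustment: new_depth = depth - (depth * num_rsrc_err /
 * (num_rsrc_err + num_cmd_success)). For example, a depth of 30 with 10
 * resource errors and 20 successes is reduced by 30 * 10 / 30 = 10, to 20.
 * Both counters are cleared afterwards.
 */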
  101. void
  102. lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
  103. {
  104. struct lpfc_vport **vports;
  105. struct Scsi_Host *shost;
  106. struct scsi_device *sdev;
  107. unsigned long new_queue_depth;
  108. unsigned long num_rsrc_err, num_cmd_success;
  109. int i;
  110. num_rsrc_err = atomic_read(&phba->num_rsrc_err);
  111. num_cmd_success = atomic_read(&phba->num_cmd_success);
  112. vports = lpfc_create_vport_work_array(phba);
  113. if (vports != NULL)
  114. for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  115. shost = lpfc_shost_from_vport(vports[i]);
  116. shost_for_each_device(sdev, shost) {
  117. new_queue_depth =
  118. sdev->queue_depth * num_rsrc_err /
  119. (num_rsrc_err + num_cmd_success);
  120. if (!new_queue_depth)
  121. new_queue_depth = sdev->queue_depth - 1;
  122. else
  123. new_queue_depth = sdev->queue_depth -
  124. new_queue_depth;
  125. if (sdev->ordered_tags)
  126. scsi_adjust_queue_depth(sdev,
  127. MSG_ORDERED_TAG,
  128. new_queue_depth);
  129. else
  130. scsi_adjust_queue_depth(sdev,
  131. MSG_SIMPLE_TAG,
  132. new_queue_depth);
  133. }
  134. }
  135. lpfc_destroy_vport_work_array(phba, vports);
  136. atomic_set(&phba->num_rsrc_err, 0);
  137. atomic_set(&phba->num_cmd_success, 0);
  138. }
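/*
 * Worker for WORKER_RAMP_UP_QUEUE. Bumps the queue depth of every
 * scsi_device by one, never exceeding the configured cfg_lun_queue_depth,
 * and clears the resource-error/command-success counters.
 */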
  139. void
  140. lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
  141. {
  142. struct lpfc_vport **vports;
  143. struct Scsi_Host *shost;
  144. struct scsi_device *sdev;
  145. int i;
  146. vports = lpfc_create_vport_work_array(phba);
  147. if (vports != NULL)
  148. for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  149. shost = lpfc_shost_from_vport(vports[i]);
  150. shost_for_each_device(sdev, shost) {
  151. if (vports[i]->cfg_lun_queue_depth <=
  152. sdev->queue_depth)
  153. continue;
  154. if (sdev->ordered_tags)
  155. scsi_adjust_queue_depth(sdev,
  156. MSG_ORDERED_TAG,
  157. sdev->queue_depth+1);
  158. else
  159. scsi_adjust_queue_depth(sdev,
  160. MSG_SIMPLE_TAG,
  161. sdev->queue_depth+1);
  162. }
  163. }
  164. lpfc_destroy_vport_work_array(phba, vports);
  165. atomic_set(&phba->num_rsrc_err, 0);
  166. atomic_set(&phba->num_cmd_success, 0);
  167. }
  168. /*
  169. * This routine allocates a scsi buffer, which contains all the necessary
  170. * information needed to initiate a SCSI I/O. The non-DMAable buffer region
  171. * contains information to build the IOCB. The DMAable region contains
  172. * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
  173. * allocating memory, the FCP CMND and FCP RSP BDEs are set up in the BPL
  174. * and the BPL BDE is setup in the IOCB.
  175. */
  176. static struct lpfc_scsi_buf *
  177. lpfc_new_scsi_buf(struct lpfc_vport *vport)
  178. {
  179. struct lpfc_hba *phba = vport->phba;
  180. struct lpfc_scsi_buf *psb;
  181. struct ulp_bde64 *bpl;
  182. IOCB_t *iocb;
  183. dma_addr_t pdma_phys;
  184. uint16_t iotag;
  185. psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
  186. if (!psb)
  187. return NULL;
  188. /*
  189. * Get memory from the pci pool to map the virt space to pci bus space
  190. * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
  191. * struct fcp_rsp and the number of bde's necessary to support the
  192. * sg_tablesize.
  193. */
  194. psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
  195. &psb->dma_handle);
  196. if (!psb->data) {
  197. kfree(psb);
  198. return NULL;
  199. }
  200. /* Initialize virtual ptrs to dma_buf region. */
  201. memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
  202. /* Allocate iotag for psb->cur_iocbq. */
  203. iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
  204. if (iotag == 0) {
  205. pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
  206. psb->data, psb->dma_handle);
  207. kfree (psb);
  208. return NULL;
  209. }
  210. psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
  211. psb->fcp_cmnd = psb->data;
  212. psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
  213. psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
  214. sizeof(struct fcp_rsp);
  215. /* Initialize local short-hand pointers. */
  216. bpl = psb->fcp_bpl;
  217. pdma_phys = psb->dma_handle;
  218. /*
  219. * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
  220. * list bdes. Initialize the first two and leave the rest for
  221. * queuecommand.
  222. */
  223. bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
  224. bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
  225. bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
  226. bpl->tus.f.bdeFlags = BUFF_USE_CMND;
  227. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  228. bpl++;
  229. /* Setup the physical region for the FCP RSP */
  230. pdma_phys += sizeof (struct fcp_cmnd);
  231. bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
  232. bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
  233. bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
  234. bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
  235. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  236. /*
  237. * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
  238. * initialize it with all known data now.
  239. */
  240. pdma_phys += (sizeof (struct fcp_rsp));
  241. iocb = &psb->cur_iocbq.iocb;
  242. iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
  243. iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
  244. iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
  245. iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
  246. iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
  247. iocb->ulpBdeCount = 1;
  248. iocb->ulpClass = CLASS3;
  249. return psb;
  250. }
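/*
 * Take a pre-allocated scsi buffer from the head of phba->lpfc_scsi_buf_list
 * under scsi_buf_list_lock and reset its segment bookkeeping. Returns NULL
 * when the pool is empty.
 */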
  251. static struct lpfc_scsi_buf*
  252. lpfc_get_scsi_buf(struct lpfc_hba * phba)
  253. {
  254. struct lpfc_scsi_buf * lpfc_cmd = NULL;
  255. struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
  256. unsigned long iflag = 0;
  257. spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
  258. list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
  259. if (lpfc_cmd) {
  260. lpfc_cmd->seg_cnt = 0;
  261. lpfc_cmd->nonsg_phys = 0;
  262. }
  263. spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
  264. return lpfc_cmd;
  265. }
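/*
 * Return a scsi buffer to the pool: clear its pCmd pointer and add it back
 * to the tail of phba->lpfc_scsi_buf_list under scsi_buf_list_lock.
 */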
  266. static void
  267. lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
  268. {
  269. unsigned long iflag = 0;
  270. spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
  271. psb->pCmd = NULL;
  272. list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
  273. spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
  274. }
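/*
 * DMA-map the midlayer scatter-gather list (if any), build one data BDE per
 * mapped segment after the FCP_CMND and FCP_RSP BDEs, and finish the IOCB
 * bdeSize and fcpDl fields. Returns 0 on success, or 1 if the mapping fails
 * or the segment count exceeds cfg_sg_seg_cnt.
 */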
  275. static int
  276. lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
  277. {
  278. struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
  279. struct scatterlist *sgel = NULL;
  280. struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
  281. struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
  282. IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
  283. dma_addr_t physaddr;
  284. uint32_t i, num_bde = 0;
  285. int nseg, datadir = scsi_cmnd->sc_data_direction;
  286. /*
  287. * There are three possibilities here - use scatter-gather segment, use
  288. * the single mapping, or neither. Start the lpfc command prep by
  289. * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
  290. * data bde entry.
  291. */
  292. bpl += 2;
  293. if (scsi_sg_count(scsi_cmnd)) {
  294. /*
  295. * The driver stores the segment count returned from pci_map_sg
  296. * because this is a count of dma-mappings used to map the use_sg
  297. * pages. They are not guaranteed to be the same for those
  298. * architectures that implement an IOMMU.
  299. */
  300. nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
  301. scsi_sg_count(scsi_cmnd), datadir);
  302. if (unlikely(!nseg))
  303. return 1;
  304. lpfc_cmd->seg_cnt = nseg;
  305. if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
  306. printk(KERN_ERR "%s: Too many sg segments from "
  307. "dma_map_sg. Config %d, seg_cnt %d",
  308. __FUNCTION__, phba->cfg_sg_seg_cnt,
  309. lpfc_cmd->seg_cnt);
  310. scsi_dma_unmap(scsi_cmnd);
  311. return 1;
  312. }
  313. /*
  314. * The driver established a maximum scatter-gather segment count
  315. * during probe that limits the number of sg elements in any
  316. * single scsi command. Just run through the seg_cnt and format
  317. * the bde's.
  318. */
  319. scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
  320. physaddr = sg_dma_address(sgel);
  321. bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
  322. bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
  323. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  324. if (datadir == DMA_TO_DEVICE)
  325. bpl->tus.f.bdeFlags = 0;
  326. else
  327. bpl->tus.f.bdeFlags = BUFF_USE_RCV;
  328. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  329. bpl++;
  330. num_bde++;
  331. }
  332. }
  333. /*
  334. * Finish initializing those IOCB fields that are dependent on the
  335. * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
  336. * reinitialized since all iocb memory resources are used many times
  337. * for transmit, receive, and continuation bpl's.
  338. */
  339. iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
  340. iocb_cmd->un.fcpi64.bdl.bdeSize +=
  341. (num_bde * sizeof (struct ulp_bde64));
  342. iocb_cmd->ulpBdeCount = 1;
  343. iocb_cmd->ulpLe = 1;
  344. fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
  345. return 0;
  346. }
  347. static void
  348. lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
  349. {
  350. /*
  351. * There are only two special cases to consider. (1) the scsi command
  352. * requested scatter-gather usage or (2) the scsi command allocated
  353. * a request buffer, but did not request use_sg. There is a third
  354. * case, but it does not require resource deallocation.
  355. */
  356. if (psb->seg_cnt > 0)
  357. scsi_dma_unmap(psb->pCmd);
  358. }
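/*
 * Translate an IOSTAT_FCP_RSP_ERROR completion into a midlayer result:
 * copy any sense data, validate the response length, account for residual
 * underrun/overrun, and cross-check the HBA byte count (fcpi_parm) against
 * the target-reported residual on reads, where a mismatch indicates dropped
 * frames. The result is stored in cmnd->result.
 */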
  359. static void
  360. lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
  361. struct lpfc_iocbq *rsp_iocb)
  362. {
  363. struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
  364. struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
  365. struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
  366. uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
  367. uint32_t resp_info = fcprsp->rspStatus2;
  368. uint32_t scsi_status = fcprsp->rspStatus3;
  369. uint32_t *lp;
  370. uint32_t host_status = DID_OK;
  371. uint32_t rsplen = 0;
  372. uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
  373. /*
  374. * If this is a task management command, there is no
  375. * scsi packet associated with this lpfc_cmd. The driver
  376. * consumes it.
  377. */
  378. if (fcpcmd->fcpCntl2) {
  379. scsi_status = 0;
  380. goto out;
  381. }
  382. if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
  383. uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
  384. if (snslen > SCSI_SENSE_BUFFERSIZE)
  385. snslen = SCSI_SENSE_BUFFERSIZE;
  386. if (resp_info & RSP_LEN_VALID)
  387. rsplen = be32_to_cpu(fcprsp->rspRspLen);
  388. memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
  389. }
  390. lp = (uint32_t *)cmnd->sense_buffer;
  391. if (!scsi_status && (resp_info & RESID_UNDER))
  392. logit = LOG_FCP;
  393. lpfc_printf_vlog(vport, KERN_WARNING, logit,
  394. "0730 FCP command x%x failed: x%x SNS x%x x%x "
  395. "Data: x%x x%x x%x x%x x%x\n",
  396. cmnd->cmnd[0], scsi_status,
  397. be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
  398. be32_to_cpu(fcprsp->rspResId),
  399. be32_to_cpu(fcprsp->rspSnsLen),
  400. be32_to_cpu(fcprsp->rspRspLen),
  401. fcprsp->rspInfo3);
  402. if (resp_info & RSP_LEN_VALID) {
  403. rsplen = be32_to_cpu(fcprsp->rspRspLen);
  404. if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
  405. (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
  406. host_status = DID_ERROR;
  407. goto out;
  408. }
  409. }
  410. scsi_set_resid(cmnd, 0);
  411. if (resp_info & RESID_UNDER) {
  412. scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
  413. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  414. "0716 FCP Read Underrun, expected %d, "
  415. "residual %d Data: x%x x%x x%x\n",
  416. be32_to_cpu(fcpcmd->fcpDl),
  417. scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
  418. cmnd->underflow);
  419. /*
  420. * If there is an underrun, check whether the underrun reported by the
  421. * storage array is the same as the underrun reported by the HBA.
  422. * If they differ, a frame has been dropped.
  423. */
  424. if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
  425. fcpi_parm &&
  426. (scsi_get_resid(cmnd) != fcpi_parm)) {
  427. lpfc_printf_vlog(vport, KERN_WARNING,
  428. LOG_FCP | LOG_FCP_ERROR,
  429. "0735 FCP Read Check Error "
  430. "and Underrun Data: x%x x%x x%x x%x\n",
  431. be32_to_cpu(fcpcmd->fcpDl),
  432. scsi_get_resid(cmnd), fcpi_parm,
  433. cmnd->cmnd[0]);
  434. scsi_set_resid(cmnd, scsi_bufflen(cmnd));
  435. host_status = DID_ERROR;
  436. }
  437. /*
  438. * The cmnd->underflow is the minimum number of bytes that must
  439. * be transferred for this command. Provided a sense condition
  440. * is not present, make sure the actual amount transferred is at
  441. * least the underflow value or fail.
  442. */
  443. if (!(resp_info & SNS_LEN_VALID) &&
  444. (scsi_status == SAM_STAT_GOOD) &&
  445. (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
  446. < cmnd->underflow)) {
  447. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  448. "0717 FCP command x%x residual "
  449. "underrun converted to error "
  450. "Data: x%x x%x x%x\n",
  451. cmnd->cmnd[0], scsi_bufflen(cmnd),
  452. scsi_get_resid(cmnd), cmnd->underflow);
  453. host_status = DID_ERROR;
  454. }
  455. } else if (resp_info & RESID_OVER) {
  456. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  457. "0720 FCP command x%x residual overrun error. "
  458. "Data: x%x x%x \n", cmnd->cmnd[0],
  459. scsi_bufflen(cmnd), scsi_get_resid(cmnd));
  460. host_status = DID_ERROR;
  461. /*
  462. * Check SLI validation that all the transfer was actually done
  463. * (fcpi_parm should be zero). Apply check only to reads.
  464. */
  465. } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
  466. (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
  467. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
  468. "0734 FCP Read Check Error Data: "
  469. "x%x x%x x%x x%x\n",
  470. be32_to_cpu(fcpcmd->fcpDl),
  471. be32_to_cpu(fcprsp->rspResId),
  472. fcpi_parm, cmnd->cmnd[0]);
  473. host_status = DID_ERROR;
  474. scsi_set_resid(cmnd, scsi_bufflen(cmnd));
  475. }
  476. out:
  477. cmnd->result = ScsiResult(host_status, scsi_status);
  478. }
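/*
 * Completion handler for FCP command IOCBs. Maps ulpStatus/ulpWord[4] to a
 * SCSI result, unmaps the data buffer and calls scsi_done(), then applies
 * queue-depth heuristics: ramp the depth up after sustained success, or use
 * scsi_track_queue_full() when the LUN returns SAM_STAT_TASK_SET_FULL.
 * Finally the scsi buffer is released and any waiter (e.g. the abort
 * handler) is woken.
 */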
  479. static void
  480. lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
  481. struct lpfc_iocbq *pIocbOut)
  482. {
  483. struct lpfc_scsi_buf *lpfc_cmd =
  484. (struct lpfc_scsi_buf *) pIocbIn->context1;
  485. struct lpfc_vport *vport = pIocbIn->vport;
  486. struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
  487. struct lpfc_nodelist *pnode = rdata->pnode;
  488. struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
  489. int result;
  490. struct scsi_device *sdev, *tmp_sdev;
  491. int depth = 0;
  492. unsigned long flags;
  493. lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
  494. lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
  495. if (lpfc_cmd->status) {
  496. if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
  497. (lpfc_cmd->result & IOERR_DRVR_MASK))
  498. lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
  499. else if (lpfc_cmd->status >= IOSTAT_CNT)
  500. lpfc_cmd->status = IOSTAT_DEFAULT;
  501. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  502. "0729 FCP cmd x%x failed <%d/%d> "
  503. "status: x%x result: x%x Data: x%x x%x\n",
  504. cmd->cmnd[0],
  505. cmd->device ? cmd->device->id : 0xffff,
  506. cmd->device ? cmd->device->lun : 0xffff,
  507. lpfc_cmd->status, lpfc_cmd->result,
  508. pIocbOut->iocb.ulpContext,
  509. lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
  510. switch (lpfc_cmd->status) {
  511. case IOSTAT_FCP_RSP_ERROR:
  512. /* Call FCP RSP handler to determine result */
  513. lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
  514. break;
  515. case IOSTAT_NPORT_BSY:
  516. case IOSTAT_FABRIC_BSY:
  517. cmd->result = ScsiResult(DID_BUS_BUSY, 0);
  518. break;
  519. case IOSTAT_LOCAL_REJECT:
  520. if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
  521. lpfc_cmd->result == IOERR_NO_RESOURCES ||
  522. lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
  523. cmd->result = ScsiResult(DID_REQUEUE, 0);
  524. break;
  525. } /* else: fall through */
  526. default:
  527. cmd->result = ScsiResult(DID_ERROR, 0);
  528. break;
  529. }
  530. if (!pnode || !NLP_CHK_NODE_ACT(pnode)
  531. || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
  532. cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
  533. } else {
  534. cmd->result = ScsiResult(DID_OK, 0);
  535. }
  536. if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
  537. uint32_t *lp = (uint32_t *)cmd->sense_buffer;
  538. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  539. "0710 Iodone <%d/%d> cmd %p, error "
  540. "x%x SNS x%x x%x Data: x%x x%x\n",
  541. cmd->device->id, cmd->device->lun, cmd,
  542. cmd->result, *lp, *(lp + 3), cmd->retries,
  543. scsi_get_resid(cmd));
  544. }
  545. result = cmd->result;
  546. sdev = cmd->device;
  547. lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
  548. spin_lock_irqsave(sdev->host->host_lock, flags);
  549. lpfc_cmd->pCmd = NULL; /* This must be done before scsi_done */
  550. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  551. cmd->scsi_done(cmd);
  552. if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
  553. /*
  554. * If there is a thread waiting for command completion
  555. * wake up the thread.
  556. */
  557. spin_lock_irqsave(sdev->host->host_lock, flags);
  558. if (lpfc_cmd->waitq)
  559. wake_up(lpfc_cmd->waitq);
  560. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  561. lpfc_release_scsi_buf(phba, lpfc_cmd);
  562. return;
  563. }
  564. if (!result)
  565. lpfc_rampup_queue_depth(vport, sdev);
  566. if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
  567. ((jiffies - pnode->last_ramp_up_time) >
  568. LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
  569. ((jiffies - pnode->last_q_full_time) >
  570. LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
  571. (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
  572. shost_for_each_device(tmp_sdev, sdev->host) {
  573. if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
  574. if (tmp_sdev->id != sdev->id)
  575. continue;
  576. if (tmp_sdev->ordered_tags)
  577. scsi_adjust_queue_depth(tmp_sdev,
  578. MSG_ORDERED_TAG,
  579. tmp_sdev->queue_depth+1);
  580. else
  581. scsi_adjust_queue_depth(tmp_sdev,
  582. MSG_SIMPLE_TAG,
  583. tmp_sdev->queue_depth+1);
  584. pnode->last_ramp_up_time = jiffies;
  585. }
  586. }
  587. }
  588. /*
  589. * Check for queue full. If the lun is reporting queue full, then
  590. * back off the lun queue depth to prevent target overloads.
  591. */
  592. if (result == SAM_STAT_TASK_SET_FULL && pnode &&
  593. NLP_CHK_NODE_ACT(pnode)) {
  594. pnode->last_q_full_time = jiffies;
  595. shost_for_each_device(tmp_sdev, sdev->host) {
  596. if (tmp_sdev->id != sdev->id)
  597. continue;
  598. depth = scsi_track_queue_full(tmp_sdev,
  599. tmp_sdev->queue_depth - 1);
  600. }
  601. /*
  602. * The queue depth cannot be lowered any more.
  603. * Modify the returned error code to store
  604. * the final depth value set by
  605. * scsi_track_queue_full.
  606. */
  607. if (depth == -1)
  608. depth = sdev->host->cmd_per_lun;
  609. if (depth) {
  610. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  611. "0711 detected queue full - lun queue "
  612. "depth adjusted to %d.\n", depth);
  613. }
  614. }
  615. /*
  616. * If there is a thread waiting for command completion
  617. * wake up the thread.
  618. */
  619. spin_lock_irqsave(sdev->host->host_lock, flags);
  620. if (lpfc_cmd->waitq)
  621. wake_up(lpfc_cmd->waitq);
  622. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  623. lpfc_release_scsi_buf(phba, lpfc_cmd);
  624. }
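/*
 * Fill in the FCP_CMND (LUN, CDB, task attribute from
 * scsi_populate_tag_msg()) and set up the IOCB: IWRITE64/IREAD64/ICMND64
 * command according to the data direction, read-check parameter, RPI,
 * class, FCP2 recovery and the completion routine.
 */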
  625. static void
  626. lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
  627. struct lpfc_nodelist *pnode)
  628. {
  629. struct lpfc_hba *phba = vport->phba;
  630. struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
  631. struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
  632. IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
  633. struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
  634. int datadir = scsi_cmnd->sc_data_direction;
  635. char tag[2];
  636. if (!pnode || !NLP_CHK_NODE_ACT(pnode))
  637. return;
  638. lpfc_cmd->fcp_rsp->rspSnsLen = 0;
  639. /* clear task management bits */
  640. lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
  641. int_to_scsilun(lpfc_cmd->pCmd->device->lun,
  642. &lpfc_cmd->fcp_cmnd->fcp_lun);
  643. memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
  644. if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
  645. switch (tag[0]) {
  646. case HEAD_OF_QUEUE_TAG:
  647. fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
  648. break;
  649. case ORDERED_QUEUE_TAG:
  650. fcp_cmnd->fcpCntl1 = ORDERED_Q;
  651. break;
  652. default:
  653. fcp_cmnd->fcpCntl1 = SIMPLE_Q;
  654. break;
  655. }
  656. } else
  657. fcp_cmnd->fcpCntl1 = 0;
  658. /*
  659. * There are three possibilities here - use scatter-gather segment, use
  660. * the single mapping, or neither. Start the lpfc command prep by
  661. * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
  662. * data bde entry.
  663. */
  664. if (scsi_sg_count(scsi_cmnd)) {
  665. if (datadir == DMA_TO_DEVICE) {
  666. iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
  667. iocb_cmd->un.fcpi.fcpi_parm = 0;
  668. iocb_cmd->ulpPU = 0;
  669. fcp_cmnd->fcpCntl3 = WRITE_DATA;
  670. phba->fc4OutputRequests++;
  671. } else {
  672. iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
  673. iocb_cmd->ulpPU = PARM_READ_CHECK;
  674. iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
  675. fcp_cmnd->fcpCntl3 = READ_DATA;
  676. phba->fc4InputRequests++;
  677. }
  678. } else {
  679. iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
  680. iocb_cmd->un.fcpi.fcpi_parm = 0;
  681. iocb_cmd->ulpPU = 0;
  682. fcp_cmnd->fcpCntl3 = 0;
  683. phba->fc4ControlRequests++;
  684. }
  685. /*
  686. * Finish initializing those IOCB fields that are independent
  687. * of the scsi_cmnd request_buffer
  688. */
  689. piocbq->iocb.ulpContext = pnode->nlp_rpi;
  690. if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
  691. piocbq->iocb.ulpFCP2Rcvy = 1;
  692. else
  693. piocbq->iocb.ulpFCP2Rcvy = 0;
  694. piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
  695. piocbq->context1 = lpfc_cmd;
  696. piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
  697. piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
  698. piocbq->vport = vport;
  699. }
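/*
 * Build an FCP task management IOCB (e.g. FCP_TARGET_RESET) for the given
 * LUN. Returns 1 on success, or 0 if the node is unusable. Timeouts that do
 * not fit the one-byte ulpTimeout field are left to the driver to enforce.
 */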
  700. static int
  701. lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
  702. struct lpfc_scsi_buf *lpfc_cmd,
  703. unsigned int lun,
  704. uint8_t task_mgmt_cmd)
  705. {
  706. struct lpfc_iocbq *piocbq;
  707. IOCB_t *piocb;
  708. struct fcp_cmnd *fcp_cmnd;
  709. struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
  710. struct lpfc_nodelist *ndlp = rdata->pnode;
  711. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
  712. ndlp->nlp_state != NLP_STE_MAPPED_NODE)
  713. return 0;
  714. piocbq = &(lpfc_cmd->cur_iocbq);
  715. piocbq->vport = vport;
  716. piocb = &piocbq->iocb;
  717. fcp_cmnd = lpfc_cmd->fcp_cmnd;
  718. int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
  719. fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
  720. piocb->ulpCommand = CMD_FCP_ICMND64_CR;
  721. piocb->ulpContext = ndlp->nlp_rpi;
  722. if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
  723. piocb->ulpFCP2Rcvy = 1;
  724. }
  725. piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
  726. /* ulpTimeout is only one byte */
  727. if (lpfc_cmd->timeout > 0xff) {
  728. /*
  729. * Do not timeout the command at the firmware level.
  730. * The driver will provide the timeout mechanism.
  731. */
  732. piocb->ulpTimeout = 0;
  733. } else {
  734. piocb->ulpTimeout = lpfc_cmd->timeout;
  735. }
  736. return 1;
  737. }
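/*
 * Default completion for task management IOCBs that timed out: simply
 * release the associated scsi buffer.
 */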
  738. static void
  739. lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
  740. struct lpfc_iocbq *cmdiocbq,
  741. struct lpfc_iocbq *rspiocbq)
  742. {
  743. struct lpfc_scsi_buf *lpfc_cmd =
  744. (struct lpfc_scsi_buf *) cmdiocbq->context1;
  745. if (lpfc_cmd)
  746. lpfc_release_scsi_buf(phba, lpfc_cmd);
  747. return;
  748. }
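/*
 * Issue an FCP target reset to tgt_id and wait for the response. On
 * IOCB_TIMEDOUT the IOCB is handed to lpfc_tskmgmt_def_cmpl so the buffer
 * is freed by the late completion; SUCCESS or the SLI return code is passed
 * back to the caller.
 */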
  749. static int
  750. lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
  751. unsigned tgt_id, unsigned int lun,
  752. struct lpfc_rport_data *rdata)
  753. {
  754. struct lpfc_hba *phba = vport->phba;
  755. struct lpfc_iocbq *iocbq;
  756. struct lpfc_iocbq *iocbqrsp;
  757. int ret;
  758. if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
  759. return FAILED;
  760. lpfc_cmd->rdata = rdata;
  761. ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
  762. FCP_TARGET_RESET);
  763. if (!ret)
  764. return FAILED;
  765. iocbq = &lpfc_cmd->cur_iocbq;
  766. iocbqrsp = lpfc_sli_get_iocbq(phba);
  767. if (!iocbqrsp)
  768. return FAILED;
  769. /* Issue Target Reset to TGT <num> */
  770. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  771. "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
  772. tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
  773. ret = lpfc_sli_issue_iocb_wait(phba,
  774. &phba->sli.ring[phba->sli.fcp_ring],
  775. iocbq, iocbqrsp, lpfc_cmd->timeout);
  776. if (ret != IOCB_SUCCESS) {
  777. if (ret == IOCB_TIMEDOUT)
  778. iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
  779. lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
  780. } else {
  781. ret = SUCCESS;
  782. lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
  783. lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
  784. if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
  785. (lpfc_cmd->result & IOERR_DRVR_MASK))
  786. lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
  787. }
  788. lpfc_sli_release_iocbq(phba, iocbqrsp);
  789. return ret;
  790. }
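/*
 * scsi_host_template .info entry point: formats the model description, PCI
 * bus, device/function, IRQ and port name into a static buffer.
 */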
  791. const char *
  792. lpfc_info(struct Scsi_Host *host)
  793. {
  794. struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
  795. struct lpfc_hba *phba = vport->phba;
  796. int len;
  797. static char lpfcinfobuf[384];
  798. memset(lpfcinfobuf,0,384);
  799. if (phba && phba->pcidev){
  800. strncpy(lpfcinfobuf, phba->ModelDesc, 256);
  801. len = strlen(lpfcinfobuf);
  802. snprintf(lpfcinfobuf + len,
  803. 384-len,
  804. " on PCI bus %02x device %02x irq %d",
  805. phba->pcidev->bus->number,
  806. phba->pcidev->devfn,
  807. phba->pcidev->irq);
  808. len = strlen(lpfcinfobuf);
  809. if (phba->Port[0]) {
  810. snprintf(lpfcinfobuf + len,
  811. 384-len,
  812. " port %s",
  813. phba->Port);
  814. }
  815. }
  816. return lpfcinfobuf;
  817. }
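/*
 * FCP ring polling support: rearm fcp_poll_timer for another cfg_poll_tmo
 * milliseconds while commands remain on the FCP ring's txcmplq, and service
 * the ring from the timer when interrupt-less polling is enabled.
 */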
  818. static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
  819. {
  820. unsigned long poll_tmo_expires =
  821. (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
  822. if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
  823. mod_timer(&phba->fcp_poll_timer,
  824. poll_tmo_expires);
  825. }
  826. void lpfc_poll_start_timer(struct lpfc_hba * phba)
  827. {
  828. lpfc_poll_rearm_timer(phba);
  829. }
  830. void lpfc_poll_timeout(unsigned long ptr)
  831. {
  832. struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
  833. if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
  834. lpfc_sli_poll_fcp_ring (phba);
  835. if (phba->cfg_poll & DISABLE_FCP_RING_INT)
  836. lpfc_poll_rearm_timer(phba);
  837. }
  838. }
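/*
 * scsi_host_template .queuecommand entry point. Validates the rport and
 * node, takes a scsi buffer from the pool (ramping the queue depth down and
 * returning SCSI_MLQUEUE_HOST_BUSY when the pool is empty), maps the data
 * buffer, builds the FCP IOCB and issues it to the FCP ring. When
 * ENABLE_FCP_RING_POLLING is set the ring is also polled here.
 */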
  839. static int
  840. lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
  841. {
  842. struct Scsi_Host *shost = cmnd->device->host;
  843. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  844. struct lpfc_hba *phba = vport->phba;
  845. struct lpfc_sli *psli = &phba->sli;
  846. struct lpfc_rport_data *rdata = cmnd->device->hostdata;
  847. struct lpfc_nodelist *ndlp = rdata->pnode;
  848. struct lpfc_scsi_buf *lpfc_cmd;
  849. struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
  850. int err;
  851. err = fc_remote_port_chkready(rport);
  852. if (err) {
  853. cmnd->result = err;
  854. goto out_fail_command;
  855. }
  856. /*
  857. * Catch race where our node has transitioned, but the
  858. * transport is still transitioning.
  859. */
  860. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
  861. cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
  862. goto out_fail_command;
  863. }
  864. lpfc_cmd = lpfc_get_scsi_buf(phba);
  865. if (lpfc_cmd == NULL) {
  866. lpfc_adjust_queue_depth(phba);
  867. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  868. "0707 driver's buffer pool is empty, "
  869. "IO busied\n");
  870. goto out_host_busy;
  871. }
  872. /*
  873. * Store the midlayer's command structure for the completion phase
  874. * and complete the command initialization.
  875. */
  876. lpfc_cmd->pCmd = cmnd;
  877. lpfc_cmd->rdata = rdata;
  878. lpfc_cmd->timeout = 0;
  879. cmnd->host_scribble = (unsigned char *)lpfc_cmd;
  880. cmnd->scsi_done = done;
  881. err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
  882. if (err)
  883. goto out_host_busy_free_buf;
  884. lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
  885. err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
  886. &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
  887. if (err)
  888. goto out_host_busy_free_buf;
  889. if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
  890. lpfc_sli_poll_fcp_ring(phba);
  891. if (phba->cfg_poll & DISABLE_FCP_RING_INT)
  892. lpfc_poll_rearm_timer(phba);
  893. }
  894. return 0;
  895. out_host_busy_free_buf:
  896. lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
  897. lpfc_release_scsi_buf(phba, lpfc_cmd);
  898. out_host_busy:
  899. return SCSI_MLQUEUE_HOST_BUSY;
  900. out_fail_command:
  901. done(cmnd);
  902. return 0;
  903. }
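/*
 * Hold off SCSI error handling while the fc_rport is FC_PORTSTATE_BLOCKED,
 * rechecking once a second until the transport unblocks the port.
 */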
  904. static void
  905. lpfc_block_error_handler(struct scsi_cmnd *cmnd)
  906. {
  907. struct Scsi_Host *shost = cmnd->device->host;
  908. struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
  909. spin_lock_irq(shost->host_lock);
  910. while (rport->port_state == FC_PORTSTATE_BLOCKED) {
  911. spin_unlock_irq(shost->host_lock);
  912. msleep(1000);
  913. spin_lock_irq(shost->host_lock);
  914. }
  915. spin_unlock_irq(shost->host_lock);
  916. return;
  917. }
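/*
 * eh_abort_handler: build an ABORT_XRI_CN (or CLOSE_XRI_CN if the link is
 * down) IOCB for the command's exchange, issue it, and wait up to
 * 2 * devloss_tmo seconds for the completion path to clear lpfc_cmd->pCmd.
 * Returns SUCCESS if the command completed, FAILED otherwise.
 */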
  918. static int
  919. lpfc_abort_handler(struct scsi_cmnd *cmnd)
  920. {
  921. struct Scsi_Host *shost = cmnd->device->host;
  922. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  923. struct lpfc_hba *phba = vport->phba;
  924. struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
  925. struct lpfc_iocbq *iocb;
  926. struct lpfc_iocbq *abtsiocb;
  927. struct lpfc_scsi_buf *lpfc_cmd;
  928. IOCB_t *cmd, *icmd;
  929. int ret = SUCCESS;
  930. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
  931. lpfc_block_error_handler(cmnd);
  932. lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
  933. BUG_ON(!lpfc_cmd);
  934. /*
  935. * If pCmd field of the corresponding lpfc_scsi_buf structure
  936. * points to a different SCSI command, then the driver has
  937. * already completed this command, but the midlayer did not
  938. * see the completion before the eh fired. Just return
  939. * SUCCESS.
  940. */
  941. iocb = &lpfc_cmd->cur_iocbq;
  942. if (lpfc_cmd->pCmd != cmnd)
  943. goto out;
  944. BUG_ON(iocb->context1 != lpfc_cmd);
  945. abtsiocb = lpfc_sli_get_iocbq(phba);
  946. if (abtsiocb == NULL) {
  947. ret = FAILED;
  948. goto out;
  949. }
  950. /*
  951. * The SCSI command cannot be in the txq; it is in flight because
  952. * pCmd is still pointing at the SCSI command we have to abort. There
  953. * is no need to search the txcmplq. Just send an abort to the FW.
  954. */
  955. cmd = &iocb->iocb;
  956. icmd = &abtsiocb->iocb;
  957. icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
  958. icmd->un.acxri.abortContextTag = cmd->ulpContext;
  959. icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
  960. icmd->ulpLe = 1;
  961. icmd->ulpClass = cmd->ulpClass;
  962. if (lpfc_is_link_up(phba))
  963. icmd->ulpCommand = CMD_ABORT_XRI_CN;
  964. else
  965. icmd->ulpCommand = CMD_CLOSE_XRI_CN;
  966. abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
  967. abtsiocb->vport = vport;
  968. if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
  969. lpfc_sli_release_iocbq(phba, abtsiocb);
  970. ret = FAILED;
  971. goto out;
  972. }
  973. if (phba->cfg_poll & DISABLE_FCP_RING_INT)
  974. lpfc_sli_poll_fcp_ring (phba);
  975. lpfc_cmd->waitq = &waitq;
  976. /* Wait for abort to complete */
  977. wait_event_timeout(waitq,
  978. (lpfc_cmd->pCmd != cmnd),
  979. (2*vport->cfg_devloss_tmo*HZ));
  980. spin_lock_irq(shost->host_lock);
  981. lpfc_cmd->waitq = NULL;
  982. spin_unlock_irq(shost->host_lock);
  983. if (lpfc_cmd->pCmd == cmnd) {
  984. ret = FAILED;
  985. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  986. "0748 abort handler timed out waiting "
  987. "for abort to complete: ret %#x, ID %d, "
  988. "LUN %d, snum %#lx\n",
  989. ret, cmnd->device->id, cmnd->device->lun,
  990. cmnd->serial_number);
  991. }
  992. out:
  993. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  994. "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
  995. "LUN %d snum %#lx\n", ret, cmnd->device->id,
  996. cmnd->device->lun, cmnd->serial_number);
  997. return ret;
  998. }
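/*
 * eh_device_reset_handler: wait for the target node to reach MAPPED state
 * (giving up after roughly the devloss timeout), issue an FCP_TARGET_RESET
 * task management command, then flush and, if needed, abort any IOCBs still
 * outstanding for this LUN before returning.
 */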
  999. static int
  1000. lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
  1001. {
  1002. struct Scsi_Host *shost = cmnd->device->host;
  1003. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  1004. struct lpfc_hba *phba = vport->phba;
  1005. struct lpfc_scsi_buf *lpfc_cmd;
  1006. struct lpfc_iocbq *iocbq, *iocbqrsp;
  1007. struct lpfc_rport_data *rdata = cmnd->device->hostdata;
  1008. struct lpfc_nodelist *pnode = rdata->pnode;
  1009. uint32_t cmd_result = 0, cmd_status = 0;
  1010. int ret = FAILED;
  1011. int iocb_status = IOCB_SUCCESS;
  1012. int cnt, loopcnt;
  1013. lpfc_block_error_handler(cmnd);
  1014. loopcnt = 0;
  1015. /*
  1016. * If the target is not in a MAPPED state, delay the reset until the
  1017. * target is rediscovered or the devloss timeout expires.
  1018. */
  1019. while (1) {
  1020. if (!pnode || !NLP_CHK_NODE_ACT(pnode))
  1021. goto out;
  1022. if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
  1023. schedule_timeout_uninterruptible(msecs_to_jiffies(500));
  1024. loopcnt++;
  1025. rdata = cmnd->device->hostdata;
  1026. if (!rdata ||
  1027. (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
  1028. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1029. "0721 LUN Reset rport "
  1030. "failure: cnt x%x rdata x%p\n",
  1031. loopcnt, rdata);
  1032. goto out;
  1033. }
  1034. pnode = rdata->pnode;
  1035. if (!pnode || !NLP_CHK_NODE_ACT(pnode))
  1036. goto out;
  1037. }
  1038. if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
  1039. break;
  1040. }
  1041. lpfc_cmd = lpfc_get_scsi_buf(phba);
  1042. if (lpfc_cmd == NULL)
  1043. goto out;
  1044. lpfc_cmd->timeout = 60;
  1045. lpfc_cmd->rdata = rdata;
  1046. ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
  1047. FCP_TARGET_RESET);
  1048. if (!ret)
  1049. goto out_free_scsi_buf;
  1050. iocbq = &lpfc_cmd->cur_iocbq;
  1051. /* get a buffer for this IOCB command response */
  1052. iocbqrsp = lpfc_sli_get_iocbq(phba);
  1053. if (iocbqrsp == NULL)
  1054. goto out_free_scsi_buf;
  1055. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  1056. "0703 Issue target reset to TGT %d LUN %d "
  1057. "rpi x%x nlp_flag x%x\n", cmnd->device->id,
  1058. cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
  1059. iocb_status = lpfc_sli_issue_iocb_wait(phba,
  1060. &phba->sli.ring[phba->sli.fcp_ring],
  1061. iocbq, iocbqrsp, lpfc_cmd->timeout);
  1062. if (iocb_status == IOCB_TIMEDOUT)
  1063. iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
  1064. if (iocb_status == IOCB_SUCCESS)
  1065. ret = SUCCESS;
  1066. else
  1067. ret = iocb_status;
  1068. cmd_result = iocbqrsp->iocb.un.ulpWord[4];
  1069. cmd_status = iocbqrsp->iocb.ulpStatus;
  1070. lpfc_sli_release_iocbq(phba, iocbqrsp);
  1071. /*
  1072. * All outstanding txcmplq I/Os should have been aborted by the device.
  1073. * Unfortunately, some targets do not abide by this, forcing the driver
  1074. * to double-check.
  1075. */
  1076. cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
  1077. LPFC_CTX_LUN);
  1078. if (cnt)
  1079. lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
  1080. cmnd->device->id, cmnd->device->lun,
  1081. LPFC_CTX_LUN);
  1082. loopcnt = 0;
  1083. while(cnt) {
  1084. schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
  1085. if (++loopcnt
  1086. > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
  1087. break;
  1088. cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
  1089. cmnd->device->lun, LPFC_CTX_LUN);
  1090. }
  1091. if (cnt) {
  1092. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1093. "0719 device reset I/O flush failure: "
  1094. "cnt x%x\n", cnt);
  1095. ret = FAILED;
  1096. }
  1097. out_free_scsi_buf:
  1098. if (iocb_status != IOCB_TIMEDOUT) {
  1099. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1100. }
  1101. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1102. "0713 SCSI layer issued device reset (%d, %d) "
  1103. "return x%x status x%x result x%x\n",
  1104. cmnd->device->id, cmnd->device->lun, ret,
  1105. cmd_status, cmd_result);
  1106. out:
  1107. return ret;
  1108. }
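/*
 * eh_bus_reset_handler: issue a target reset to every mapped target ID known
 * to this vport, then abort and drain any IOCBs still outstanding on the
 * host. Returns FAILED if any reset fails or the flush does not complete.
 */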
  1109. static int
  1110. lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
  1111. {
  1112. struct Scsi_Host *shost = cmnd->device->host;
  1113. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  1114. struct lpfc_hba *phba = vport->phba;
  1115. struct lpfc_nodelist *ndlp = NULL;
  1116. int match;
  1117. int ret = FAILED, i, err_count = 0;
  1118. int cnt, loopcnt;
  1119. struct lpfc_scsi_buf * lpfc_cmd;
  1120. lpfc_block_error_handler(cmnd);
  1121. lpfc_cmd = lpfc_get_scsi_buf(phba);
  1122. if (lpfc_cmd == NULL)
  1123. goto out;
  1124. /* The lpfc_cmd storage is reused. Set all loop invariants. */
  1125. lpfc_cmd->timeout = 60;
  1126. /*
  1127. * Since the driver manages a single bus device, reset all
  1128. * targets known to the driver. Should any target reset
  1129. * fail, this routine returns failure to the midlayer.
  1130. */
  1131. for (i = 0; i < LPFC_MAX_TARGET; i++) {
  1132. /* Search for mapped node by target ID */
  1133. match = 0;
  1134. spin_lock_irq(shost->host_lock);
  1135. list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
  1136. if (!NLP_CHK_NODE_ACT(ndlp))
  1137. continue;
  1138. if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
  1139. i == ndlp->nlp_sid &&
  1140. ndlp->rport) {
  1141. match = 1;
  1142. break;
  1143. }
  1144. }
  1145. spin_unlock_irq(shost->host_lock);
  1146. if (!match)
  1147. continue;
  1148. ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
  1149. cmnd->device->lun,
  1150. ndlp->rport->dd_data);
  1151. if (ret != SUCCESS) {
  1152. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1153. "0700 Bus Reset on target %d failed\n",
  1154. i);
  1155. err_count++;
  1156. break;
  1157. }
  1158. }
  1159. if (ret != IOCB_TIMEDOUT)
  1160. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1161. if (err_count == 0)
  1162. ret = SUCCESS;
  1163. else
  1164. ret = FAILED;
  1165. /*
  1166. * All outstanding txcmplq I/Os should have been aborted by
  1167. * the targets. Unfortunately, some targets do not abide by
  1168. * this, forcing the driver to double-check.
  1169. */
  1170. cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
  1171. if (cnt)
  1172. lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
  1173. 0, 0, LPFC_CTX_HOST);
  1174. loopcnt = 0;
  1175. while(cnt) {
  1176. schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
  1177. if (++loopcnt
  1178. > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
  1179. break;
  1180. cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
  1181. }
  1182. if (cnt) {
  1183. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1184. "0715 Bus Reset I/O flush failure: "
  1185. "cnt x%x left x%x\n", cnt, i);
  1186. ret = FAILED;
  1187. }
  1188. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1189. "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
  1190. out:
  1191. return ret;
  1192. }
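/*
 * slave_alloc: bind the scsi_device to its remote port data and pre-allocate
 * about cfg_lun_queue_depth + 2 scsi buffers for it, capped so that
 * LPFC_DISC_IOCB_BUFF_COUNT exchanges always remain available for discovery.
 */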
  1193. static int
  1194. lpfc_slave_alloc(struct scsi_device *sdev)
  1195. {
  1196. struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
  1197. struct lpfc_hba *phba = vport->phba;
  1198. struct lpfc_scsi_buf *scsi_buf = NULL;
  1199. struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
  1200. uint32_t total = 0, i;
  1201. uint32_t num_to_alloc = 0;
  1202. unsigned long flags;
  1203. if (!rport || fc_remote_port_chkready(rport))
  1204. return -ENXIO;
  1205. sdev->hostdata = rport->dd_data;
  1206. /*
  1207. * Populate the cmds_per_lun count scsi_bufs into this host's globally
  1208. * available list of scsi buffers. Don't allocate more than the
  1209. * HBA limit conveyed to the midlayer via the host structure. The
  1210. * formula accounts for the lun_queue_depth + error handlers + 1
  1211. * extra. This list of scsi bufs exists for the lifetime of the driver.
  1212. */
  1213. total = phba->total_scsi_bufs;
  1214. num_to_alloc = vport->cfg_lun_queue_depth + 2;
  1215. /* Allow some exchanges to be available always to complete discovery */
  1216. if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
  1217. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  1218. "0704 At limitation of %d preallocated "
  1219. "command buffers\n", total);
  1220. return 0;
  1221. /* Allow some exchanges to be available always to complete discovery */
  1222. } else if (total + num_to_alloc >
  1223. phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
  1224. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  1225. "0705 Allocation request of %d "
  1226. "command buffers will exceed max of %d. "
  1227. "Reducing allocation request to %d.\n",
  1228. num_to_alloc, phba->cfg_hba_queue_depth,
  1229. (phba->cfg_hba_queue_depth - total));
  1230. num_to_alloc = phba->cfg_hba_queue_depth - total;
  1231. }
  1232. for (i = 0; i < num_to_alloc; i++) {
  1233. scsi_buf = lpfc_new_scsi_buf(vport);
  1234. if (!scsi_buf) {
  1235. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1236. "0706 Failed to allocate "
  1237. "command buffer\n");
  1238. break;
  1239. }
  1240. spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
  1241. phba->total_scsi_bufs++;
  1242. list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
  1243. spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
  1244. }
  1245. return 0;
  1246. }
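/*
 * slave_configure: set the tagged queue depth to cfg_lun_queue_depth,
 * propagate cfg_devloss_tmo to the remote port, and restart the FCP ring
 * poll timer when polling mode is enabled.
 */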
  1247. static int
  1248. lpfc_slave_configure(struct scsi_device *sdev)
  1249. {
  1250. struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
  1251. struct lpfc_hba *phba = vport->phba;
  1252. struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
  1253. if (sdev->tagged_supported)
  1254. scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
  1255. else
  1256. scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
  1257. /*
  1258. * Initialize the fc transport attributes for the target
  1259. * containing this scsi device. Also note that the driver's
  1260. * target pointer is stored in the starget_data for the
  1261. * driver's sysfs entry point functions.
  1262. */
  1263. rport->dev_loss_tmo = vport->cfg_devloss_tmo;
  1264. if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
  1265. lpfc_sli_poll_fcp_ring(phba);
  1266. if (phba->cfg_poll & DISABLE_FCP_RING_INT)
  1267. lpfc_poll_rearm_timer(phba);
  1268. }
  1269. return 0;
  1270. }
  1271. static void
  1272. lpfc_slave_destroy(struct scsi_device *sdev)
  1273. {
  1274. sdev->hostdata = NULL;
  1275. return;
  1276. }
  1277. struct scsi_host_template lpfc_template = {
  1278. .module = THIS_MODULE,
  1279. .name = LPFC_DRIVER_NAME,
  1280. .info = lpfc_info,
  1281. .queuecommand = lpfc_queuecommand,
  1282. .eh_abort_handler = lpfc_abort_handler,
  1283. .eh_device_reset_handler= lpfc_device_reset_handler,
  1284. .eh_bus_reset_handler = lpfc_bus_reset_handler,
  1285. .slave_alloc = lpfc_slave_alloc,
  1286. .slave_configure = lpfc_slave_configure,
  1287. .slave_destroy = lpfc_slave_destroy,
  1288. .scan_finished = lpfc_scan_finished,
  1289. .this_id = -1,
  1290. .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
  1291. .cmd_per_lun = LPFC_CMD_PER_LUN,
  1292. .use_clustering = ENABLE_CLUSTERING,
  1293. .shost_attrs = lpfc_hba_attrs,
  1294. .max_sectors = 0xFFFF,
  1295. };
  1296. struct scsi_host_template lpfc_vport_template = {
  1297. .module = THIS_MODULE,
  1298. .name = LPFC_DRIVER_NAME,
  1299. .info = lpfc_info,
  1300. .queuecommand = lpfc_queuecommand,
  1301. .eh_abort_handler = lpfc_abort_handler,
  1302. .eh_device_reset_handler= lpfc_device_reset_handler,
  1303. .eh_bus_reset_handler = lpfc_bus_reset_handler,
  1304. .slave_alloc = lpfc_slave_alloc,
  1305. .slave_configure = lpfc_slave_configure,
  1306. .slave_destroy = lpfc_slave_destroy,
  1307. .scan_finished = lpfc_scan_finished,
  1308. .this_id = -1,
  1309. .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
  1310. .cmd_per_lun = LPFC_CMD_PER_LUN,
  1311. .use_clustering = ENABLE_CLUSTERING,
  1312. .shost_attrs = lpfc_vport_attrs,
  1313. .max_sectors = 0xFFFF,
  1314. };