lpfc_scsi.c

  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2004-2008 Emulex. All rights reserved. *
  5. * EMULEX and SLI are trademarks of Emulex. *
  6. * www.emulex.com *
  7. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  8. * *
  9. * This program is free software; you can redistribute it and/or *
  10. * modify it under the terms of version 2 of the GNU General *
  11. * Public License as published by the Free Software Foundation. *
  12. * This program is distributed in the hope that it will be useful. *
  13. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  14. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  15. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  16. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  17. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  18. * more details, a copy of which can be found in the file COPYING *
  19. * included with this package. *
  20. *******************************************************************/
  21. #include <linux/pci.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/delay.h>
  24. #include <scsi/scsi.h>
  25. #include <scsi/scsi_device.h>
  26. #include <scsi/scsi_host.h>
  27. #include <scsi/scsi_tcq.h>
  28. #include <scsi/scsi_transport_fc.h>
  29. #include "lpfc_version.h"
  30. #include "lpfc_hw.h"
  31. #include "lpfc_sli.h"
  32. #include "lpfc_nl.h"
  33. #include "lpfc_disc.h"
  34. #include "lpfc_scsi.h"
  35. #include "lpfc.h"
  36. #include "lpfc_logmsg.h"
  37. #include "lpfc_crtn.h"
  38. #include "lpfc_vport.h"
  39. #define LPFC_RESET_WAIT 2
  40. #define LPFC_ABORT_WAIT 2
  41. /**
  42. * lpfc_update_stats: Update statistical data for the command completion.
  43. * @phba: Pointer to HBA object.
  44. * @lpfc_cmd: lpfc scsi command object pointer.
  45. *
  46. * This function is called when there is a command completion and this
  47. * function updates the statistical data for the command completion.
  48. **/
  49. static void
  50. lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
  51. {
  52. struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
  53. struct lpfc_nodelist *pnode = rdata->pnode;
  54. struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
  55. unsigned long flags;
  56. struct Scsi_Host *shost = cmd->device->host;
  57. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  58. unsigned long latency;
  59. int i;
  60. if (cmd->result)
  61. return;
  62. spin_lock_irqsave(shost->host_lock, flags);
  63. if (!vport->stat_data_enabled ||
  64. vport->stat_data_blocked ||
  65. !pnode->lat_data ||
  66. (phba->bucket_type == LPFC_NO_BUCKET)) {
  67. spin_unlock_irqrestore(shost->host_lock, flags);
  68. return;
  69. }
  70. latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
  71. if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
  72. i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
  73. phba->bucket_step;
  74. if (i >= LPFC_MAX_BUCKET_COUNT)
  75. i = LPFC_MAX_BUCKET_COUNT - 1;
  76. } else {
  77. for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
  78. if (latency <= (phba->bucket_base +
  79. ((1<<i)*phba->bucket_step)))
  80. break;
  81. }
  82. pnode->lat_data[i].cmd_count++;
  83. spin_unlock_irqrestore(shost->host_lock, flags);
  84. }
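/*
 * Illustrative note (not part of the driver logic): for the linear
 * bucket type the index computed above is
 *
 *     i = (latency + bucket_step - 1 - bucket_base) / bucket_step
 *
 * so, assuming bucket_base = 0 and bucket_step = 10 (ms), a completion
 * latency of 25 ms yields i = (25 + 10 - 1) / 10 = 3 and
 * pnode->lat_data[3].cmd_count is incremented. For the power-of-two
 * bucket type the loop instead stops at the first i for which
 * latency <= bucket_base + (1 << i) * bucket_step.
 */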
  85. /**
  86. * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
  87. * event.
  88. * @phba: Pointer to HBA context object.
  89. * @vport: Pointer to vport object.
  90. * @ndlp: Pointer to FC node associated with the target.
  91. * @lun: Lun number of the scsi device.
  92. * @old_val: Old value of the queue depth.
  93. * @new_val: New value of the queue depth.
  94. *
  95. * This function sends an event to the mgmt application indicating
  96. * there is a change in the scsi device queue depth.
  97. **/
  98. static void
  99. lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
  100. struct lpfc_vport *vport,
  101. struct lpfc_nodelist *ndlp,
  102. uint32_t lun,
  103. uint32_t old_val,
  104. uint32_t new_val)
  105. {
  106. struct lpfc_fast_path_event *fast_path_evt;
  107. unsigned long flags;
  108. fast_path_evt = lpfc_alloc_fast_evt(phba);
  109. if (!fast_path_evt)
  110. return;
  111. fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
  112. FC_REG_SCSI_EVENT;
  113. fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
  114. LPFC_EVENT_VARQUEDEPTH;
  115. /* Report all luns with change in queue depth */
  116. fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
  117. if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
  118. memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
  119. &ndlp->nlp_portname, sizeof(struct lpfc_name));
  120. memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
  121. &ndlp->nlp_nodename, sizeof(struct lpfc_name));
  122. }
  123. fast_path_evt->un.queue_depth_evt.oldval = old_val;
  124. fast_path_evt->un.queue_depth_evt.newval = new_val;
  125. fast_path_evt->vport = vport;
  126. fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
  127. spin_lock_irqsave(&phba->hbalock, flags);
  128. list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
  129. spin_unlock_irqrestore(&phba->hbalock, flags);
  130. lpfc_worker_wake_up(phba);
  131. return;
  132. }
  133. /**
  134. * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
  135. * @phba: The Hba for which this call is being executed.
  136. *
  137. * This routine is called when there is a resource error in the driver or
  138. * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
  139. * event per second, and wakes up the worker thread of @phba to process the
  140. * WORKER_RAMP_DOWN_QUEUE event.
  141. *
  142. * This routine should be called with no lock held.
  143. **/
  144. void
  145. lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
  146. {
  147. unsigned long flags;
  148. uint32_t evt_posted;
  149. spin_lock_irqsave(&phba->hbalock, flags);
  150. atomic_inc(&phba->num_rsrc_err);
  151. phba->last_rsrc_error_time = jiffies;
  152. if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
  153. spin_unlock_irqrestore(&phba->hbalock, flags);
  154. return;
  155. }
  156. phba->last_ramp_down_time = jiffies;
  157. spin_unlock_irqrestore(&phba->hbalock, flags);
  158. spin_lock_irqsave(&phba->pport->work_port_lock, flags);
  159. evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
  160. if (!evt_posted)
  161. phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
  162. spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
  163. if (!evt_posted)
  164. lpfc_worker_wake_up(phba);
  165. return;
  166. }
  167. /**
  168. * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
  169. * @vport: The virtual port for which this call is being executed.
  170. * @sdev: The scsi device on which a command completed successfully.
  171. *
  172. * This routine posts a WORKER_RAMP_UP_QUEUE event for @vport. It posts at most
  173. * one event every 5 minutes after last_ramp_up_time or last_rsrc_error_time,
  174. * and wakes up the worker thread of @phba to process the WORKER_RAMP_UP_QUEUE event.
  175. *
  176. * This routine should be called with no lock held.
  177. **/
  178. static inline void
  179. lpfc_rampup_queue_depth(struct lpfc_vport *vport,
  180. struct scsi_device *sdev)
  181. {
  182. unsigned long flags;
  183. struct lpfc_hba *phba = vport->phba;
  184. uint32_t evt_posted;
  185. atomic_inc(&phba->num_cmd_success);
  186. if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
  187. return;
  188. spin_lock_irqsave(&phba->hbalock, flags);
  189. if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
  190. ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
  191. spin_unlock_irqrestore(&phba->hbalock, flags);
  192. return;
  193. }
  194. phba->last_ramp_up_time = jiffies;
  195. spin_unlock_irqrestore(&phba->hbalock, flags);
  196. spin_lock_irqsave(&phba->pport->work_port_lock, flags);
  197. evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
  198. if (!evt_posted)
  199. phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
  200. spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
  201. if (!evt_posted)
  202. lpfc_worker_wake_up(phba);
  203. return;
  204. }
  205. /**
  206. * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
  207. * @phba: The Hba for which this call is being executed.
  208. *
  209. * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
  210. thread. This routine reduces the queue depth for all scsi devices on each vport
  211. * associated with @phba.
  212. **/
  213. void
  214. lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
  215. {
  216. struct lpfc_vport **vports;
  217. struct Scsi_Host *shost;
  218. struct scsi_device *sdev;
  219. unsigned long new_queue_depth, old_queue_depth;
  220. unsigned long num_rsrc_err, num_cmd_success;
  221. int i;
  222. struct lpfc_rport_data *rdata;
  223. num_rsrc_err = atomic_read(&phba->num_rsrc_err);
  224. num_cmd_success = atomic_read(&phba->num_cmd_success);
  225. vports = lpfc_create_vport_work_array(phba);
  226. if (vports != NULL)
  227. for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  228. shost = lpfc_shost_from_vport(vports[i]);
  229. shost_for_each_device(sdev, shost) {
  230. new_queue_depth =
  231. sdev->queue_depth * num_rsrc_err /
  232. (num_rsrc_err + num_cmd_success);
  233. if (!new_queue_depth)
  234. new_queue_depth = sdev->queue_depth - 1;
  235. else
  236. new_queue_depth = sdev->queue_depth -
  237. new_queue_depth;
  238. old_queue_depth = sdev->queue_depth;
  239. if (sdev->ordered_tags)
  240. scsi_adjust_queue_depth(sdev,
  241. MSG_ORDERED_TAG,
  242. new_queue_depth);
  243. else
  244. scsi_adjust_queue_depth(sdev,
  245. MSG_SIMPLE_TAG,
  246. new_queue_depth);
  247. rdata = sdev->hostdata;
  248. if (rdata)
  249. lpfc_send_sdev_queuedepth_change_event(
  250. phba, vports[i],
  251. rdata->pnode,
  252. sdev->lun, old_queue_depth,
  253. new_queue_depth);
  254. }
  255. }
  256. lpfc_destroy_vport_work_array(phba, vports);
  257. atomic_set(&phba->num_rsrc_err, 0);
  258. atomic_set(&phba->num_cmd_success, 0);
  259. }
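/*
 * Illustrative note: the ramp-down above lowers each queue depth in
 * proportion to the resource-error rate seen since the last adjustment.
 * For example, with sdev->queue_depth = 32, num_rsrc_err = 8 and
 * num_cmd_success = 24, the reduction is 32 * 8 / (8 + 24) = 8, so the
 * new depth becomes 32 - 8 = 24. If the proportional reduction rounds
 * down to zero, the depth is simply decremented by one instead.
 */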
  260. /**
  261. * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
  262. * @phba: The Hba for which this call is being executed.
  263. *
  264. * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
  265. thread. This routine increases the queue depth for all scsi devices on each vport
  266. * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
  267. * num_cmd_success to zero.
  268. **/
  269. void
  270. lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
  271. {
  272. struct lpfc_vport **vports;
  273. struct Scsi_Host *shost;
  274. struct scsi_device *sdev;
  275. int i;
  276. struct lpfc_rport_data *rdata;
  277. vports = lpfc_create_vport_work_array(phba);
  278. if (vports != NULL)
  279. for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  280. shost = lpfc_shost_from_vport(vports[i]);
  281. shost_for_each_device(sdev, shost) {
  282. if (vports[i]->cfg_lun_queue_depth <=
  283. sdev->queue_depth)
  284. continue;
  285. if (sdev->ordered_tags)
  286. scsi_adjust_queue_depth(sdev,
  287. MSG_ORDERED_TAG,
  288. sdev->queue_depth+1);
  289. else
  290. scsi_adjust_queue_depth(sdev,
  291. MSG_SIMPLE_TAG,
  292. sdev->queue_depth+1);
  293. rdata = sdev->hostdata;
  294. if (rdata)
  295. lpfc_send_sdev_queuedepth_change_event(
  296. phba, vports[i],
  297. rdata->pnode,
  298. sdev->lun,
  299. sdev->queue_depth - 1,
  300. sdev->queue_depth);
  301. }
  302. }
  303. lpfc_destroy_vport_work_array(phba, vports);
  304. atomic_set(&phba->num_rsrc_err, 0);
  305. atomic_set(&phba->num_cmd_success, 0);
  306. }
  307. /**
  308. * lpfc_scsi_dev_block: set all scsi hosts to block state.
  309. * @phba: Pointer to HBA context object.
  310. *
  311. * This function walks the vport list and sets each SCSI host to block state
  312. * by invoking the fc_remote_port_delete() routine. This function is invoked
  313. * by EEH when a device's PCI slot has been permanently disabled.
  314. **/
  315. void
  316. lpfc_scsi_dev_block(struct lpfc_hba *phba)
  317. {
  318. struct lpfc_vport **vports;
  319. struct Scsi_Host *shost;
  320. struct scsi_device *sdev;
  321. struct fc_rport *rport;
  322. int i;
  323. vports = lpfc_create_vport_work_array(phba);
  324. if (vports != NULL)
  325. for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  326. shost = lpfc_shost_from_vport(vports[i]);
  327. shost_for_each_device(sdev, shost) {
  328. rport = starget_to_rport(scsi_target(sdev));
  329. fc_remote_port_delete(rport);
  330. }
  331. }
  332. lpfc_destroy_vport_work_array(phba, vports);
  333. }
  334. /**
  335. * lpfc_new_scsi_buf: Scsi buffer allocator.
  336. * @vport: The virtual port for which this call is being executed.
  337. *
  338. * This routine allocates a scsi buffer, which contains all the necessary
  339. * information needed to initiate a SCSI I/O. The non-DMAable buffer region
  340. * contains information to build the IOCB. The DMAable region contains
  341. * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
  342. * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
  343. * and the BPL BDE is setup in the IOCB.
  344. *
  345. * Return codes:
  346. * NULL - Error
  347. * Pointer to lpfc_scsi_buf data structure - Success
  348. **/
  349. static struct lpfc_scsi_buf *
  350. lpfc_new_scsi_buf(struct lpfc_vport *vport)
  351. {
  352. struct lpfc_hba *phba = vport->phba;
  353. struct lpfc_scsi_buf *psb;
  354. struct ulp_bde64 *bpl;
  355. IOCB_t *iocb;
  356. dma_addr_t pdma_phys_fcp_cmd;
  357. dma_addr_t pdma_phys_fcp_rsp;
  358. dma_addr_t pdma_phys_bpl;
  359. uint16_t iotag;
  360. psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
  361. if (!psb)
  362. return NULL;
  363. /*
  364. * Get memory from the pci pool to map the virt space to pci bus space
  365. * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
  366. * struct fcp_rsp and the number of bde's necessary to support the
  367. * sg_tablesize.
  368. */
  369. psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
  370. &psb->dma_handle);
  371. if (!psb->data) {
  372. kfree(psb);
  373. return NULL;
  374. }
  375. /* Initialize virtual ptrs to dma_buf region. */
  376. memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
  377. /* Allocate iotag for psb->cur_iocbq. */
  378. iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
  379. if (iotag == 0) {
  380. pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
  381. psb->data, psb->dma_handle);
  382. kfree (psb);
  383. return NULL;
  384. }
  385. psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
  386. psb->fcp_cmnd = psb->data;
  387. psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
  388. psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
  389. sizeof(struct fcp_rsp);
  390. /* Initialize local short-hand pointers. */
  391. bpl = psb->fcp_bpl;
  392. pdma_phys_fcp_cmd = psb->dma_handle;
  393. pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
  394. pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
  395. sizeof(struct fcp_rsp);
  396. /*
  397. * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
  398. * list bdes. Initialize the first two and leave the rest for
  399. * queuecommand.
  400. */
  401. bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
  402. bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
  403. bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
  404. bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  405. bpl[0].tus.w = le32_to_cpu(bpl->tus.w);
  406. /* Setup the physical region for the FCP RSP */
  407. bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
  408. bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
  409. bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
  410. bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  411. bpl[1].tus.w = le32_to_cpu(bpl->tus.w);
  412. /*
  413. * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
  414. * initialize it with all known data now.
  415. */
  416. iocb = &psb->cur_iocbq.iocb;
  417. iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
  418. if (phba->sli_rev == 3) {
  419. /* fill in immediate fcp command BDE */
  420. iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
  421. iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
  422. iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
  423. unsli3.fcp_ext.icd);
  424. iocb->un.fcpi64.bdl.addrHigh = 0;
  425. iocb->ulpBdeCount = 0;
  426. iocb->ulpLe = 0;
  427. /* fill in response BDE */
  428. iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  429. iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
  430. sizeof(struct fcp_rsp);
  431. iocb->unsli3.fcp_ext.rbde.addrLow =
  432. putPaddrLow(pdma_phys_fcp_rsp);
  433. iocb->unsli3.fcp_ext.rbde.addrHigh =
  434. putPaddrHigh(pdma_phys_fcp_rsp);
  435. } else {
  436. iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  437. iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
  438. iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
  439. iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
  440. iocb->ulpBdeCount = 1;
  441. iocb->ulpLe = 1;
  442. }
  443. iocb->ulpClass = CLASS3;
  444. return psb;
  445. }
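/*
 * Illustrative note: the single pci_pool allocation made above is carved
 * up as follows, with offsets relative to psb->data / psb->dma_handle:
 * offset 0 holds the struct fcp_cmnd, followed by the struct fcp_rsp,
 * followed by the BPL, where bpl[0] points back at the FCP_CMND, bpl[1]
 * at the FCP_RSP, and bpl[2] onward are the data BDEs filled in per I/O
 * by lpfc_scsi_prep_dma_buf().
 */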
  446. /**
  447. * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list list of Hba.
  448. * @phba: The Hba for which this call is being executed.
  449. *
  450. * This routine removes a scsi buffer from the head of the @phba
  451. * lpfc_scsi_buf_list list and returns it to the caller.
  452. *
  453. * Return codes:
  454. * NULL - Error
  455. * Pointer to lpfc_scsi_buf - Success
  456. **/
  457. static struct lpfc_scsi_buf*
  458. lpfc_get_scsi_buf(struct lpfc_hba * phba)
  459. {
  460. struct lpfc_scsi_buf * lpfc_cmd = NULL;
  461. struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
  462. unsigned long iflag = 0;
  463. spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
  464. list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
  465. if (lpfc_cmd) {
  466. lpfc_cmd->seg_cnt = 0;
  467. lpfc_cmd->nonsg_phys = 0;
  468. }
  469. spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
  470. return lpfc_cmd;
  471. }
  472. /**
  473. * lpfc_release_scsi_buf: Return a scsi buffer back to hba lpfc_scsi_buf_list list.
  474. * @phba: The Hba for which this call is being executed.
  475. * @psb: The scsi buffer which is being released.
  476. *
  477. * This routine releases @psb scsi buffer by adding it to tail of @phba
  478. * lpfc_scsi_buf_list list.
  479. **/
  480. static void
  481. lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
  482. {
  483. unsigned long iflag = 0;
  484. spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
  485. psb->pCmd = NULL;
  486. list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
  487. spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
  488. }
  489. /**
  490. * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
  491. * @phba: The Hba for which this call is being executed.
  492. * @lpfc_cmd: The scsi buffer which is going to be mapped.
  493. *
  494. * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
  495. * field of @lpfc_cmd. This routine scans through the sg elements and formats
  496. * the BDEs. This routine also initializes all IOCB fields which are dependent on
  497. * scsi command request buffer.
  498. *
  499. * Return codes:
  500. * 1 - Error
  501. * 0 - Success
  502. **/
  503. static int
  504. lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
  505. {
  506. struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
  507. struct scatterlist *sgel = NULL;
  508. struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
  509. struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
  510. IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
  511. struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
  512. dma_addr_t physaddr;
  513. uint32_t num_bde = 0;
  514. int nseg, datadir = scsi_cmnd->sc_data_direction;
  515. /*
  516. * There are three possibilities here - use scatter-gather segment, use
  517. * the single mapping, or neither. Start the lpfc command prep by
  518. * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
  519. * data bde entry.
  520. */
  521. bpl += 2;
  522. if (scsi_sg_count(scsi_cmnd)) {
  523. /*
  524. * The driver stores the segment count returned from pci_map_sg
  525. * because this is a count of dma mappings used to map the use_sg
  526. * pages. They are not guaranteed to be the same for those
  527. * architectures that implement an IOMMU.
  528. */
  529. nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
  530. scsi_sg_count(scsi_cmnd), datadir);
  531. if (unlikely(!nseg))
  532. return 1;
  533. lpfc_cmd->seg_cnt = nseg;
  534. if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
  535. printk(KERN_ERR "%s: Too many sg segments from "
  536. "dma_map_sg. Config %d, seg_cnt %d",
  537. __func__, phba->cfg_sg_seg_cnt,
  538. lpfc_cmd->seg_cnt);
  539. scsi_dma_unmap(scsi_cmnd);
  540. return 1;
  541. }
  542. /*
  543. * The driver established a maximum scatter-gather segment count
  544. * during probe that limits the number of sg elements in any
  545. * single scsi command. Just run through the seg_cnt and format
  546. * the bde's.
  547. * When using SLI-3 the driver will try to fit all the BDEs into
  548. * the IOCB. If it can't then the BDEs get added to a BPL as it
  549. * does for SLI-2 mode.
  550. */
  551. scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
  552. physaddr = sg_dma_address(sgel);
  553. if (phba->sli_rev == 3 &&
  554. nseg <= LPFC_EXT_DATA_BDE_COUNT) {
  555. data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  556. data_bde->tus.f.bdeSize = sg_dma_len(sgel);
  557. data_bde->addrLow = putPaddrLow(physaddr);
  558. data_bde->addrHigh = putPaddrHigh(physaddr);
  559. data_bde++;
  560. } else {
  561. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  562. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  563. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  564. bpl->addrLow =
  565. le32_to_cpu(putPaddrLow(physaddr));
  566. bpl->addrHigh =
  567. le32_to_cpu(putPaddrHigh(physaddr));
  568. bpl++;
  569. }
  570. }
  571. }
  572. /*
  573. * Finish initializing those IOCB fields that are dependent on the
  574. * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
  575. * explicitly reinitialized and for SLI-3 the extended bde count is
  576. * explicitly reinitialized since all iocb memory resources are reused.
  577. */
  578. if (phba->sli_rev == 3) {
  579. if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
  580. /*
  581. * The extended IOCB format can only fit 3 BDE or a BPL.
  582. * This I/O has more than 3 BDE so the 1st data bde will
  583. * be a BPL that is filled in here.
  584. */
  585. physaddr = lpfc_cmd->dma_handle;
  586. data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
  587. data_bde->tus.f.bdeSize = (num_bde *
  588. sizeof(struct ulp_bde64));
  589. physaddr += (sizeof(struct fcp_cmnd) +
  590. sizeof(struct fcp_rsp) +
  591. (2 * sizeof(struct ulp_bde64)));
  592. data_bde->addrHigh = putPaddrHigh(physaddr);
  593. data_bde->addrLow = putPaddrLow(physaddr);
  594. /* ebde count includes the response bde and data bpl */
  595. iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
  596. } else {
  597. /* ebde count includes the response bde and data bdes */
  598. iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
  599. }
  600. } else {
  601. iocb_cmd->un.fcpi64.bdl.bdeSize =
  602. ((num_bde + 2) * sizeof(struct ulp_bde64));
  603. }
  604. fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
  605. return 0;
  606. }
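/*
 * Illustrative note: under SLI-3, a command with at most
 * LPFC_EXT_DATA_BDE_COUNT segments has its data BDEs placed directly in
 * the extended IOCB and ebde_count = num_bde + 1 (counting the response
 * BDE). A command with more segments, e.g. nseg = 5, has its data BDEs
 * written to the BPL starting at bpl[2], and the first extended data BDE
 * is turned into a BUFF_TYPE_BLP_64 entry pointing at that BPL
 * (ebde_count = 2). SLI-2 always uses the BPL and only records its total
 * size in un.fcpi64.bdl.bdeSize.
 */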
  607. /**
  608. * lpfc_send_scsi_error_event: Posts an event when there is SCSI error.
  609. * @phba: Pointer to hba context object.
  610. * @vport: Pointer to vport object.
  611. * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
  612. * @rsp_iocb: Pointer to response iocb object which reported error.
  613. *
  614. * This function posts an event when there is a SCSI command reporting
  615. * error from the scsi device.
  616. **/
  617. static void
  618. lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
  619. struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
  620. struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
  621. struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
  622. uint32_t resp_info = fcprsp->rspStatus2;
  623. uint32_t scsi_status = fcprsp->rspStatus3;
  624. uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
  625. struct lpfc_fast_path_event *fast_path_evt = NULL;
  626. struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
  627. unsigned long flags;
  628. /* If there is queuefull or busy condition send a scsi event */
  629. if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
  630. (cmnd->result == SAM_STAT_BUSY)) {
  631. fast_path_evt = lpfc_alloc_fast_evt(phba);
  632. if (!fast_path_evt)
  633. return;
  634. fast_path_evt->un.scsi_evt.event_type =
  635. FC_REG_SCSI_EVENT;
  636. fast_path_evt->un.scsi_evt.subcategory =
  637. (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
  638. LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
  639. fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
  640. memcpy(&fast_path_evt->un.scsi_evt.wwpn,
  641. &pnode->nlp_portname, sizeof(struct lpfc_name));
  642. memcpy(&fast_path_evt->un.scsi_evt.wwnn,
  643. &pnode->nlp_nodename, sizeof(struct lpfc_name));
  644. } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
  645. ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
  646. fast_path_evt = lpfc_alloc_fast_evt(phba);
  647. if (!fast_path_evt)
  648. return;
  649. fast_path_evt->un.check_cond_evt.scsi_event.event_type =
  650. FC_REG_SCSI_EVENT;
  651. fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
  652. LPFC_EVENT_CHECK_COND;
  653. fast_path_evt->un.check_cond_evt.scsi_event.lun =
  654. cmnd->device->lun;
  655. memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
  656. &pnode->nlp_portname, sizeof(struct lpfc_name));
  657. memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
  658. &pnode->nlp_nodename, sizeof(struct lpfc_name));
  659. fast_path_evt->un.check_cond_evt.sense_key =
  660. cmnd->sense_buffer[2] & 0xf;
  661. fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
  662. fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
  663. } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
  664. fcpi_parm &&
  665. ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
  666. ((scsi_status == SAM_STAT_GOOD) &&
  667. !(resp_info & (RESID_UNDER | RESID_OVER))))) {
  668. /*
  669. * If the resid does not match fcpi_parm, or the status is good with no
  670. * residual flags set, and fcpi_parm is valid, then there is a read check error
  671. */
  672. fast_path_evt = lpfc_alloc_fast_evt(phba);
  673. if (!fast_path_evt)
  674. return;
  675. fast_path_evt->un.read_check_error.header.event_type =
  676. FC_REG_FABRIC_EVENT;
  677. fast_path_evt->un.read_check_error.header.subcategory =
  678. LPFC_EVENT_FCPRDCHKERR;
  679. memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
  680. &pnode->nlp_portname, sizeof(struct lpfc_name));
  681. memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
  682. &pnode->nlp_nodename, sizeof(struct lpfc_name));
  683. fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
  684. fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
  685. fast_path_evt->un.read_check_error.fcpiparam =
  686. fcpi_parm;
  687. } else
  688. return;
  689. fast_path_evt->vport = vport;
  690. spin_lock_irqsave(&phba->hbalock, flags);
  691. list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
  692. spin_unlock_irqrestore(&phba->hbalock, flags);
  693. lpfc_worker_wake_up(phba);
  694. return;
  695. }
  696. /**
  697. * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
  698. * @phba: The Hba for which this call is being executed.
  699. * @psb: The scsi buffer which is going to be un-mapped.
  700. *
  701. * This routine does DMA un-mapping of the scatter gather list of the scsi
  702. * command field of @psb.
  703. **/
  704. static void
  705. lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
  706. {
  707. /*
  708. * There are only two special cases to consider. (1) the scsi command
  709. * requested scatter-gather usage or (2) the scsi command allocated
  710. * a request buffer, but did not request use_sg. There is a third
  711. * case, but it does not require resource deallocation.
  712. */
  713. if (psb->seg_cnt > 0)
  714. scsi_dma_unmap(psb->pCmd);
  715. }
  716. /**
  717. * lpfc_handle_fcp_err: FCP response handler.
  718. * @vport: The virtual port for which this call is being executed.
  719. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
  720. * @rsp_iocb: The response IOCB which contains FCP error.
  721. *
  722. * This routine is called to process response IOCB with status field
  723. * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
  724. * based upon SCSI and FCP error.
  725. **/
  726. static void
  727. lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
  728. struct lpfc_iocbq *rsp_iocb)
  729. {
  730. struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
  731. struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
  732. struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
  733. uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
  734. uint32_t resp_info = fcprsp->rspStatus2;
  735. uint32_t scsi_status = fcprsp->rspStatus3;
  736. uint32_t *lp;
  737. uint32_t host_status = DID_OK;
  738. uint32_t rsplen = 0;
  739. uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
  740. /*
  741. * If this is a task management command, there is no
  742. * scsi packet associated with this lpfc_cmd. The driver
  743. * consumes it.
  744. */
  745. if (fcpcmd->fcpCntl2) {
  746. scsi_status = 0;
  747. goto out;
  748. }
  749. if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
  750. uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
  751. if (snslen > SCSI_SENSE_BUFFERSIZE)
  752. snslen = SCSI_SENSE_BUFFERSIZE;
  753. if (resp_info & RSP_LEN_VALID)
  754. rsplen = be32_to_cpu(fcprsp->rspRspLen);
  755. memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
  756. }
  757. lp = (uint32_t *)cmnd->sense_buffer;
  758. if (!scsi_status && (resp_info & RESID_UNDER))
  759. logit = LOG_FCP;
  760. lpfc_printf_vlog(vport, KERN_WARNING, logit,
  761. "0730 FCP command x%x failed: x%x SNS x%x x%x "
  762. "Data: x%x x%x x%x x%x x%x\n",
  763. cmnd->cmnd[0], scsi_status,
  764. be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
  765. be32_to_cpu(fcprsp->rspResId),
  766. be32_to_cpu(fcprsp->rspSnsLen),
  767. be32_to_cpu(fcprsp->rspRspLen),
  768. fcprsp->rspInfo3);
  769. if (resp_info & RSP_LEN_VALID) {
  770. rsplen = be32_to_cpu(fcprsp->rspRspLen);
  771. if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
  772. (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
  773. host_status = DID_ERROR;
  774. goto out;
  775. }
  776. }
  777. scsi_set_resid(cmnd, 0);
  778. if (resp_info & RESID_UNDER) {
  779. scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
  780. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  781. "0716 FCP Read Underrun, expected %d, "
  782. "residual %d Data: x%x x%x x%x\n",
  783. be32_to_cpu(fcpcmd->fcpDl),
  784. scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
  785. cmnd->underflow);
  786. /*
  787. * If there is an under run check if under run reported by
  788. * storage array is same as the under run reported by HBA.
  789. * If this is not same, there is a dropped frame.
  790. */
  791. if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
  792. fcpi_parm &&
  793. (scsi_get_resid(cmnd) != fcpi_parm)) {
  794. lpfc_printf_vlog(vport, KERN_WARNING,
  795. LOG_FCP | LOG_FCP_ERROR,
  796. "0735 FCP Read Check Error "
  797. "and Underrun Data: x%x x%x x%x x%x\n",
  798. be32_to_cpu(fcpcmd->fcpDl),
  799. scsi_get_resid(cmnd), fcpi_parm,
  800. cmnd->cmnd[0]);
  801. scsi_set_resid(cmnd, scsi_bufflen(cmnd));
  802. host_status = DID_ERROR;
  803. }
  804. /*
  805. * The cmnd->underflow is the minimum number of bytes that must
  806. * be transferred for this command. Provided a sense condition
  807. * is not present, make sure the actual amount transferred is at
  808. * least the underflow value or fail.
  809. */
  810. if (!(resp_info & SNS_LEN_VALID) &&
  811. (scsi_status == SAM_STAT_GOOD) &&
  812. (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
  813. < cmnd->underflow)) {
  814. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  815. "0717 FCP command x%x residual "
  816. "underrun converted to error "
  817. "Data: x%x x%x x%x\n",
  818. cmnd->cmnd[0], scsi_bufflen(cmnd),
  819. scsi_get_resid(cmnd), cmnd->underflow);
  820. host_status = DID_ERROR;
  821. }
  822. } else if (resp_info & RESID_OVER) {
  823. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  824. "0720 FCP command x%x residual overrun error. "
  825. "Data: x%x x%x \n", cmnd->cmnd[0],
  826. scsi_bufflen(cmnd), scsi_get_resid(cmnd));
  827. host_status = DID_ERROR;
  828. /*
  829. * Check SLI validation that all the transfer was actually done
  830. * (fcpi_parm should be zero). Apply check only to reads.
  831. */
  832. } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
  833. (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
  834. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
  835. "0734 FCP Read Check Error Data: "
  836. "x%x x%x x%x x%x\n",
  837. be32_to_cpu(fcpcmd->fcpDl),
  838. be32_to_cpu(fcprsp->rspResId),
  839. fcpi_parm, cmnd->cmnd[0]);
  840. host_status = DID_ERROR;
  841. scsi_set_resid(cmnd, scsi_bufflen(cmnd));
  842. }
  843. out:
  844. cmnd->result = ScsiResult(host_status, scsi_status);
  845. lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
  846. }
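/*
 * Illustrative note: on a read underrun the residual reported by the
 * target (rspResId) and the underrun counted by the HBA (fcpi_parm in
 * the response IOCB) are expected to match. For example, with
 * fcpDl = 4096 and RESID_UNDER set, rspResId = 512 makes the midlayer
 * residual 512; if fcpi_parm is non-zero and differs, the routine above
 * assumes a dropped frame, sets the residual to the full buffer length
 * and fails the command with DID_ERROR.
 */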
  847. /**
  848. * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
  849. * @phba: The Hba for which this call is being executed.
  850. * @pIocbIn: The command IOCBQ for the scsi cmnd.
  851. * @pIocbOut: The response IOCBQ for the scsi cmnd.
  852. *
  853. * This routine assigns scsi command result by looking into response IOCB
  854. * status field appropriately. This routine handles QUEUE FULL condition as
  855. * well by ramping down device queue depth.
  856. **/
  857. static void
  858. lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
  859. struct lpfc_iocbq *pIocbOut)
  860. {
  861. struct lpfc_scsi_buf *lpfc_cmd =
  862. (struct lpfc_scsi_buf *) pIocbIn->context1;
  863. struct lpfc_vport *vport = pIocbIn->vport;
  864. struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
  865. struct lpfc_nodelist *pnode = rdata->pnode;
  866. struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
  867. int result;
  868. struct scsi_device *sdev, *tmp_sdev;
  869. int depth = 0;
  870. unsigned long flags;
  871. struct lpfc_fast_path_event *fast_path_evt;
  872. lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
  873. lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
  874. if (pnode && NLP_CHK_NODE_ACT(pnode))
  875. atomic_dec(&pnode->cmd_pending);
  876. if (lpfc_cmd->status) {
  877. if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
  878. (lpfc_cmd->result & IOERR_DRVR_MASK))
  879. lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
  880. else if (lpfc_cmd->status >= IOSTAT_CNT)
  881. lpfc_cmd->status = IOSTAT_DEFAULT;
  882. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  883. "0729 FCP cmd x%x failed <%d/%d> "
  884. "status: x%x result: x%x Data: x%x x%x\n",
  885. cmd->cmnd[0],
  886. cmd->device ? cmd->device->id : 0xffff,
  887. cmd->device ? cmd->device->lun : 0xffff,
  888. lpfc_cmd->status, lpfc_cmd->result,
  889. pIocbOut->iocb.ulpContext,
  890. lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
  891. switch (lpfc_cmd->status) {
  892. case IOSTAT_FCP_RSP_ERROR:
  893. /* Call FCP RSP handler to determine result */
  894. lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
  895. break;
  896. case IOSTAT_NPORT_BSY:
  897. case IOSTAT_FABRIC_BSY:
  898. cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
  899. fast_path_evt = lpfc_alloc_fast_evt(phba);
  900. if (!fast_path_evt)
  901. break;
  902. fast_path_evt->un.fabric_evt.event_type =
  903. FC_REG_FABRIC_EVENT;
  904. fast_path_evt->un.fabric_evt.subcategory =
  905. (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
  906. LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
  907. if (pnode && NLP_CHK_NODE_ACT(pnode)) {
  908. memcpy(&fast_path_evt->un.fabric_evt.wwpn,
  909. &pnode->nlp_portname,
  910. sizeof(struct lpfc_name));
  911. memcpy(&fast_path_evt->un.fabric_evt.wwnn,
  912. &pnode->nlp_nodename,
  913. sizeof(struct lpfc_name));
  914. }
  915. fast_path_evt->vport = vport;
  916. fast_path_evt->work_evt.evt =
  917. LPFC_EVT_FASTPATH_MGMT_EVT;
  918. spin_lock_irqsave(&phba->hbalock, flags);
  919. list_add_tail(&fast_path_evt->work_evt.evt_listp,
  920. &phba->work_list);
  921. spin_unlock_irqrestore(&phba->hbalock, flags);
  922. lpfc_worker_wake_up(phba);
  923. break;
  924. case IOSTAT_LOCAL_REJECT:
  925. if (lpfc_cmd->result == IOERR_INVALID_RPI ||
  926. lpfc_cmd->result == IOERR_NO_RESOURCES ||
  927. lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
  928. cmd->result = ScsiResult(DID_REQUEUE, 0);
  929. break;
  930. } /* else: fall through */
  931. default:
  932. cmd->result = ScsiResult(DID_ERROR, 0);
  933. break;
  934. }
  935. if (!pnode || !NLP_CHK_NODE_ACT(pnode)
  936. || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
  937. cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
  938. SAM_STAT_BUSY);
  939. } else {
  940. cmd->result = ScsiResult(DID_OK, 0);
  941. }
  942. if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
  943. uint32_t *lp = (uint32_t *)cmd->sense_buffer;
  944. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  945. "0710 Iodone <%d/%d> cmd %p, error "
  946. "x%x SNS x%x x%x Data: x%x x%x\n",
  947. cmd->device->id, cmd->device->lun, cmd,
  948. cmd->result, *lp, *(lp + 3), cmd->retries,
  949. scsi_get_resid(cmd));
  950. }
  951. lpfc_update_stats(phba, lpfc_cmd);
  952. result = cmd->result;
  953. sdev = cmd->device;
  954. if (vport->cfg_max_scsicmpl_time &&
  955. time_after(jiffies, lpfc_cmd->start_time +
  956. msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
  957. spin_lock_irqsave(sdev->host->host_lock, flags);
  958. if (pnode && NLP_CHK_NODE_ACT(pnode)) {
  959. if (pnode->cmd_qdepth >
  960. atomic_read(&pnode->cmd_pending) &&
  961. (atomic_read(&pnode->cmd_pending) >
  962. LPFC_MIN_TGT_QDEPTH) &&
  963. ((cmd->cmnd[0] == READ_10) ||
  964. (cmd->cmnd[0] == WRITE_10)))
  965. pnode->cmd_qdepth =
  966. atomic_read(&pnode->cmd_pending);
  967. pnode->last_change_time = jiffies;
  968. }
  969. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  970. } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
  971. if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
  972. time_after(jiffies, pnode->last_change_time +
  973. msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
  974. spin_lock_irqsave(sdev->host->host_lock, flags);
  975. pnode->cmd_qdepth += pnode->cmd_qdepth *
  976. LPFC_TGTQ_RAMPUP_PCENT / 100;
  977. if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
  978. pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
  979. pnode->last_change_time = jiffies;
  980. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  981. }
  982. }
  983. lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
  984. cmd->scsi_done(cmd);
  985. if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
  986. /*
  987. * If there is a thread waiting for command completion
  988. * wake up the thread.
  989. */
  990. spin_lock_irqsave(sdev->host->host_lock, flags);
  991. lpfc_cmd->pCmd = NULL;
  992. if (lpfc_cmd->waitq)
  993. wake_up(lpfc_cmd->waitq);
  994. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  995. lpfc_release_scsi_buf(phba, lpfc_cmd);
  996. return;
  997. }
  998. if (!result)
  999. lpfc_rampup_queue_depth(vport, sdev);
  1000. if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
  1001. ((jiffies - pnode->last_ramp_up_time) >
  1002. LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
  1003. ((jiffies - pnode->last_q_full_time) >
  1004. LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
  1005. (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
  1006. shost_for_each_device(tmp_sdev, sdev->host) {
  1007. if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
  1008. if (tmp_sdev->id != sdev->id)
  1009. continue;
  1010. if (tmp_sdev->ordered_tags)
  1011. scsi_adjust_queue_depth(tmp_sdev,
  1012. MSG_ORDERED_TAG,
  1013. tmp_sdev->queue_depth+1);
  1014. else
  1015. scsi_adjust_queue_depth(tmp_sdev,
  1016. MSG_SIMPLE_TAG,
  1017. tmp_sdev->queue_depth+1);
  1018. pnode->last_ramp_up_time = jiffies;
  1019. }
  1020. }
  1021. lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
  1022. 0xFFFFFFFF,
  1023. sdev->queue_depth - 1, sdev->queue_depth);
  1024. }
  1025. /*
  1026. * Check for queue full. If the lun is reporting queue full, then
  1027. * back off the lun queue depth to prevent target overloads.
  1028. */
  1029. if (result == SAM_STAT_TASK_SET_FULL && pnode &&
  1030. NLP_CHK_NODE_ACT(pnode)) {
  1031. pnode->last_q_full_time = jiffies;
  1032. shost_for_each_device(tmp_sdev, sdev->host) {
  1033. if (tmp_sdev->id != sdev->id)
  1034. continue;
  1035. depth = scsi_track_queue_full(tmp_sdev,
  1036. tmp_sdev->queue_depth - 1);
  1037. }
  1038. /*
  1039. * The queue depth cannot be lowered any more.
  1040. * Modify the returned error code to store
  1041. * the final depth value set by
  1042. * scsi_track_queue_full.
  1043. */
  1044. if (depth == -1)
  1045. depth = sdev->host->cmd_per_lun;
  1046. if (depth) {
  1047. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  1048. "0711 detected queue full - lun queue "
  1049. "depth adjusted to %d.\n", depth);
  1050. lpfc_send_sdev_queuedepth_change_event(phba, vport,
  1051. pnode, 0xFFFFFFFF,
  1052. depth+1, depth);
  1053. }
  1054. }
  1055. /*
  1056. * If there is a thread waiting for command completion
  1057. * wake up the thread.
  1058. */
  1059. spin_lock_irqsave(sdev->host->host_lock, flags);
  1060. lpfc_cmd->pCmd = NULL;
  1061. if (lpfc_cmd->waitq)
  1062. wake_up(lpfc_cmd->waitq);
  1063. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  1064. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1065. }
  1066. /**
  1067. * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
  1068. * @data: A pointer to the immediate command data portion of the IOCB.
  1069. * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
  1070. *
  1071. * The routine copies the entire FCP command from @fcp_cmnd to @data while
  1072. * byte swapping the data to big endian format for transmission on the wire.
  1073. **/
  1074. static void
  1075. lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
  1076. {
  1077. int i, j;
  1078. for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
  1079. i += sizeof(uint32_t), j++) {
  1080. ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
  1081. }
  1082. }
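/*
 * Illustrative note: the copy above moves the FCP command one 32-bit
 * word at a time through cpu_to_be32(). On a little-endian host a word
 * whose CPU value is 0x00000020 therefore lands in the IOCB
 * immediate-data area as the byte sequence 00 00 00 20, the big-endian
 * order expected on the wire; on a big-endian host the conversion is a
 * no-op.
 */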
  1083. /**
  1084. * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
  1085. * @vport: The virtual port for which this call is being executed.
  1086. * @lpfc_cmd: The scsi command which needs to be sent.
  1087. * @pnode: Pointer to lpfc_nodelist.
  1088. *
  1089. * This routine initializes fcp_cmnd and iocb data structure from scsi command
  1090. * to transfer.
  1091. **/
  1092. static void
  1093. lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
  1094. struct lpfc_nodelist *pnode)
  1095. {
  1096. struct lpfc_hba *phba = vport->phba;
  1097. struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
  1098. struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
  1099. IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
  1100. struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
  1101. int datadir = scsi_cmnd->sc_data_direction;
  1102. char tag[2];
  1103. if (!pnode || !NLP_CHK_NODE_ACT(pnode))
  1104. return;
  1105. lpfc_cmd->fcp_rsp->rspSnsLen = 0;
  1106. /* clear task management bits */
  1107. lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
  1108. int_to_scsilun(lpfc_cmd->pCmd->device->lun,
  1109. &lpfc_cmd->fcp_cmnd->fcp_lun);
  1110. memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
  1111. if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
  1112. switch (tag[0]) {
  1113. case HEAD_OF_QUEUE_TAG:
  1114. fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
  1115. break;
  1116. case ORDERED_QUEUE_TAG:
  1117. fcp_cmnd->fcpCntl1 = ORDERED_Q;
  1118. break;
  1119. default:
  1120. fcp_cmnd->fcpCntl1 = SIMPLE_Q;
  1121. break;
  1122. }
  1123. } else
  1124. fcp_cmnd->fcpCntl1 = 0;
  1125. /*
  1126. * There are three possibilities here - use scatter-gather segment, use
  1127. * the single mapping, or neither. Start the lpfc command prep by
  1128. * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
  1129. * data bde entry.
  1130. */
  1131. if (scsi_sg_count(scsi_cmnd)) {
  1132. if (datadir == DMA_TO_DEVICE) {
  1133. iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
  1134. iocb_cmd->un.fcpi.fcpi_parm = 0;
  1135. iocb_cmd->ulpPU = 0;
  1136. fcp_cmnd->fcpCntl3 = WRITE_DATA;
  1137. phba->fc4OutputRequests++;
  1138. } else {
  1139. iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
  1140. iocb_cmd->ulpPU = PARM_READ_CHECK;
  1141. iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
  1142. fcp_cmnd->fcpCntl3 = READ_DATA;
  1143. phba->fc4InputRequests++;
  1144. }
  1145. } else {
  1146. iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
  1147. iocb_cmd->un.fcpi.fcpi_parm = 0;
  1148. iocb_cmd->ulpPU = 0;
  1149. fcp_cmnd->fcpCntl3 = 0;
  1150. phba->fc4ControlRequests++;
  1151. }
  1152. if (phba->sli_rev == 3)
  1153. lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
  1154. /*
  1155. * Finish initializing those IOCB fields that are independent
  1156. * of the scsi_cmnd request_buffer
  1157. */
  1158. piocbq->iocb.ulpContext = pnode->nlp_rpi;
  1159. if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
  1160. piocbq->iocb.ulpFCP2Rcvy = 1;
  1161. else
  1162. piocbq->iocb.ulpFCP2Rcvy = 0;
  1163. piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
  1164. piocbq->context1 = lpfc_cmd;
  1165. piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
  1166. piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
  1167. piocbq->vport = vport;
  1168. }
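/*
 * Illustrative note: for reads the routine above sets ulpPU to
 * PARM_READ_CHECK and loads fcpi_parm with the expected transfer length
 * (scsi_bufflen). On completion, lpfc_handle_fcp_err() uses the
 * fcpi_parm value returned in the response IOCB to cross-check the
 * residual reported by the target and detect dropped frames.
 */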
  1169. /**
  1170. * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
  1171. * @vport: The virtual port for which this call is being executed.
  1172. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
  1173. * @lun: Logical unit number.
  1174. * @task_mgmt_cmd: SCSI task management command.
  1175. *
  1176. * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
  1177. *
  1178. * Return codes:
  1179. * 0 - Error
  1180. * 1 - Success
  1181. **/
  1182. static int
  1183. lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
  1184. struct lpfc_scsi_buf *lpfc_cmd,
  1185. unsigned int lun,
  1186. uint8_t task_mgmt_cmd)
  1187. {
  1188. struct lpfc_iocbq *piocbq;
  1189. IOCB_t *piocb;
  1190. struct fcp_cmnd *fcp_cmnd;
  1191. struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
  1192. struct lpfc_nodelist *ndlp = rdata->pnode;
  1193. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
  1194. ndlp->nlp_state != NLP_STE_MAPPED_NODE)
  1195. return 0;
  1196. piocbq = &(lpfc_cmd->cur_iocbq);
  1197. piocbq->vport = vport;
  1198. piocb = &piocbq->iocb;
  1199. fcp_cmnd = lpfc_cmd->fcp_cmnd;
  1200. /* Clear out any old data in the FCP command area */
  1201. memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
  1202. int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
  1203. fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
  1204. if (vport->phba->sli_rev == 3)
  1205. lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
  1206. piocb->ulpCommand = CMD_FCP_ICMND64_CR;
  1207. piocb->ulpContext = ndlp->nlp_rpi;
  1208. if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
  1209. piocb->ulpFCP2Rcvy = 1;
  1210. }
  1211. piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
  1212. /* ulpTimeout is only one byte */
  1213. if (lpfc_cmd->timeout > 0xff) {
  1214. /*
  1215. * Do not timeout the command at the firmware level.
  1216. * The driver will provide the timeout mechanism.
  1217. */
  1218. piocb->ulpTimeout = 0;
  1219. } else {
  1220. piocb->ulpTimeout = lpfc_cmd->timeout;
  1221. }
  1222. return 1;
  1223. }
  1224. /**
  1225. * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
  1226. * @phba: The Hba for which this call is being executed.
  1227. * @cmdiocbq: Pointer to lpfc_iocbq data structure.
  1228. * @rspiocbq: Pointer to lpfc_iocbq data structure.
  1229. *
  1230. * This routine is the IOCB completion routine for device reset and target
  1231. * reset. It releases the scsi buffer associated with lpfc_cmd.
  1232. **/
  1233. static void
  1234. lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
  1235. struct lpfc_iocbq *cmdiocbq,
  1236. struct lpfc_iocbq *rspiocbq)
  1237. {
  1238. struct lpfc_scsi_buf *lpfc_cmd =
  1239. (struct lpfc_scsi_buf *) cmdiocbq->context1;
  1240. if (lpfc_cmd)
  1241. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1242. return;
  1243. }

/**
 * lpfc_scsi_tgt_reset: Target reset handler.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
 * @vport: The virtual port for which this call is being executed.
 * @tgt_id: Target ID.
 * @lun: Lun number.
 * @rdata: Pointer to lpfc_rport_data.
 *
 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 **/
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;
	int status;

	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;
	lpfc_cmd->rdata = rdata;
	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					      FCP_TARGET_RESET);
	if (!status)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		} else
			ret = FAILED;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}
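
/*
 * Note on the IOCB_TIMEDOUT case above: the completion handler is switched to
 * lpfc_tskmgmt_def_cmpl so the lpfc_scsi_buf is released when the iocb finally
 * completes; callers therefore release the buffer themselves only when the
 * status is not TIMEOUT_ERROR (see lpfc_bus_reset_handler() below).
 */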

/**
 * lpfc_info: Info entry point of scsi_host_template data structure.
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer: Routine to modify fcp_poll timer of hba.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}
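
/*
 * Polled-completion model: when ENABLE_FCP_RING_POLLING is configured, FCP
 * completions are reaped by lpfc_sli_poll_fcp_ring() rather than (or in
 * addition to) the ring interrupt, and the helper above only re-arms
 * fcp_poll_timer while commands are still outstanding on the txcmplq.
 * lpfc_poll_timeout() below is the timer handler that keeps this cycle going.
 */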

/**
 * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout: Restart polling timer.
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
 **/
void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring (phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand: Queuecommand entry point of Scsi Host Template data
 * structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to done routine.
 *
 * Driver registers this routine to the scsi midlayer to submit a @cmnd to
 * process. This routine prepares an IOCB from the scsi command and submits it
 * to the firmware. The @done callback is invoked after the driver has finished
 * processing the command.
 *
 * Return value:
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
		goto out_fail_command;
	}
	if (vport->cfg_max_scsicmpl_time &&
	    (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
		goto out_host_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
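
	/*
	 * Error unwinding: out_host_busy_free_buf undoes the DMA mapping and
	 * returns the lpfc_scsi_buf before asking the midlayer to retry,
	 * out_host_busy retries with nothing to free, and out_fail_command
	 * completes the command immediately with the result set above.
	 */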
 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

/**
 * lpfc_block_error_handler: Routine to block error handler.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine blocks execution until the fc_rport state is no longer
 * FC_PORTSTATE_BLOCKED.
 **/
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}

/**
 * lpfc_abort_handler: eh_abort_handler entry point of Scsi Host Template data
 * structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 *
 * Return code:
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in the txq, and it is in flight, because
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));

	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}

/**
 * lpfc_device_reset_handler: eh_device_reset entry point of Scsi Host Template
 * data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code:
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	unsigned long later;
	int ret = SUCCESS;
	int status;
	int cnt;
	struct lpfc_scsi_event_header scsi_event;

	lpfc_block_error_handler(cmnd);
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			break;
		pnode = rdata->pnode;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		LPFC_NL_VENDOR_ID);

	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0721 LUN Reset rport "
				 "failure: msec x%x rdata x%p\n",
				 jiffies_to_msecs(jiffies - later), rdata);
		return FAILED;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
					      cmnd->device->lun,
					      FCP_TARGET_RESET);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status == IOCB_TIMEDOUT) {
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		ret = TIMEOUT_ERROR;
	} else {
		if (status != IOCB_SUCCESS)
			ret = FAILED;
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, iocbqrsp);
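	/*
	 * Flush any I/O still outstanding for this target: abort the
	 * remaining iocbs and poll until they drain or 2 * devloss_tmo
	 * expires, mirroring the flush done in lpfc_bus_reset_handler().
	 */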
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_TGT);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_TGT);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_TGT);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}
	return ret;
}

/**
 * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
 * Template data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine issues a target reset to every target on @cmnd->device->host.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = SUCCESS, status = SUCCESS, i;
	int cnt;
	struct lpfc_scsi_buf * lpfc_cmd;
	unsigned long later;
	struct lpfc_scsi_event_header scsi_event;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		LPFC_NL_VENDOR_ID);

	lpfc_block_error_handler(cmnd);

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;
		lpfc_cmd = lpfc_get_scsi_buf(phba);
		if (lpfc_cmd) {
			lpfc_cmd->timeout = 60;
			status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
						     cmnd->device->lun,
						     ndlp->rport->dd_data);
			if (status != TIMEOUT_ERROR)
				lpfc_release_scsi_buf(phba, lpfc_cmd);
		}
		if (!lpfc_cmd || status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
 * structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. This routine also makes sure scsi
 * buffers are not allocated beyond the HBA limit conveyed to the midlayer.
 * This list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0706 Failed to allocate "
					 "command buffer\n");
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

/**
 * lpfc_slave_configure: slave_configure entry point of Scsi Host Template data
 * structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items for @sdev:
 * - Tag command queuing support, if supported.
 * - Dev loss time out value of the fc_rport.
 * - SLI polling for the fcp ring, if the ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to NULL.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}
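
/*
 * Two host templates follow: lpfc_template backs the physical port's
 * Scsi_Host while lpfc_vport_template is used for virtual ports. As defined
 * below they differ only in the sysfs attribute set wired through
 * .shost_attrs (lpfc_hba_attrs vs. lpfc_vport_attrs).
 */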

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
};