lpfc_scsi.c 65 KB

  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2004-2008 Emulex. All rights reserved. *
  5. * EMULEX and SLI are trademarks of Emulex. *
  6. * www.emulex.com *
  7. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  8. * *
  9. * This program is free software; you can redistribute it and/or *
  10. * modify it under the terms of version 2 of the GNU General *
  11. * Public License as published by the Free Software Foundation. *
  12. * This program is distributed in the hope that it will be useful. *
  13. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  14. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  15. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  16. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  17. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  18. * more details, a copy of which can be found in the file COPYING *
  19. * included with this package. *
  20. *******************************************************************/
  21. #include <linux/pci.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/delay.h>
  24. #include <scsi/scsi.h>
  25. #include <scsi/scsi_device.h>
  26. #include <scsi/scsi_host.h>
  27. #include <scsi/scsi_tcq.h>
  28. #include <scsi/scsi_transport_fc.h>
  29. #include "lpfc_version.h"
  30. #include "lpfc_hw.h"
  31. #include "lpfc_sli.h"
  32. #include "lpfc_nl.h"
  33. #include "lpfc_disc.h"
  34. #include "lpfc_scsi.h"
  35. #include "lpfc.h"
  36. #include "lpfc_logmsg.h"
  37. #include "lpfc_crtn.h"
  38. #include "lpfc_vport.h"
  39. #define LPFC_RESET_WAIT 2
  40. #define LPFC_ABORT_WAIT 2
  41. /**
  42. * lpfc_update_stats: Update statistical data for the command completion.
  43. * @phba: Pointer to HBA object.
  44. * @lpfc_cmd: lpfc scsi command object pointer.
  45. *
  46. * This function is called when a command completes and updates the
  47. * latency statistics for that completion.
  48. **/
  49. static void
  50. lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
  51. {
  52. struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
  53. struct lpfc_nodelist *pnode = rdata->pnode;
  54. struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
  55. unsigned long flags;
  56. struct Scsi_Host *shost = cmd->device->host;
  57. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  58. unsigned long latency;
  59. int i;
  60. if (cmd->result)
  61. return;
  62. latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
  63. spin_lock_irqsave(shost->host_lock, flags);
  64. if (!vport->stat_data_enabled ||
  65. vport->stat_data_blocked ||
  66. !pnode->lat_data ||
  67. (phba->bucket_type == LPFC_NO_BUCKET)) {
  68. spin_unlock_irqrestore(shost->host_lock, flags);
  69. return;
  70. }
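/*
 * Pick the latency histogram bucket. With LPFC_LINEAR_BUCKET the buckets are
 * evenly spaced: i = ceil((latency - bucket_base) / bucket_step), clamped to
 * [0, LPFC_MAX_BUCKET_COUNT - 1]. Otherwise bucket i covers latencies up to
 * bucket_base + (1 << i) * bucket_step. For example (illustrative values
 * only): with bucket_base = 0 and bucket_step = 10 ms, a 35 ms completion
 * lands in linear bucket 4 or exponential bucket 2.
 */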
  71. if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
  72. i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
  73. phba->bucket_step;
  74. /* check array subscript bounds */
  75. if (i < 0)
  76. i = 0;
  77. else if (i >= LPFC_MAX_BUCKET_COUNT)
  78. i = LPFC_MAX_BUCKET_COUNT - 1;
  79. } else {
  80. for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
  81. if (latency <= (phba->bucket_base +
  82. ((1<<i)*phba->bucket_step)))
  83. break;
  84. }
  85. pnode->lat_data[i].cmd_count++;
  86. spin_unlock_irqrestore(shost->host_lock, flags);
  87. }
  88. /**
  89. * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
  90. * event.
  91. * @phba: Pointer to HBA context object.
  92. * @vport: Pointer to vport object.
  93. * @ndlp: Pointer to FC node associated with the target.
  94. * @lun: Lun number of the scsi device.
  95. * @old_val: Old value of the queue depth.
  96. * @new_val: New value of the queue depth.
  97. *
  98. * This function sends an event to the mgmt application indicating
  99. * there is a change in the scsi device queue depth.
  100. **/
  101. static void
  102. lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
  103. struct lpfc_vport *vport,
  104. struct lpfc_nodelist *ndlp,
  105. uint32_t lun,
  106. uint32_t old_val,
  107. uint32_t new_val)
  108. {
  109. struct lpfc_fast_path_event *fast_path_evt;
  110. unsigned long flags;
  111. fast_path_evt = lpfc_alloc_fast_evt(phba);
  112. if (!fast_path_evt)
  113. return;
  114. fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
  115. FC_REG_SCSI_EVENT;
  116. fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
  117. LPFC_EVENT_VARQUEDEPTH;
  118. /* Report all luns with change in queue depth */
  119. fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
  120. if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
  121. memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
  122. &ndlp->nlp_portname, sizeof(struct lpfc_name));
  123. memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
  124. &ndlp->nlp_nodename, sizeof(struct lpfc_name));
  125. }
  126. fast_path_evt->un.queue_depth_evt.oldval = old_val;
  127. fast_path_evt->un.queue_depth_evt.newval = new_val;
  128. fast_path_evt->vport = vport;
  129. fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
  130. spin_lock_irqsave(&phba->hbalock, flags);
  131. list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
  132. spin_unlock_irqrestore(&phba->hbalock, flags);
  133. lpfc_worker_wake_up(phba);
  134. return;
  135. }
  136. /**
  137. * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
  138. * @phba: The Hba for which this call is being executed.
  139. *
  140. * This routine is called when there is a resource error in the driver or firmware.
  141. * It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one event each
  142. * second, and wakes up the worker thread of @phba to process the
  143. * WORKER_RAMP_DOWN_QUEUE event.
  144. *
  145. * This routine should be called with no lock held.
  146. **/
  147. void
  148. lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
  149. {
  150. unsigned long flags;
  151. uint32_t evt_posted;
  152. spin_lock_irqsave(&phba->hbalock, flags);
  153. atomic_inc(&phba->num_rsrc_err);
  154. phba->last_rsrc_error_time = jiffies;
  155. if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
  156. spin_unlock_irqrestore(&phba->hbalock, flags);
  157. return;
  158. }
  159. phba->last_ramp_down_time = jiffies;
  160. spin_unlock_irqrestore(&phba->hbalock, flags);
  161. spin_lock_irqsave(&phba->pport->work_port_lock, flags);
  162. evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
  163. if (!evt_posted)
  164. phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
  165. spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
  166. if (!evt_posted)
  167. lpfc_worker_wake_up(phba);
  168. return;
  169. }
  170. /**
  171. * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
  172. * @vport: The virtual port for which this call is being executed.
  173. * @sdev: The SCSI device whose queue depth may be ramped up.
  174. *
  175. * This routine posts a WORKER_RAMP_UP_QUEUE event for the hba of @vport, at most
  176. * one event every 5 minutes after last_ramp_up_time or last_rsrc_error_time, and
  177. * wakes up the worker thread of the hba to process the WORKER_RAMP_UP_QUEUE event.
  178. *
  179. * This routine should be called with no lock held.
  180. **/
  181. static inline void
  182. lpfc_rampup_queue_depth(struct lpfc_vport *vport,
  183. struct scsi_device *sdev)
  184. {
  185. unsigned long flags;
  186. struct lpfc_hba *phba = vport->phba;
  187. uint32_t evt_posted;
  188. atomic_inc(&phba->num_cmd_success);
  189. if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
  190. return;
  191. spin_lock_irqsave(&phba->hbalock, flags);
  192. if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
  193. ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
  194. spin_unlock_irqrestore(&phba->hbalock, flags);
  195. return;
  196. }
  197. phba->last_ramp_up_time = jiffies;
  198. spin_unlock_irqrestore(&phba->hbalock, flags);
  199. spin_lock_irqsave(&phba->pport->work_port_lock, flags);
  200. evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
  201. if (!evt_posted)
  202. phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
  203. spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
  204. if (!evt_posted)
  205. lpfc_worker_wake_up(phba);
  206. return;
  207. }
  208. /**
  209. * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
  210. * @phba: The Hba for which this call is being executed.
  211. *
  212. * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
  213. * worker thread. It reduces the queue depth for all scsi devices on each vport
  214. * associated with @phba.
  215. **/
  216. void
  217. lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
  218. {
  219. struct lpfc_vport **vports;
  220. struct Scsi_Host *shost;
  221. struct scsi_device *sdev;
  222. unsigned long new_queue_depth, old_queue_depth;
  223. unsigned long num_rsrc_err, num_cmd_success;
  224. int i;
  225. struct lpfc_rport_data *rdata;
  226. num_rsrc_err = atomic_read(&phba->num_rsrc_err);
  227. num_cmd_success = atomic_read(&phba->num_cmd_success);
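/*
 * Each device's queue depth is reduced roughly in proportion to the error
 * rate seen since the last ramp event:
 *   reduction = queue_depth * num_rsrc_err / (num_rsrc_err + num_cmd_success)
 * e.g. (illustrative numbers only) a depth of 32 with 10 resource errors and
 * 30 successes gives a reduction of 32 * 10 / 40 = 8, so the new depth is 24.
 * A computed reduction of zero still lowers the depth by one.
 */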
  228. vports = lpfc_create_vport_work_array(phba);
  229. if (vports != NULL)
  230. for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  231. shost = lpfc_shost_from_vport(vports[i]);
  232. shost_for_each_device(sdev, shost) {
  233. new_queue_depth =
  234. sdev->queue_depth * num_rsrc_err /
  235. (num_rsrc_err + num_cmd_success);
  236. if (!new_queue_depth)
  237. new_queue_depth = sdev->queue_depth - 1;
  238. else
  239. new_queue_depth = sdev->queue_depth -
  240. new_queue_depth;
  241. old_queue_depth = sdev->queue_depth;
  242. if (sdev->ordered_tags)
  243. scsi_adjust_queue_depth(sdev,
  244. MSG_ORDERED_TAG,
  245. new_queue_depth);
  246. else
  247. scsi_adjust_queue_depth(sdev,
  248. MSG_SIMPLE_TAG,
  249. new_queue_depth);
  250. rdata = sdev->hostdata;
  251. if (rdata)
  252. lpfc_send_sdev_queuedepth_change_event(
  253. phba, vports[i],
  254. rdata->pnode,
  255. sdev->lun, old_queue_depth,
  256. new_queue_depth);
  257. }
  258. }
  259. lpfc_destroy_vport_work_array(phba, vports);
  260. atomic_set(&phba->num_rsrc_err, 0);
  261. atomic_set(&phba->num_cmd_success, 0);
  262. }
  263. /**
  264. * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
  265. * @phba: The Hba for which this call is being executed.
  266. *
  267. * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
  268. * worker thread. It increases the queue depth of every scsi device on each vport
  269. * associated with @phba by 1, and resets @phba num_rsrc_err and
  270. * num_cmd_success to zero.
  271. **/
  272. void
  273. lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
  274. {
  275. struct lpfc_vport **vports;
  276. struct Scsi_Host *shost;
  277. struct scsi_device *sdev;
  278. int i;
  279. struct lpfc_rport_data *rdata;
  280. vports = lpfc_create_vport_work_array(phba);
  281. if (vports != NULL)
  282. for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  283. shost = lpfc_shost_from_vport(vports[i]);
  284. shost_for_each_device(sdev, shost) {
  285. if (vports[i]->cfg_lun_queue_depth <=
  286. sdev->queue_depth)
  287. continue;
  288. if (sdev->ordered_tags)
  289. scsi_adjust_queue_depth(sdev,
  290. MSG_ORDERED_TAG,
  291. sdev->queue_depth+1);
  292. else
  293. scsi_adjust_queue_depth(sdev,
  294. MSG_SIMPLE_TAG,
  295. sdev->queue_depth+1);
  296. rdata = sdev->hostdata;
  297. if (rdata)
  298. lpfc_send_sdev_queuedepth_change_event(
  299. phba, vports[i],
  300. rdata->pnode,
  301. sdev->lun,
  302. sdev->queue_depth - 1,
  303. sdev->queue_depth);
  304. }
  305. }
  306. lpfc_destroy_vport_work_array(phba, vports);
  307. atomic_set(&phba->num_rsrc_err, 0);
  308. atomic_set(&phba->num_cmd_success, 0);
  309. }
  310. /**
  311. * lpfc_scsi_dev_block: set all scsi hosts to block state.
  312. * @phba: Pointer to HBA context object.
  313. *
  314. * This function walks the vport list and sets each SCSI host to the blocked
  315. * state by invoking the fc_remote_port_delete() routine. It is invoked from
  316. * EEH handling when the device's PCI slot has been permanently disabled.
  317. **/
  318. void
  319. lpfc_scsi_dev_block(struct lpfc_hba *phba)
  320. {
  321. struct lpfc_vport **vports;
  322. struct Scsi_Host *shost;
  323. struct scsi_device *sdev;
  324. struct fc_rport *rport;
  325. int i;
  326. vports = lpfc_create_vport_work_array(phba);
  327. if (vports != NULL)
  328. for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  329. shost = lpfc_shost_from_vport(vports[i]);
  330. shost_for_each_device(sdev, shost) {
  331. rport = starget_to_rport(scsi_target(sdev));
  332. fc_remote_port_delete(rport);
  333. }
  334. }
  335. lpfc_destroy_vport_work_array(phba, vports);
  336. }
  337. /**
  338. * lpfc_new_scsi_buf: Scsi buffer allocator.
  339. * @vport: The virtual port for which this call is being executed.
  340. *
  341. * This routine allocates a scsi buffer, which contains all the necessary
  342. * information needed to initiate a SCSI I/O. The non-DMAable buffer region
  343. * contains information to build the IOCB. The DMAable region contains
  344. * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
  345. * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
  346. * and the BPL BDE is setup in the IOCB.
  347. *
  348. * Return codes:
  349. * NULL - Error
  350. * Pointer to lpfc_scsi_buf data structure - Success
  351. **/
  352. static struct lpfc_scsi_buf *
  353. lpfc_new_scsi_buf(struct lpfc_vport *vport)
  354. {
  355. struct lpfc_hba *phba = vport->phba;
  356. struct lpfc_scsi_buf *psb;
  357. struct ulp_bde64 *bpl;
  358. IOCB_t *iocb;
  359. dma_addr_t pdma_phys_fcp_cmd;
  360. dma_addr_t pdma_phys_fcp_rsp;
  361. dma_addr_t pdma_phys_bpl;
  362. uint16_t iotag;
  363. psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
  364. if (!psb)
  365. return NULL;
  366. /*
  367. * Get memory from the pci pool to map the virt space to pci bus space
  368. * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
  369. * struct fcp_rsp and the number of bde's necessary to support the
  370. * sg_tablesize.
  371. */
  372. psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
  373. &psb->dma_handle);
  374. if (!psb->data) {
  375. kfree(psb);
  376. return NULL;
  377. }
  378. /* Initialize virtual ptrs to dma_buf region. */
  379. memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
  380. /* Allocate iotag for psb->cur_iocbq. */
  381. iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
  382. if (iotag == 0) {
  383. pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
  384. psb->data, psb->dma_handle);
  385. kfree (psb);
  386. return NULL;
  387. }
  388. psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
  389. psb->fcp_cmnd = psb->data;
  390. psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
  391. psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
  392. sizeof(struct fcp_rsp);
  393. /* Initialize local short-hand pointers. */
  394. bpl = psb->fcp_bpl;
  395. pdma_phys_fcp_cmd = psb->dma_handle;
  396. pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
  397. pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
  398. sizeof(struct fcp_rsp);
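/*
 * Layout of the single DMA buffer allocated above (offsets are implied by
 * the pointer arithmetic):
 *   [ struct fcp_cmnd ][ struct fcp_rsp ][ BPL entries ... ]
 *   ^ pdma_phys_fcp_cmd ^ pdma_phys_fcp_rsp ^ pdma_phys_bpl
 */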
  399. /*
  400. * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
  401. * list bdes. Initialize the first two and leave the rest for
  402. * queuecommand.
  403. */
  404. bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
  405. bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
  406. bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
  407. bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  408. bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
  409. /* Setup the physical region for the FCP RSP */
  410. bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
  411. bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
  412. bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
  413. bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  414. bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
  415. /*
  416. * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
  417. * initialize it with all known data now.
  418. */
  419. iocb = &psb->cur_iocbq.iocb;
  420. iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
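/*
 * For SLI-3 the FCP_CMND is carried as immediate data inside the IOCB and
 * the FCP_RSP is described by the extended response BDE, so no BPL is
 * referenced. Otherwise the BDL points at the BPL built above, whose first
 * two entries describe the FCP_CMND and FCP_RSP.
 */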
  421. if (phba->sli_rev == 3) {
  422. /* fill in immediate fcp command BDE */
  423. iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
  424. iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
  425. iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
  426. unsli3.fcp_ext.icd);
  427. iocb->un.fcpi64.bdl.addrHigh = 0;
  428. iocb->ulpBdeCount = 0;
  429. iocb->ulpLe = 0;
  430. /* fill in response BDE */
  431. iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  432. iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
  433. sizeof(struct fcp_rsp);
  434. iocb->unsli3.fcp_ext.rbde.addrLow =
  435. putPaddrLow(pdma_phys_fcp_rsp);
  436. iocb->unsli3.fcp_ext.rbde.addrHigh =
  437. putPaddrHigh(pdma_phys_fcp_rsp);
  438. } else {
  439. iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  440. iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
  441. iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
  442. iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
  443. iocb->ulpBdeCount = 1;
  444. iocb->ulpLe = 1;
  445. }
  446. iocb->ulpClass = CLASS3;
  447. return psb;
  448. }
  449. /**
  450. * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list list of Hba.
  451. * @phba: The Hba for which this call is being executed.
  452. *
  453. * This routine removes a scsi buffer from the head of the @phba
  454. * lpfc_scsi_buf_list list and returns it to the caller.
  455. *
  456. * Return codes:
  457. * NULL - Error
  458. * Pointer to lpfc_scsi_buf - Success
  459. **/
  460. static struct lpfc_scsi_buf*
  461. lpfc_get_scsi_buf(struct lpfc_hba * phba)
  462. {
  463. struct lpfc_scsi_buf * lpfc_cmd = NULL;
  464. struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
  465. unsigned long iflag = 0;
  466. spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
  467. list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
  468. if (lpfc_cmd) {
  469. lpfc_cmd->seg_cnt = 0;
  470. lpfc_cmd->nonsg_phys = 0;
  471. }
  472. spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
  473. return lpfc_cmd;
  474. }
  475. /**
  476. * lpfc_release_scsi_buf: Return a scsi buffer back to hba lpfc_scsi_buf_list list.
  477. * @phba: The Hba for which this call is being executed.
  478. * @psb: The scsi buffer which is being released.
  479. *
  480. * This routine releases @psb scsi buffer by adding it to tail of @phba
  481. * lpfc_scsi_buf_list list.
  482. **/
  483. static void
  484. lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
  485. {
  486. unsigned long iflag = 0;
  487. spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
  488. psb->pCmd = NULL;
  489. list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
  490. spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
  491. }
  492. /**
  493. * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
  494. * @phba: The Hba for which this call is being executed.
  495. * @lpfc_cmd: The scsi buffer which is going to be mapped.
  496. *
  497. * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
  498. * field of @lpfc_cmd. This routine scans through sg elements and format the
  499. * bdea. This routine also initializes all IOCB fields which are dependent on
  500. * scsi command request buffer.
  501. *
  502. * Return codes:
  503. * 1 - Error
  504. * 0 - Success
  505. **/
  506. static int
  507. lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
  508. {
  509. struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
  510. struct scatterlist *sgel = NULL;
  511. struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
  512. struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
  513. IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
  514. struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
  515. dma_addr_t physaddr;
  516. uint32_t num_bde = 0;
  517. int nseg, datadir = scsi_cmnd->sc_data_direction;
  518. /*
  519. * There are three possibilities here - use scatter-gather segment, use
  520. * the single mapping, or neither. Start the lpfc command prep by
  521. * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
  522. * data bde entry.
  523. */
  524. bpl += 2;
  525. if (scsi_sg_count(scsi_cmnd)) {
  526. /*
  527. * The driver stores the segment count returned from dma_map_sg
  528. * because this is a count of dma-mappings used to map the use_sg
  529. * pages. They are not guaranteed to be the same for those
  530. * architectures that implement an IOMMU.
  531. */
  532. nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
  533. scsi_sg_count(scsi_cmnd), datadir);
  534. if (unlikely(!nseg))
  535. return 1;
  536. lpfc_cmd->seg_cnt = nseg;
  537. if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
  538. printk(KERN_ERR "%s: Too many sg segments from "
  539. "dma_map_sg. Config %d, seg_cnt %d",
  540. __func__, phba->cfg_sg_seg_cnt,
  541. lpfc_cmd->seg_cnt);
  542. scsi_dma_unmap(scsi_cmnd);
  543. return 1;
  544. }
  545. /*
  546. * The driver established a maximum scatter-gather segment count
  547. * during probe that limits the number of sg elements in any
  548. * single scsi command. Just run through the seg_cnt and format
  549. * the bde's.
  550. * When using SLI-3 the driver will try to fit all the BDEs into
  551. * the IOCB. If it can't then the BDEs get added to a BPL as it
  552. * does for SLI-2 mode.
  553. */
  554. scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
  555. physaddr = sg_dma_address(sgel);
  556. if (phba->sli_rev == 3 &&
  557. nseg <= LPFC_EXT_DATA_BDE_COUNT) {
  558. data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  559. data_bde->tus.f.bdeSize = sg_dma_len(sgel);
  560. data_bde->addrLow = putPaddrLow(physaddr);
  561. data_bde->addrHigh = putPaddrHigh(physaddr);
  562. data_bde++;
  563. } else {
  564. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  565. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  566. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  567. bpl->addrLow =
  568. le32_to_cpu(putPaddrLow(physaddr));
  569. bpl->addrHigh =
  570. le32_to_cpu(putPaddrHigh(physaddr));
  571. bpl++;
  572. }
  573. }
  574. }
  575. /*
  576. * Finish initializing those IOCB fields that are dependent on the
  577. * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
  578. * explicitly reinitialized and for SLI-3 the extended bde count is
  579. * explicitly reinitialized since all iocb memory resources are reused.
  580. */
  581. if (phba->sli_rev == 3) {
  582. if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
  583. /*
  584. * The extended IOCB format can only fit 3 BDE or a BPL.
  585. * This I/O has more than 3 BDE so the 1st data bde will
  586. * be a BPL that is filled in here.
  587. */
  588. physaddr = lpfc_cmd->dma_handle;
  589. data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
  590. data_bde->tus.f.bdeSize = (num_bde *
  591. sizeof(struct ulp_bde64));
  592. physaddr += (sizeof(struct fcp_cmnd) +
  593. sizeof(struct fcp_rsp) +
  594. (2 * sizeof(struct ulp_bde64)));
  595. data_bde->addrHigh = putPaddrHigh(physaddr);
  596. data_bde->addrLow = putPaddrLow(physaddr);
  597. /* ebde count includes the response bde and data bpl */
  598. iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
  599. } else {
  600. /* ebde count includes the response bde and data bdes */
  601. iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
  602. }
  603. } else {
  604. iocb_cmd->un.fcpi64.bdl.bdeSize =
  605. ((num_bde + 2) * sizeof(struct ulp_bde64));
  606. }
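/*
 * Summary of the BDE accounting above: with SLI-3, up to
 * LPFC_EXT_DATA_BDE_COUNT data BDEs are embedded in the IOCB
 * (ebde_count = data BDEs + the response BDE); larger transfers fall back
 * to a single BPL BDE (ebde_count = 2). With SLI-2 the BDL size always
 * covers the FCP_CMND and FCP_RSP BDEs plus every data BDE in the BPL.
 */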
  607. fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
  608. return 0;
  609. }
  610. /**
  611. * lpfc_send_scsi_error_event: Posts an event when there is SCSI error.
  612. * @phba: Pointer to hba context object.
  613. * @vport: Pointer to vport object.
  614. * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
  615. * @rsp_iocb: Pointer to response iocb object which reported error.
  616. *
  617. * This function posts an event when there is a SCSI command reporting
  618. * error from the scsi device.
  619. **/
  620. static void
  621. lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
  622. struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
  623. struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
  624. struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
  625. uint32_t resp_info = fcprsp->rspStatus2;
  626. uint32_t scsi_status = fcprsp->rspStatus3;
  627. uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
  628. struct lpfc_fast_path_event *fast_path_evt = NULL;
  629. struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
  630. unsigned long flags;
  631. /* If there is queuefull or busy condition send a scsi event */
  632. if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
  633. (cmnd->result == SAM_STAT_BUSY)) {
  634. fast_path_evt = lpfc_alloc_fast_evt(phba);
  635. if (!fast_path_evt)
  636. return;
  637. fast_path_evt->un.scsi_evt.event_type =
  638. FC_REG_SCSI_EVENT;
  639. fast_path_evt->un.scsi_evt.subcategory =
  640. (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
  641. LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
  642. fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
  643. memcpy(&fast_path_evt->un.scsi_evt.wwpn,
  644. &pnode->nlp_portname, sizeof(struct lpfc_name));
  645. memcpy(&fast_path_evt->un.scsi_evt.wwnn,
  646. &pnode->nlp_nodename, sizeof(struct lpfc_name));
  647. } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
  648. ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
  649. fast_path_evt = lpfc_alloc_fast_evt(phba);
  650. if (!fast_path_evt)
  651. return;
  652. fast_path_evt->un.check_cond_evt.scsi_event.event_type =
  653. FC_REG_SCSI_EVENT;
  654. fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
  655. LPFC_EVENT_CHECK_COND;
  656. fast_path_evt->un.check_cond_evt.scsi_event.lun =
  657. cmnd->device->lun;
  658. memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
  659. &pnode->nlp_portname, sizeof(struct lpfc_name));
  660. memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
  661. &pnode->nlp_nodename, sizeof(struct lpfc_name));
  662. fast_path_evt->un.check_cond_evt.sense_key =
  663. cmnd->sense_buffer[2] & 0xf;
  664. fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
  665. fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
  666. } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
  667. fcpi_parm &&
  668. ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
  669. ((scsi_status == SAM_STAT_GOOD) &&
  670. !(resp_info & (RESID_UNDER | RESID_OVER))))) {
  671. /*
  672. * If the status is good or the resid does not match fcpi_parm, and
  673. * there is a valid fcpi_parm, then there is a read check error
  674. */
  675. fast_path_evt = lpfc_alloc_fast_evt(phba);
  676. if (!fast_path_evt)
  677. return;
  678. fast_path_evt->un.read_check_error.header.event_type =
  679. FC_REG_FABRIC_EVENT;
  680. fast_path_evt->un.read_check_error.header.subcategory =
  681. LPFC_EVENT_FCPRDCHKERR;
  682. memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
  683. &pnode->nlp_portname, sizeof(struct lpfc_name));
  684. memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
  685. &pnode->nlp_nodename, sizeof(struct lpfc_name));
  686. fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
  687. fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
  688. fast_path_evt->un.read_check_error.fcpiparam =
  689. fcpi_parm;
  690. } else
  691. return;
  692. fast_path_evt->vport = vport;
  693. spin_lock_irqsave(&phba->hbalock, flags);
  694. list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
  695. spin_unlock_irqrestore(&phba->hbalock, flags);
  696. lpfc_worker_wake_up(phba);
  697. return;
  698. }
  699. /**
  700. * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
  701. * @phba: The Hba for which this call is being executed.
  702. * @psb: The scsi buffer which is going to be un-mapped.
  703. *
  704. * This routine does DMA un-mapping of the scatter-gather list of the scsi
  705. * command field of @psb.
  706. **/
  707. static void
  708. lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
  709. {
  710. /*
  711. * There are only two special cases to consider. (1) the scsi command
  712. * requested scatter-gather usage or (2) the scsi command allocated
  713. * a request buffer, but did not request use_sg. There is a third
  714. * case, but it does not require resource deallocation.
  715. */
  716. if (psb->seg_cnt > 0)
  717. scsi_dma_unmap(psb->pCmd);
  718. }
  719. /**
  720. * lpfc_handle_fcp_err: FCP response handler.
  721. * @vport: The virtual port for which this call is being executed.
  722. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
  723. * @rsp_iocb: The response IOCB which contains FCP error.
  724. *
  725. * This routine is called to process response IOCB with status field
  726. * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
  727. * based upon SCSI and FCP error.
  728. **/
  729. static void
  730. lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
  731. struct lpfc_iocbq *rsp_iocb)
  732. {
  733. struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
  734. struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
  735. struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
  736. uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
  737. uint32_t resp_info = fcprsp->rspStatus2;
  738. uint32_t scsi_status = fcprsp->rspStatus3;
  739. uint32_t *lp;
  740. uint32_t host_status = DID_OK;
  741. uint32_t rsplen = 0;
  742. uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
  743. /*
  744. * If this is a task management command, there is no
  745. * scsi packet associated with this lpfc_cmd. The driver
  746. * consumes it.
  747. */
  748. if (fcpcmd->fcpCntl2) {
  749. scsi_status = 0;
  750. goto out;
  751. }
  752. if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
  753. uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
  754. if (snslen > SCSI_SENSE_BUFFERSIZE)
  755. snslen = SCSI_SENSE_BUFFERSIZE;
  756. if (resp_info & RSP_LEN_VALID)
  757. rsplen = be32_to_cpu(fcprsp->rspRspLen);
  758. memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
  759. }
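/*
 * Note: when RSP_LEN_VALID is set, the FCP_RSP_INFO field precedes the sense
 * data in the payload, which is why the copy above starts at
 * rspInfo0 + rsplen rather than at rspInfo0 itself.
 */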
  760. lp = (uint32_t *)cmnd->sense_buffer;
  761. if (!scsi_status && (resp_info & RESID_UNDER))
  762. logit = LOG_FCP;
  763. lpfc_printf_vlog(vport, KERN_WARNING, logit,
  764. "0730 FCP command x%x failed: x%x SNS x%x x%x "
  765. "Data: x%x x%x x%x x%x x%x\n",
  766. cmnd->cmnd[0], scsi_status,
  767. be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
  768. be32_to_cpu(fcprsp->rspResId),
  769. be32_to_cpu(fcprsp->rspSnsLen),
  770. be32_to_cpu(fcprsp->rspRspLen),
  771. fcprsp->rspInfo3);
  772. if (resp_info & RSP_LEN_VALID) {
  773. rsplen = be32_to_cpu(fcprsp->rspRspLen);
  774. if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
  775. (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
  776. host_status = DID_ERROR;
  777. goto out;
  778. }
  779. }
  780. scsi_set_resid(cmnd, 0);
  781. if (resp_info & RESID_UNDER) {
  782. scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
  783. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  784. "0716 FCP Read Underrun, expected %d, "
  785. "residual %d Data: x%x x%x x%x\n",
  786. be32_to_cpu(fcpcmd->fcpDl),
  787. scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
  788. cmnd->underflow);
  789. /*
  790. * If there is an underrun, check whether the underrun reported by the
  791. * storage array is the same as the underrun reported by the HBA.
  792. * If they do not match, a frame was dropped.
  793. */
  794. if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
  795. fcpi_parm &&
  796. (scsi_get_resid(cmnd) != fcpi_parm)) {
  797. lpfc_printf_vlog(vport, KERN_WARNING,
  798. LOG_FCP | LOG_FCP_ERROR,
  799. "0735 FCP Read Check Error "
  800. "and Underrun Data: x%x x%x x%x x%x\n",
  801. be32_to_cpu(fcpcmd->fcpDl),
  802. scsi_get_resid(cmnd), fcpi_parm,
  803. cmnd->cmnd[0]);
  804. scsi_set_resid(cmnd, scsi_bufflen(cmnd));
  805. host_status = DID_ERROR;
  806. }
  807. /*
  808. * The cmnd->underflow is the minimum number of bytes that must
  809. * be transferred for this command. Provided a sense condition
  810. * is not present, make sure the actual amount transferred is at
  811. * least the underflow value or fail.
  812. */
  813. if (!(resp_info & SNS_LEN_VALID) &&
  814. (scsi_status == SAM_STAT_GOOD) &&
  815. (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
  816. < cmnd->underflow)) {
  817. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  818. "0717 FCP command x%x residual "
  819. "underrun converted to error "
  820. "Data: x%x x%x x%x\n",
  821. cmnd->cmnd[0], scsi_bufflen(cmnd),
  822. scsi_get_resid(cmnd), cmnd->underflow);
  823. host_status = DID_ERROR;
  824. }
  825. } else if (resp_info & RESID_OVER) {
  826. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  827. "0720 FCP command x%x residual overrun error. "
  828. "Data: x%x x%x \n", cmnd->cmnd[0],
  829. scsi_bufflen(cmnd), scsi_get_resid(cmnd));
  830. host_status = DID_ERROR;
  831. /*
  832. * Check SLI validation that all the transfer was actually done
  833. * (fcpi_parm should be zero). Apply check only to reads.
  834. */
  835. } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
  836. (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
  837. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
  838. "0734 FCP Read Check Error Data: "
  839. "x%x x%x x%x x%x\n",
  840. be32_to_cpu(fcpcmd->fcpDl),
  841. be32_to_cpu(fcprsp->rspResId),
  842. fcpi_parm, cmnd->cmnd[0]);
  843. host_status = DID_ERROR;
  844. scsi_set_resid(cmnd, scsi_bufflen(cmnd));
  845. }
  846. out:
  847. cmnd->result = ScsiResult(host_status, scsi_status);
  848. lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
  849. }
  850. /**
  851. * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
  852. * @phba: The Hba for which this call is being executed.
  853. * @pIocbIn: The command IOCBQ for the scsi cmnd.
  854. * @pIocbOut: The response IOCBQ for the scsi cmnd.
  855. *
  856. * This routine assigns the scsi command result by examining the response IOCB
  857. * status field. It also handles the QUEUE FULL condition by ramping down the
  858. * device queue depth.
  859. **/
  860. static void
  861. lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
  862. struct lpfc_iocbq *pIocbOut)
  863. {
  864. struct lpfc_scsi_buf *lpfc_cmd =
  865. (struct lpfc_scsi_buf *) pIocbIn->context1;
  866. struct lpfc_vport *vport = pIocbIn->vport;
  867. struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
  868. struct lpfc_nodelist *pnode = rdata->pnode;
  869. struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
  870. int result;
  871. struct scsi_device *sdev, *tmp_sdev;
  872. int depth = 0;
  873. unsigned long flags;
  874. struct lpfc_fast_path_event *fast_path_evt;
  875. lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
  876. lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
  877. if (pnode && NLP_CHK_NODE_ACT(pnode))
  878. atomic_dec(&pnode->cmd_pending);
  879. if (lpfc_cmd->status) {
  880. if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
  881. (lpfc_cmd->result & IOERR_DRVR_MASK))
  882. lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
  883. else if (lpfc_cmd->status >= IOSTAT_CNT)
  884. lpfc_cmd->status = IOSTAT_DEFAULT;
  885. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  886. "0729 FCP cmd x%x failed <%d/%d> "
  887. "status: x%x result: x%x Data: x%x x%x\n",
  888. cmd->cmnd[0],
  889. cmd->device ? cmd->device->id : 0xffff,
  890. cmd->device ? cmd->device->lun : 0xffff,
  891. lpfc_cmd->status, lpfc_cmd->result,
  892. pIocbOut->iocb.ulpContext,
  893. lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
  894. switch (lpfc_cmd->status) {
  895. case IOSTAT_FCP_RSP_ERROR:
  896. /* Call FCP RSP handler to determine result */
  897. lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
  898. break;
  899. case IOSTAT_NPORT_BSY:
  900. case IOSTAT_FABRIC_BSY:
  901. cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
  902. fast_path_evt = lpfc_alloc_fast_evt(phba);
  903. if (!fast_path_evt)
  904. break;
  905. fast_path_evt->un.fabric_evt.event_type =
  906. FC_REG_FABRIC_EVENT;
  907. fast_path_evt->un.fabric_evt.subcategory =
  908. (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
  909. LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
  910. if (pnode && NLP_CHK_NODE_ACT(pnode)) {
  911. memcpy(&fast_path_evt->un.fabric_evt.wwpn,
  912. &pnode->nlp_portname,
  913. sizeof(struct lpfc_name));
  914. memcpy(&fast_path_evt->un.fabric_evt.wwnn,
  915. &pnode->nlp_nodename,
  916. sizeof(struct lpfc_name));
  917. }
  918. fast_path_evt->vport = vport;
  919. fast_path_evt->work_evt.evt =
  920. LPFC_EVT_FASTPATH_MGMT_EVT;
  921. spin_lock_irqsave(&phba->hbalock, flags);
  922. list_add_tail(&fast_path_evt->work_evt.evt_listp,
  923. &phba->work_list);
  924. spin_unlock_irqrestore(&phba->hbalock, flags);
  925. lpfc_worker_wake_up(phba);
  926. break;
  927. case IOSTAT_LOCAL_REJECT:
  928. if (lpfc_cmd->result == IOERR_INVALID_RPI ||
  929. lpfc_cmd->result == IOERR_NO_RESOURCES ||
  930. lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
  931. cmd->result = ScsiResult(DID_REQUEUE, 0);
  932. break;
  933. } /* else: fall through */
  934. default:
  935. cmd->result = ScsiResult(DID_ERROR, 0);
  936. break;
  937. }
  938. if (!pnode || !NLP_CHK_NODE_ACT(pnode)
  939. || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
  940. cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
  941. SAM_STAT_BUSY);
  942. } else {
  943. cmd->result = ScsiResult(DID_OK, 0);
  944. }
  945. if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
  946. uint32_t *lp = (uint32_t *)cmd->sense_buffer;
  947. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  948. "0710 Iodone <%d/%d> cmd %p, error "
  949. "x%x SNS x%x x%x Data: x%x x%x\n",
  950. cmd->device->id, cmd->device->lun, cmd,
  951. cmd->result, *lp, *(lp + 3), cmd->retries,
  952. scsi_get_resid(cmd));
  953. }
  954. lpfc_update_stats(phba, lpfc_cmd);
  955. result = cmd->result;
  956. sdev = cmd->device;
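/*
 * Per-target queue depth management: if this command took longer than
 * cfg_max_scsicmpl_time, clamp pnode->cmd_qdepth down to the number of
 * commands currently outstanding (READ(10)/WRITE(10) completions only, and
 * never below LPFC_MIN_TGT_QDEPTH). Otherwise, once LPFC_TGTQ_INTERVAL has
 * passed since the last change, grow cmd_qdepth by LPFC_TGTQ_RAMPUP_PCENT
 * percent, capped at LPFC_MAX_TGT_QDEPTH.
 */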
  957. if (vport->cfg_max_scsicmpl_time &&
  958. time_after(jiffies, lpfc_cmd->start_time +
  959. msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
  960. spin_lock_irqsave(sdev->host->host_lock, flags);
  961. if (pnode && NLP_CHK_NODE_ACT(pnode)) {
  962. if (pnode->cmd_qdepth >
  963. atomic_read(&pnode->cmd_pending) &&
  964. (atomic_read(&pnode->cmd_pending) >
  965. LPFC_MIN_TGT_QDEPTH) &&
  966. ((cmd->cmnd[0] == READ_10) ||
  967. (cmd->cmnd[0] == WRITE_10)))
  968. pnode->cmd_qdepth =
  969. atomic_read(&pnode->cmd_pending);
  970. pnode->last_change_time = jiffies;
  971. }
  972. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  973. } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
  974. if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
  975. time_after(jiffies, pnode->last_change_time +
  976. msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
  977. spin_lock_irqsave(sdev->host->host_lock, flags);
  978. pnode->cmd_qdepth += pnode->cmd_qdepth *
  979. LPFC_TGTQ_RAMPUP_PCENT / 100;
  980. if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
  981. pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
  982. pnode->last_change_time = jiffies;
  983. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  984. }
  985. }
  986. lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
  987. cmd->scsi_done(cmd);
  988. if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
  989. /*
  990. * If there is a thread waiting for command completion
  991. * wake up the thread.
  992. */
  993. spin_lock_irqsave(sdev->host->host_lock, flags);
  994. lpfc_cmd->pCmd = NULL;
  995. if (lpfc_cmd->waitq)
  996. wake_up(lpfc_cmd->waitq);
  997. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  998. lpfc_release_scsi_buf(phba, lpfc_cmd);
  999. return;
  1000. }
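/*
 * On success the driver both posts the global WORKER_RAMP_UP_QUEUE event and,
 * rate-limited by LPFC_Q_RAMP_UP_INTERVAL since the last ramp-up or queue-full
 * on this target, bumps the queue depth of every LUN on the target by one.
 */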
  1001. if (!result)
  1002. lpfc_rampup_queue_depth(vport, sdev);
  1003. if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
  1004. ((jiffies - pnode->last_ramp_up_time) >
  1005. LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
  1006. ((jiffies - pnode->last_q_full_time) >
  1007. LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
  1008. (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
  1009. shost_for_each_device(tmp_sdev, sdev->host) {
  1010. if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
  1011. if (tmp_sdev->id != sdev->id)
  1012. continue;
  1013. if (tmp_sdev->ordered_tags)
  1014. scsi_adjust_queue_depth(tmp_sdev,
  1015. MSG_ORDERED_TAG,
  1016. tmp_sdev->queue_depth+1);
  1017. else
  1018. scsi_adjust_queue_depth(tmp_sdev,
  1019. MSG_SIMPLE_TAG,
  1020. tmp_sdev->queue_depth+1);
  1021. pnode->last_ramp_up_time = jiffies;
  1022. }
  1023. }
  1024. lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
  1025. 0xFFFFFFFF,
  1026. sdev->queue_depth - 1, sdev->queue_depth);
  1027. }
  1028. /*
  1029. * Check for queue full. If the lun is reporting queue full, then
  1030. * back off the lun queue depth to prevent target overloads.
  1031. */
  1032. if (result == SAM_STAT_TASK_SET_FULL && pnode &&
  1033. NLP_CHK_NODE_ACT(pnode)) {
  1034. pnode->last_q_full_time = jiffies;
  1035. shost_for_each_device(tmp_sdev, sdev->host) {
  1036. if (tmp_sdev->id != sdev->id)
  1037. continue;
  1038. depth = scsi_track_queue_full(tmp_sdev,
  1039. tmp_sdev->queue_depth - 1);
  1040. }
  1041. /*
  1042. * The queue depth cannot be lowered any more.
  1043. * Modify the returned error code to store
  1044. * the final depth value set by
  1045. * scsi_track_queue_full.
  1046. */
  1047. if (depth == -1)
  1048. depth = sdev->host->cmd_per_lun;
  1049. if (depth) {
  1050. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  1051. "0711 detected queue full - lun queue "
  1052. "depth adjusted to %d.\n", depth);
  1053. lpfc_send_sdev_queuedepth_change_event(phba, vport,
  1054. pnode, 0xFFFFFFFF,
  1055. depth+1, depth);
  1056. }
  1057. }
  1058. /*
  1059. * If there is a thread waiting for command completion
  1060. * wake up the thread.
  1061. */
  1062. spin_lock_irqsave(sdev->host->host_lock, flags);
  1063. lpfc_cmd->pCmd = NULL;
  1064. if (lpfc_cmd->waitq)
  1065. wake_up(lpfc_cmd->waitq);
  1066. spin_unlock_irqrestore(sdev->host->host_lock, flags);
  1067. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1068. }
  1069. /**
  1070. * lpfc_fcpcmd_to_iocb: Copy the fcp_cmnd data into the IOCB.
  1071. * @data: A pointer to the immediate command data portion of the IOCB.
  1072. * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
  1073. *
  1074. * The routine copies the entire FCP command from @fcp_cmnd to @data while
  1075. * byte swapping the data to big endian format for transmission on the wire.
  1076. **/
  1077. static void
  1078. lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
  1079. {
  1080. int i, j;
  1081. for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
  1082. i += sizeof(uint32_t), j++) {
  1083. ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
  1084. }
  1085. }
  1086. /**
  1087. * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
  1088. * @vport: The virtual port for which this call is being executed.
  1089. * @lpfc_cmd: The scsi command which needs to be sent.
  1090. * @pnode: Pointer to lpfc_nodelist.
  1091. *
  1092. * This routine initializes the fcp_cmnd and iocb data structures from the
  1093. * scsi command to be transferred.
  1094. **/
  1095. static void
  1096. lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
  1097. struct lpfc_nodelist *pnode)
  1098. {
  1099. struct lpfc_hba *phba = vport->phba;
  1100. struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
  1101. struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
  1102. IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
  1103. struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
  1104. int datadir = scsi_cmnd->sc_data_direction;
  1105. char tag[2];
  1106. if (!pnode || !NLP_CHK_NODE_ACT(pnode))
  1107. return;
  1108. lpfc_cmd->fcp_rsp->rspSnsLen = 0;
  1109. /* clear task management bits */
  1110. lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
  1111. int_to_scsilun(lpfc_cmd->pCmd->device->lun,
  1112. &lpfc_cmd->fcp_cmnd->fcp_lun);
  1113. memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
  1114. if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
  1115. switch (tag[0]) {
  1116. case HEAD_OF_QUEUE_TAG:
  1117. fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
  1118. break;
  1119. case ORDERED_QUEUE_TAG:
  1120. fcp_cmnd->fcpCntl1 = ORDERED_Q;
  1121. break;
  1122. default:
  1123. fcp_cmnd->fcpCntl1 = SIMPLE_Q;
  1124. break;
  1125. }
  1126. } else
  1127. fcp_cmnd->fcpCntl1 = 0;
  1128. /*
  1129. * There are three possibilities here - use scatter-gather segment, use
  1130. * the single mapping, or neither. Start the lpfc command prep by
  1131. * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
  1132. * data bde entry.
  1133. */
  1134. if (scsi_sg_count(scsi_cmnd)) {
  1135. if (datadir == DMA_TO_DEVICE) {
  1136. iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
  1137. iocb_cmd->un.fcpi.fcpi_parm = 0;
  1138. iocb_cmd->ulpPU = 0;
  1139. fcp_cmnd->fcpCntl3 = WRITE_DATA;
  1140. phba->fc4OutputRequests++;
  1141. } else {
  1142. iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
  1143. iocb_cmd->ulpPU = PARM_READ_CHECK;
  1144. iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
  1145. fcp_cmnd->fcpCntl3 = READ_DATA;
  1146. phba->fc4InputRequests++;
  1147. }
  1148. } else {
  1149. iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
  1150. iocb_cmd->un.fcpi.fcpi_parm = 0;
  1151. iocb_cmd->ulpPU = 0;
  1152. fcp_cmnd->fcpCntl3 = 0;
  1153. phba->fc4ControlRequests++;
  1154. }
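/*
 * Note on the read/write setup above: for reads the command IOCB's fcpi_parm
 * carries the expected transfer length and ulpPU is set to PARM_READ_CHECK,
 * which lets the completion path flag read-check (dropped frame) errors when
 * the HBA's transfer count disagrees with the target's reported residual.
 */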
  1155. if (phba->sli_rev == 3)
  1156. lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
  1157. /*
  1158. * Finish initializing those IOCB fields that are independent
  1159. * of the scsi_cmnd request_buffer
  1160. */
  1161. piocbq->iocb.ulpContext = pnode->nlp_rpi;
  1162. if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
  1163. piocbq->iocb.ulpFCP2Rcvy = 1;
  1164. else
  1165. piocbq->iocb.ulpFCP2Rcvy = 0;
  1166. piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
  1167. piocbq->context1 = lpfc_cmd;
  1168. piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
  1169. piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
  1170. piocbq->vport = vport;
  1171. }
  1172. /**
  1173. * lpfc_scsi_prep_task_mgmt_cmnd: Convert scsi TM cmnd to FCP information unit.
  1174. * @vport: The virtual port for which this call is being executed.
  1175. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
  1176. * @lun: Logical unit number.
  1177. * @task_mgmt_cmd: SCSI task management command.
  1178. *
  1179. * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
  1180. *
  1181. * Return codes:
  1182. * 0 - Error
  1183. * 1 - Success
  1184. **/
  1185. static int
  1186. lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
  1187. struct lpfc_scsi_buf *lpfc_cmd,
  1188. unsigned int lun,
  1189. uint8_t task_mgmt_cmd)
  1190. {
  1191. struct lpfc_iocbq *piocbq;
  1192. IOCB_t *piocb;
  1193. struct fcp_cmnd *fcp_cmnd;
  1194. struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
  1195. struct lpfc_nodelist *ndlp = rdata->pnode;
  1196. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
  1197. ndlp->nlp_state != NLP_STE_MAPPED_NODE)
  1198. return 0;
  1199. piocbq = &(lpfc_cmd->cur_iocbq);
  1200. piocbq->vport = vport;
  1201. piocb = &piocbq->iocb;
  1202. fcp_cmnd = lpfc_cmd->fcp_cmnd;
  1203. /* Clear out any old data in the FCP command area */
  1204. memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
  1205. int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
  1206. fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
  1207. if (vport->phba->sli_rev == 3)
  1208. lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
  1209. piocb->ulpCommand = CMD_FCP_ICMND64_CR;
  1210. piocb->ulpContext = ndlp->nlp_rpi;
  1211. if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
  1212. piocb->ulpFCP2Rcvy = 1;
  1213. }
  1214. piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
  1215. /* ulpTimeout is only one byte */
  1216. if (lpfc_cmd->timeout > 0xff) {
  1217. /*
  1218. * Do not timeout the command at the firmware level.
  1219. * The driver will provide the timeout mechanism.
  1220. */
  1221. piocb->ulpTimeout = 0;
  1222. } else {
  1223. piocb->ulpTimeout = lpfc_cmd->timeout;
  1224. }
  1225. return 1;
  1226. }
  1227. /**
  1228. * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
  1229. * @phba: The Hba for which this call is being executed.
  1230. * @cmdiocbq: Pointer to lpfc_iocbq data structure.
  1231. * @rspiocbq: Pointer to lpfc_iocbq data structure.
  1232. *
1233. * This routine is the IOCB completion routine for the device reset and target
1234. * reset paths. It releases the scsi buffer associated with lpfc_cmd.
  1235. **/
  1236. static void
  1237. lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
  1238. struct lpfc_iocbq *cmdiocbq,
  1239. struct lpfc_iocbq *rspiocbq)
  1240. {
  1241. struct lpfc_scsi_buf *lpfc_cmd =
  1242. (struct lpfc_scsi_buf *) cmdiocbq->context1;
  1243. if (lpfc_cmd)
  1244. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1245. return;
  1246. }
  1247. /**
  1248. * lpfc_scsi_tgt_reset: Target reset handler.
  1249. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
  1250. * @vport: The virtual port for which this call is being executed.
  1251. * @tgt_id: Target ID.
  1252. * @lun: Lun number.
  1253. * @rdata: Pointer to lpfc_rport_data.
  1254. *
  1255. * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
  1256. *
  1257. * Return Code:
  1258. * 0x2003 - Error
  1259. * 0x2002 - Success.
  1260. **/
  1261. static int
  1262. lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
  1263. unsigned tgt_id, unsigned int lun,
  1264. struct lpfc_rport_data *rdata)
  1265. {
  1266. struct lpfc_hba *phba = vport->phba;
  1267. struct lpfc_iocbq *iocbq;
  1268. struct lpfc_iocbq *iocbqrsp;
  1269. int ret;
  1270. int status;
  1271. if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
  1272. return FAILED;
  1273. lpfc_cmd->rdata = rdata;
  1274. status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
  1275. FCP_TARGET_RESET);
  1276. if (!status)
  1277. return FAILED;
  1278. iocbq = &lpfc_cmd->cur_iocbq;
  1279. iocbqrsp = lpfc_sli_get_iocbq(phba);
  1280. if (!iocbqrsp)
  1281. return FAILED;
  1282. /* Issue Target Reset to TGT <num> */
  1283. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  1284. "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
  1285. tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
  1286. status = lpfc_sli_issue_iocb_wait(phba,
  1287. &phba->sli.ring[phba->sli.fcp_ring],
  1288. iocbq, iocbqrsp, lpfc_cmd->timeout);
  1289. if (status != IOCB_SUCCESS) {
  1290. if (status == IOCB_TIMEDOUT) {
  1291. iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
  1292. ret = TIMEOUT_ERROR;
  1293. } else
  1294. ret = FAILED;
  1295. lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
  1296. } else {
  1297. ret = SUCCESS;
  1298. lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
  1299. lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
  1300. if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
  1301. (lpfc_cmd->result & IOERR_DRVR_MASK))
  1302. lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
  1303. }
  1304. lpfc_sli_release_iocbq(phba, iocbqrsp);
  1305. return ret;
  1306. }
  1307. /**
  1308. * lpfc_info: Info entry point of scsi_host_template data structure.
  1309. * @host: The scsi host for which this call is being executed.
  1310. *
1311. * This routine provides module information about the HBA.
  1312. *
1313. * Return code:
  1314. * Pointer to char - Success.
  1315. **/
  1316. const char *
  1317. lpfc_info(struct Scsi_Host *host)
  1318. {
  1319. struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
  1320. struct lpfc_hba *phba = vport->phba;
  1321. int len;
  1322. static char lpfcinfobuf[384];
1323. memset(lpfcinfobuf, 0, 384);
1324. if (phba && phba->pcidev) {
  1325. strncpy(lpfcinfobuf, phba->ModelDesc, 256);
  1326. len = strlen(lpfcinfobuf);
  1327. snprintf(lpfcinfobuf + len,
  1328. 384-len,
  1329. " on PCI bus %02x device %02x irq %d",
  1330. phba->pcidev->bus->number,
  1331. phba->pcidev->devfn,
  1332. phba->pcidev->irq);
  1333. len = strlen(lpfcinfobuf);
  1334. if (phba->Port[0]) {
  1335. snprintf(lpfcinfobuf + len,
  1336. 384-len,
  1337. " port %s",
  1338. phba->Port);
  1339. }
  1340. }
  1341. return lpfcinfobuf;
  1342. }
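/*
 * For illustration only: with a hypothetical adapter, lpfc_info() returns a
 * string shaped like
 *
 *	"<ModelDesc> on PCI bus 03 device 10 irq 17 port 1"
 *
 * i.e. phba->ModelDesc followed by the PCI bus/devfn/irq triple and, when
 * phba->Port is populated, the port designator.
 */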
  1343. /**
1344. * lpfc_poll_rearm_timer: Routine to modify the fcp_poll timer of the HBA.
  1345. * @phba: The Hba for which this call is being executed.
  1346. *
1347. * This routine rearms the fcp_poll_timer of @phba by cfg_poll_tmo.
  1348. * The default value of cfg_poll_tmo is 10 milliseconds.
  1349. **/
  1350. static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
  1351. {
  1352. unsigned long poll_tmo_expires =
  1353. (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
  1354. if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
  1355. mod_timer(&phba->fcp_poll_timer,
  1356. poll_tmo_expires);
  1357. }
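/*
 * Worked example (illustrative numbers only): with cfg_poll_tmo at its
 * documented 10 millisecond default, each call pushes fcp_poll_timer out by
 * msecs_to_jiffies(10) from "now", and only while the FCP ring's txcmplq
 * still holds outstanding commands; once the queue drains, the timer is
 * simply left to expire.
 */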
  1358. /**
  1359. * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
  1360. * @phba: The Hba for which this call is being executed.
  1361. *
  1362. * This routine starts the fcp_poll_timer of @phba.
  1363. **/
  1364. void lpfc_poll_start_timer(struct lpfc_hba * phba)
  1365. {
  1366. lpfc_poll_rearm_timer(phba);
  1367. }
  1368. /**
  1369. * lpfc_poll_timeout: Restart polling timer.
1370. * @ptr: Pointer to the lpfc_hba data structure, cast to unsigned long.
  1371. *
1372. * This routine restarts the fcp_poll timer when FCP ring polling is enabled
1373. * and the FCP ring interrupt is disabled.
  1374. **/
  1375. void lpfc_poll_timeout(unsigned long ptr)
  1376. {
  1377. struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
  1378. if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
  1379. lpfc_sli_poll_fcp_ring (phba);
  1380. if (phba->cfg_poll & DISABLE_FCP_RING_INT)
  1381. lpfc_poll_rearm_timer(phba);
  1382. }
  1383. }
  1384. /**
1385. * lpfc_queuecommand: Queuecommand entry point of Scsi Host Template data
  1386. * structure.
  1387. * @cmnd: Pointer to scsi_cmnd data structure.
  1388. * @done: Pointer to done routine.
  1389. *
1390. * The driver registers this routine with the SCSI midlayer to submit @cmnd for processing.
1391. * This routine prepares an IOCB from the scsi command and provides it to the firmware.
1392. * The @done callback is invoked after the driver has finished processing the command.
  1393. *
  1394. * Return value :
  1395. * 0 - Success
  1396. * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
  1397. **/
  1398. static int
  1399. lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
  1400. {
  1401. struct Scsi_Host *shost = cmnd->device->host;
  1402. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  1403. struct lpfc_hba *phba = vport->phba;
  1404. struct lpfc_sli *psli = &phba->sli;
  1405. struct lpfc_rport_data *rdata = cmnd->device->hostdata;
  1406. struct lpfc_nodelist *ndlp = rdata->pnode;
  1407. struct lpfc_scsi_buf *lpfc_cmd;
  1408. struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
  1409. int err;
  1410. err = fc_remote_port_chkready(rport);
  1411. if (err) {
  1412. cmnd->result = err;
  1413. goto out_fail_command;
  1414. }
  1415. /*
  1416. * Catch race where our node has transitioned, but the
  1417. * transport is still transitioning.
  1418. */
  1419. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
  1420. cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
  1421. goto out_fail_command;
  1422. }
  1423. if (vport->cfg_max_scsicmpl_time &&
  1424. (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
  1425. goto out_host_busy;
  1426. lpfc_cmd = lpfc_get_scsi_buf(phba);
  1427. if (lpfc_cmd == NULL) {
  1428. lpfc_rampdown_queue_depth(phba);
  1429. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  1430. "0707 driver's buffer pool is empty, "
  1431. "IO busied\n");
  1432. goto out_host_busy;
  1433. }
  1434. /*
  1435. * Store the midlayer's command structure for the completion phase
  1436. * and complete the command initialization.
  1437. */
  1438. lpfc_cmd->pCmd = cmnd;
  1439. lpfc_cmd->rdata = rdata;
  1440. lpfc_cmd->timeout = 0;
  1441. lpfc_cmd->start_time = jiffies;
  1442. cmnd->host_scribble = (unsigned char *)lpfc_cmd;
  1443. cmnd->scsi_done = done;
  1444. err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
  1445. if (err)
  1446. goto out_host_busy_free_buf;
  1447. lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
  1448. atomic_inc(&ndlp->cmd_pending);
  1449. err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
  1450. &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
  1451. if (err) {
  1452. atomic_dec(&ndlp->cmd_pending);
  1453. goto out_host_busy_free_buf;
  1454. }
  1455. if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
  1456. lpfc_sli_poll_fcp_ring(phba);
  1457. if (phba->cfg_poll & DISABLE_FCP_RING_INT)
  1458. lpfc_poll_rearm_timer(phba);
  1459. }
  1460. return 0;
  1461. out_host_busy_free_buf:
  1462. lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
  1463. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1464. out_host_busy:
  1465. return SCSI_MLQUEUE_HOST_BUSY;
  1466. out_fail_command:
  1467. done(cmnd);
  1468. return 0;
  1469. }
  1470. /**
  1471. * lpfc_block_error_handler: Routine to block error handler.
  1472. * @cmnd: Pointer to scsi_cmnd data structure.
  1473. *
1474. * This routine blocks execution until the fc_rport state is no longer FC_PORTSTATE_BLOCKED.
  1475. **/
  1476. static void
  1477. lpfc_block_error_handler(struct scsi_cmnd *cmnd)
  1478. {
  1479. struct Scsi_Host *shost = cmnd->device->host;
  1480. struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
  1481. spin_lock_irq(shost->host_lock);
  1482. while (rport->port_state == FC_PORTSTATE_BLOCKED) {
  1483. spin_unlock_irq(shost->host_lock);
  1484. msleep(1000);
  1485. spin_lock_irq(shost->host_lock);
  1486. }
  1487. spin_unlock_irq(shost->host_lock);
  1488. return;
  1489. }
  1490. /**
1491. * lpfc_abort_handler: eh_abort_handler entry point of Scsi Host Template data
1492. * structure.
  1493. * @cmnd: Pointer to scsi_cmnd data structure.
  1494. *
  1495. * This routine aborts @cmnd pending in base driver.
  1496. *
  1497. * Return code :
  1498. * 0x2003 - Error
  1499. * 0x2002 - Success
  1500. **/
  1501. static int
  1502. lpfc_abort_handler(struct scsi_cmnd *cmnd)
  1503. {
  1504. struct Scsi_Host *shost = cmnd->device->host;
  1505. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  1506. struct lpfc_hba *phba = vport->phba;
  1507. struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
  1508. struct lpfc_iocbq *iocb;
  1509. struct lpfc_iocbq *abtsiocb;
  1510. struct lpfc_scsi_buf *lpfc_cmd;
  1511. IOCB_t *cmd, *icmd;
  1512. int ret = SUCCESS;
  1513. DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
  1514. lpfc_block_error_handler(cmnd);
  1515. lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
  1516. BUG_ON(!lpfc_cmd);
  1517. /*
  1518. * If pCmd field of the corresponding lpfc_scsi_buf structure
  1519. * points to a different SCSI command, then the driver has
  1520. * already completed this command, but the midlayer did not
  1521. * see the completion before the eh fired. Just return
  1522. * SUCCESS.
  1523. */
  1524. iocb = &lpfc_cmd->cur_iocbq;
  1525. if (lpfc_cmd->pCmd != cmnd)
  1526. goto out;
  1527. BUG_ON(iocb->context1 != lpfc_cmd);
  1528. abtsiocb = lpfc_sli_get_iocbq(phba);
  1529. if (abtsiocb == NULL) {
  1530. ret = FAILED;
  1531. goto out;
  1532. }
  1533. /*
1534. * The SCSI command cannot be in the txq; it is in flight because
1535. * pCmd is still pointing at the SCSI command we have to abort. There
1536. * is no need to search the txcmplq. Just send an abort to the FW.
  1537. */
  1538. cmd = &iocb->iocb;
  1539. icmd = &abtsiocb->iocb;
  1540. icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
  1541. icmd->un.acxri.abortContextTag = cmd->ulpContext;
  1542. icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
  1543. icmd->ulpLe = 1;
  1544. icmd->ulpClass = cmd->ulpClass;
  1545. if (lpfc_is_link_up(phba))
  1546. icmd->ulpCommand = CMD_ABORT_XRI_CN;
  1547. else
  1548. icmd->ulpCommand = CMD_CLOSE_XRI_CN;
  1549. abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
  1550. abtsiocb->vport = vport;
  1551. if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
  1552. lpfc_sli_release_iocbq(phba, abtsiocb);
  1553. ret = FAILED;
  1554. goto out;
  1555. }
  1556. if (phba->cfg_poll & DISABLE_FCP_RING_INT)
  1557. lpfc_sli_poll_fcp_ring (phba);
  1558. lpfc_cmd->waitq = &waitq;
  1559. /* Wait for abort to complete */
  1560. wait_event_timeout(waitq,
  1561. (lpfc_cmd->pCmd != cmnd),
  1562. (2*vport->cfg_devloss_tmo*HZ));
  1563. spin_lock_irq(shost->host_lock);
  1564. lpfc_cmd->waitq = NULL;
  1565. spin_unlock_irq(shost->host_lock);
  1566. if (lpfc_cmd->pCmd == cmnd) {
  1567. ret = FAILED;
  1568. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1569. "0748 abort handler timed out waiting "
  1570. "for abort to complete: ret %#x, ID %d, "
  1571. "LUN %d, snum %#lx\n",
  1572. ret, cmnd->device->id, cmnd->device->lun,
  1573. cmnd->serial_number);
  1574. }
  1575. out:
  1576. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  1577. "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
  1578. "LUN %d snum %#lx\n", ret, cmnd->device->id,
  1579. cmnd->device->lun, cmnd->serial_number);
  1580. return ret;
  1581. }
  1582. /**
1583. * lpfc_device_reset_handler: eh_device_reset_handler entry point of Scsi Host Template
1584. * data structure.
  1585. * @cmnd: Pointer to scsi_cmnd data structure.
  1586. *
  1587. * This routine does a device reset by sending a TARGET_RESET task management
  1588. * command.
  1589. *
  1590. * Return code :
  1591. * 0x2003 - Error
1592. * 0x2002 - Success
  1593. **/
  1594. static int
  1595. lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
  1596. {
  1597. struct Scsi_Host *shost = cmnd->device->host;
  1598. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  1599. struct lpfc_hba *phba = vport->phba;
  1600. struct lpfc_scsi_buf *lpfc_cmd;
  1601. struct lpfc_iocbq *iocbq, *iocbqrsp;
  1602. struct lpfc_rport_data *rdata = cmnd->device->hostdata;
  1603. struct lpfc_nodelist *pnode = rdata->pnode;
  1604. unsigned long later;
  1605. int ret = SUCCESS;
  1606. int status;
  1607. int cnt;
  1608. struct lpfc_scsi_event_header scsi_event;
  1609. lpfc_block_error_handler(cmnd);
  1610. /*
  1611. * If target is not in a MAPPED state, delay the reset until
  1612. * target is rediscovered or devloss timeout expires.
  1613. */
  1614. later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
  1615. while (time_after(later, jiffies)) {
  1616. if (!pnode || !NLP_CHK_NODE_ACT(pnode))
  1617. return FAILED;
  1618. if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
  1619. break;
  1620. schedule_timeout_uninterruptible(msecs_to_jiffies(500));
  1621. rdata = cmnd->device->hostdata;
  1622. if (!rdata)
  1623. break;
  1624. pnode = rdata->pnode;
  1625. }
  1626. scsi_event.event_type = FC_REG_SCSI_EVENT;
  1627. scsi_event.subcategory = LPFC_EVENT_TGTRESET;
  1628. scsi_event.lun = 0;
  1629. memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
  1630. memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
  1631. fc_host_post_vendor_event(shost,
  1632. fc_get_event_number(),
  1633. sizeof(scsi_event),
  1634. (char *)&scsi_event,
  1635. LPFC_NL_VENDOR_ID);
  1636. if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
  1637. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1638. "0721 LUN Reset rport "
  1639. "failure: msec x%x rdata x%p\n",
  1640. jiffies_to_msecs(jiffies - later), rdata);
  1641. return FAILED;
  1642. }
  1643. lpfc_cmd = lpfc_get_scsi_buf(phba);
  1644. if (lpfc_cmd == NULL)
  1645. return FAILED;
  1646. lpfc_cmd->timeout = 60;
  1647. lpfc_cmd->rdata = rdata;
  1648. status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
  1649. cmnd->device->lun,
  1650. FCP_TARGET_RESET);
  1651. if (!status) {
  1652. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1653. return FAILED;
  1654. }
  1655. iocbq = &lpfc_cmd->cur_iocbq;
  1656. /* get a buffer for this IOCB command response */
  1657. iocbqrsp = lpfc_sli_get_iocbq(phba);
  1658. if (iocbqrsp == NULL) {
  1659. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1660. return FAILED;
  1661. }
  1662. lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
  1663. "0703 Issue target reset to TGT %d LUN %d "
  1664. "rpi x%x nlp_flag x%x\n", cmnd->device->id,
  1665. cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
  1666. status = lpfc_sli_issue_iocb_wait(phba,
  1667. &phba->sli.ring[phba->sli.fcp_ring],
  1668. iocbq, iocbqrsp, lpfc_cmd->timeout);
  1669. if (status == IOCB_TIMEDOUT) {
  1670. iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
  1671. ret = TIMEOUT_ERROR;
  1672. } else {
  1673. if (status != IOCB_SUCCESS)
  1674. ret = FAILED;
  1675. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1676. }
  1677. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1678. "0713 SCSI layer issued device reset (%d, %d) "
  1679. "return x%x status x%x result x%x\n",
  1680. cmnd->device->id, cmnd->device->lun, ret,
  1681. iocbqrsp->iocb.ulpStatus,
  1682. iocbqrsp->iocb.un.ulpWord[4]);
  1683. lpfc_sli_release_iocbq(phba, iocbqrsp);
  1684. cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
  1685. LPFC_CTX_TGT);
  1686. if (cnt)
  1687. lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
  1688. cmnd->device->id, cmnd->device->lun,
  1689. LPFC_CTX_TGT);
  1690. later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
  1691. while (time_after(later, jiffies) && cnt) {
  1692. schedule_timeout_uninterruptible(msecs_to_jiffies(20));
  1693. cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
  1694. cmnd->device->lun, LPFC_CTX_TGT);
  1695. }
  1696. if (cnt) {
  1697. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1698. "0719 device reset I/O flush failure: "
  1699. "cnt x%x\n", cnt);
  1700. ret = FAILED;
  1701. }
  1702. return ret;
  1703. }
  1704. /**
  1705. * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
  1706. * Template data structure.
  1707. * @cmnd: Pointer to scsi_cmnd data structure.
  1708. *
1709. * This routine issues a target reset to every target on @cmnd->device->host.
  1710. *
  1711. * Return Code:
  1712. * 0x2003 - Error
  1713. * 0x2002 - Success
  1714. **/
  1715. static int
  1716. lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
  1717. {
  1718. struct Scsi_Host *shost = cmnd->device->host;
  1719. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  1720. struct lpfc_hba *phba = vport->phba;
  1721. struct lpfc_nodelist *ndlp = NULL;
  1722. int match;
  1723. int ret = SUCCESS, status = SUCCESS, i;
  1724. int cnt;
  1725. struct lpfc_scsi_buf * lpfc_cmd;
  1726. unsigned long later;
  1727. struct lpfc_scsi_event_header scsi_event;
  1728. scsi_event.event_type = FC_REG_SCSI_EVENT;
  1729. scsi_event.subcategory = LPFC_EVENT_BUSRESET;
  1730. scsi_event.lun = 0;
  1731. memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
  1732. memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
  1733. fc_host_post_vendor_event(shost,
  1734. fc_get_event_number(),
  1735. sizeof(scsi_event),
  1736. (char *)&scsi_event,
  1737. LPFC_NL_VENDOR_ID);
  1738. lpfc_block_error_handler(cmnd);
  1739. /*
  1740. * Since the driver manages a single bus device, reset all
  1741. * targets known to the driver. Should any target reset
  1742. * fail, this routine returns failure to the midlayer.
  1743. */
  1744. for (i = 0; i < LPFC_MAX_TARGET; i++) {
  1745. /* Search for mapped node by target ID */
  1746. match = 0;
  1747. spin_lock_irq(shost->host_lock);
  1748. list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
  1749. if (!NLP_CHK_NODE_ACT(ndlp))
  1750. continue;
  1751. if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
  1752. ndlp->nlp_sid == i &&
  1753. ndlp->rport) {
  1754. match = 1;
  1755. break;
  1756. }
  1757. }
  1758. spin_unlock_irq(shost->host_lock);
  1759. if (!match)
  1760. continue;
  1761. lpfc_cmd = lpfc_get_scsi_buf(phba);
  1762. if (lpfc_cmd) {
  1763. lpfc_cmd->timeout = 60;
  1764. status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
  1765. cmnd->device->lun,
  1766. ndlp->rport->dd_data);
  1767. if (status != TIMEOUT_ERROR)
  1768. lpfc_release_scsi_buf(phba, lpfc_cmd);
  1769. }
  1770. if (!lpfc_cmd || status != SUCCESS) {
  1771. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1772. "0700 Bus Reset on target %d failed\n",
  1773. i);
  1774. ret = FAILED;
  1775. }
  1776. }
  1777. /*
  1778. * All outstanding txcmplq I/Os should have been aborted by
  1779. * the targets. Unfortunately, some targets do not abide by
1780. * this, forcing the driver to double-check.
  1781. */
  1782. cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
  1783. if (cnt)
  1784. lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
  1785. 0, 0, LPFC_CTX_HOST);
  1786. later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
  1787. while (time_after(later, jiffies) && cnt) {
  1788. schedule_timeout_uninterruptible(msecs_to_jiffies(20));
  1789. cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
  1790. }
  1791. if (cnt) {
  1792. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1793. "0715 Bus Reset I/O flush failure: "
  1794. "cnt x%x left x%x\n", cnt, i);
  1795. ret = FAILED;
  1796. }
  1797. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1798. "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
  1799. return ret;
  1800. }
  1801. /**
  1802. * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
  1803. * structure.
  1804. * @sdev: Pointer to scsi_device.
  1805. *
1806. * This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
1807. * globally available list of scsi buffers. It also makes sure no more scsi
1808. * buffers are allocated than the HBA limit conveyed to the midlayer. This list
1809. * of scsi buffers exists for the lifetime of the driver.
  1810. *
  1811. * Return codes:
  1812. * non-0 - Error
  1813. * 0 - Success
  1814. **/
  1815. static int
  1816. lpfc_slave_alloc(struct scsi_device *sdev)
  1817. {
  1818. struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
  1819. struct lpfc_hba *phba = vport->phba;
  1820. struct lpfc_scsi_buf *scsi_buf = NULL;
  1821. struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
  1822. uint32_t total = 0, i;
  1823. uint32_t num_to_alloc = 0;
  1824. unsigned long flags;
  1825. if (!rport || fc_remote_port_chkready(rport))
  1826. return -ENXIO;
  1827. sdev->hostdata = rport->dd_data;
  1828. /*
  1829. * Populate the cmds_per_lun count scsi_bufs into this host's globally
  1830. * available list of scsi buffers. Don't allocate more than the
  1831. * HBA limit conveyed to the midlayer via the host structure. The
  1832. * formula accounts for the lun_queue_depth + error handlers + 1
  1833. * extra. This list of scsi bufs exists for the lifetime of the driver.
  1834. */
  1835. total = phba->total_scsi_bufs;
  1836. num_to_alloc = vport->cfg_lun_queue_depth + 2;
  1837. /* Allow some exchanges to be available always to complete discovery */
  1838. if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
  1839. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  1840. "0704 At limitation of %d preallocated "
  1841. "command buffers\n", total);
  1842. return 0;
  1843. /* Allow some exchanges to be available always to complete discovery */
  1844. } else if (total + num_to_alloc >
  1845. phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
  1846. lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
  1847. "0705 Allocation request of %d "
  1848. "command buffers will exceed max of %d. "
  1849. "Reducing allocation request to %d.\n",
  1850. num_to_alloc, phba->cfg_hba_queue_depth,
  1851. (phba->cfg_hba_queue_depth - total));
  1852. num_to_alloc = phba->cfg_hba_queue_depth - total;
  1853. }
  1854. for (i = 0; i < num_to_alloc; i++) {
  1855. scsi_buf = lpfc_new_scsi_buf(vport);
  1856. if (!scsi_buf) {
  1857. lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
  1858. "0706 Failed to allocate "
  1859. "command buffer\n");
  1860. break;
  1861. }
  1862. spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
  1863. phba->total_scsi_bufs++;
  1864. list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
  1865. spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
  1866. }
  1867. return 0;
  1868. }
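/*
 * Sizing sketch (illustrative numbers, not asserted defaults): if
 * cfg_lun_queue_depth were 30, each newly discovered LUN would request
 * 30 + 2 = 32 scsi_bufs.  The checks above skip the allocation entirely
 * once total_scsi_bufs reaches cfg_hba_queue_depth minus the
 * LPFC_DISC_IOCB_BUFF_COUNT exchanges reserved for discovery, and
 * otherwise trim the request so the total stays within cfg_hba_queue_depth.
 */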
  1869. /**
1870. * lpfc_slave_configure: slave_configure entry point of Scsi Host Template data
  1871. * structure.
  1872. * @sdev: Pointer to scsi_device.
  1873. *
1874. * This routine configures the following items:
1875. * - Tag command queuing support for @sdev if supported.
1876. * - Dev loss timeout value of fc_rport.
  1877. * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
  1878. *
  1879. * Return codes:
  1880. * 0 - Success
  1881. **/
  1882. static int
  1883. lpfc_slave_configure(struct scsi_device *sdev)
  1884. {
  1885. struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
  1886. struct lpfc_hba *phba = vport->phba;
  1887. struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
  1888. if (sdev->tagged_supported)
  1889. scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
  1890. else
  1891. scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
  1892. /*
  1893. * Initialize the fc transport attributes for the target
  1894. * containing this scsi device. Also note that the driver's
  1895. * target pointer is stored in the starget_data for the
  1896. * driver's sysfs entry point functions.
  1897. */
  1898. rport->dev_loss_tmo = vport->cfg_devloss_tmo;
  1899. if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
  1900. lpfc_sli_poll_fcp_ring(phba);
  1901. if (phba->cfg_poll & DISABLE_FCP_RING_INT)
  1902. lpfc_poll_rearm_timer(phba);
  1903. }
  1904. return 0;
  1905. }
  1906. /**
  1907. * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
  1908. * @sdev: Pointer to scsi_device.
  1909. *
1910. * This routine sets the @sdev hostdata field to NULL.
  1911. **/
  1912. static void
  1913. lpfc_slave_destroy(struct scsi_device *sdev)
  1914. {
  1915. sdev->hostdata = NULL;
  1916. return;
  1917. }
  1918. struct scsi_host_template lpfc_template = {
  1919. .module = THIS_MODULE,
  1920. .name = LPFC_DRIVER_NAME,
  1921. .info = lpfc_info,
  1922. .queuecommand = lpfc_queuecommand,
  1923. .eh_abort_handler = lpfc_abort_handler,
  1924. .eh_device_reset_handler= lpfc_device_reset_handler,
  1925. .eh_bus_reset_handler = lpfc_bus_reset_handler,
  1926. .slave_alloc = lpfc_slave_alloc,
  1927. .slave_configure = lpfc_slave_configure,
  1928. .slave_destroy = lpfc_slave_destroy,
  1929. .scan_finished = lpfc_scan_finished,
  1930. .this_id = -1,
  1931. .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
  1932. .cmd_per_lun = LPFC_CMD_PER_LUN,
  1933. .use_clustering = ENABLE_CLUSTERING,
  1934. .shost_attrs = lpfc_hba_attrs,
  1935. .max_sectors = 0xFFFF,
  1936. };
  1937. struct scsi_host_template lpfc_vport_template = {
  1938. .module = THIS_MODULE,
  1939. .name = LPFC_DRIVER_NAME,
  1940. .info = lpfc_info,
  1941. .queuecommand = lpfc_queuecommand,
  1942. .eh_abort_handler = lpfc_abort_handler,
  1943. .eh_device_reset_handler= lpfc_device_reset_handler,
  1944. .eh_bus_reset_handler = lpfc_bus_reset_handler,
  1945. .slave_alloc = lpfc_slave_alloc,
  1946. .slave_configure = lpfc_slave_configure,
  1947. .slave_destroy = lpfc_slave_destroy,
  1948. .scan_finished = lpfc_scan_finished,
  1949. .this_id = -1,
  1950. .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
  1951. .cmd_per_lun = LPFC_CMD_PER_LUN,
  1952. .use_clustering = ENABLE_CLUSTERING,
  1953. .shost_attrs = lpfc_vport_attrs,
  1954. .max_sectors = 0xFFFF,
  1955. };
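/*
 * Sketch only (guarded out, not part of lpfc): this shows how a host
 * template such as lpfc_template above is typically handed to the SCSI
 * midlayer.  The helper name and its arguments are hypothetical;
 * scsi_host_alloc(), scsi_add_host() and scsi_host_put() are the standard
 * midlayer calls.
 */
#if 0
static struct Scsi_Host *lpfc_example_create_shost(struct device *dev,
						   int is_npiv_port)
{
	struct scsi_host_template *tmpl =
		is_npiv_port ? &lpfc_vport_template : &lpfc_template;
	struct Scsi_Host *shost;

	/* hostdata area is sized for the driver's per-port structure */
	shost = scsi_host_alloc(tmpl, sizeof(struct lpfc_vport));
	if (!shost)
		return NULL;

	/* register the host with the midlayer; drop it on failure */
	if (scsi_add_host(shost, dev)) {
		scsi_host_put(shost);
		return NULL;
	}
	return shost;
}
#endif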