/* lpfc_init.c */

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.emulex.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"

static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
/************************************************************************/
/* */
/* lpfc_config_port_prep */
/* This routine will do LPFC initialization prior to the */
/* CONFIG_PORT mailbox command. This will be initialized */
/* as a SLI layer callback routine. */
/* This routine returns 0 on success or -ERESTART if it wants */
/* the SLI layer to reset the HBA and try again. Any */
/* other return value indicates an error. */
/* */
/************************************************************************/
int
lpfc_config_port_prep(struct lpfc_hba * phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		"key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->mb;
	phba->hba_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
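			/*
			 * First pass only: byte-swap the license key string
			 * into big-endian 32-bit words before it is copied
			 * into the READ_NVPARM mailbox area below.
			 */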
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_MBOX,
					"%d:0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					phba->brd_no,
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof (mb->un.varRDnvp.nodename));
	}

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n",
				phba->brd_no);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
	if (!pmb->context2)
		goto out_free_mbox;
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_context2;

	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"%d:0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					phba->brd_no,
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_context2:
	kfree(pmb->context2);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
/************************************************************************/
/* */
/* lpfc_config_port_post */
/* This routine will do LPFC initialization after the */
/* CONFIG_PORT mailbox command. This will be initialized */
/* as a SLI layer callback routine. */
/* This routine returns 0 on success. Any other return value */
/* indicates an error. */
/* */
/************************************************************************/
int
lpfc_config_port_post(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j, rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->mb;

	lpfc_config_link(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0447 Adapter failed init, mbxCmd x%x "
				"CONFIG_LINK mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Get login parameters for NID. */
	lpfc_read_sparam(phba, pmb);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free( pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
	memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
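	/* Each of the low six WWNN bytes is split into two nibbles and
	 * written out as an ASCII character: values 0-9 become '0'-'9'
	 * (0x30 + nibble) and 10-15 become 'a'-'f' (0x61 + nibble - 10).
	 */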
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &phba->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			mb->un.varRdConfig.max_xri + 1;

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LINK_SPEED_10G)
	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_LINK_EVENT,
				"%d:1302 Invalid speed for this board: "
				"Reset link speed to auto: x%x\n",
				phba->brd_no,
				phba->cfg_link_speed);
		phba->cfg_link_speed = LINK_SPEED_AUTO;
	}

	phba->hba_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ring 0 till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	lpfc_post_rcv_buf(phba);

	/* Enable appropriate host interrupts */
	spin_lock_irq(phba->host->host_lock);
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	/*
	 * Setup the ring 0 (els) timeout handler
	 */
	timeout = phba->fc_ratov << 1;
	phba->els_tmofunc.expires = jiffies + HZ * timeout;
	add_timer(&phba->els_tmofunc);

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);

		/* Clear all interrupt enable conditions */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		/* Clear all pending interrupts */
		writel(0xffffffff, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		phba->hba_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* MBOX buffer will be freed in mbox compl */

	return (0);
}
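
/* lpfc_discovery_wait: poll once a second (for up to 30 seconds) until the
 * HBA is READY, discovery and PRLI processing have settled and no mailbox
 * command is active; give up after 15 seconds if the link is still down.
 * Returns 0 on success or -ETIMEDOUT.
 */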
static int
lpfc_discovery_wait(struct lpfc_hba *phba)
{
	int i = 0;

	while ((phba->hba_state != LPFC_HBA_READY) ||
	       (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
	       ((phba->fc_map_cnt == 0) && (i<2)) ||
	       (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
		/* Check every second for 30 retries. */
		i++;
		if (i > 30) {
			return -ETIMEDOUT;
		}
		if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
			/* The link is down. Set linkdown timeout */
			return -ETIMEDOUT;
		}

		/* Delay for 1 second to give discovery time to complete. */
		msleep(1000);
	}

	return 0;
}

/************************************************************************/
/* */
/* lpfc_hba_down_prep */
/* This routine will do LPFC uninitialization before the */
/* HBA is reset when bringing down the SLI Layer. This will be */
/* initialized as a SLI layer callback routine. */
/* This routine returns 0 on success. Any other return value */
/* indicates an error. */
/* */
/************************************************************************/
int
lpfc_hba_down_prep(struct lpfc_hba * phba)
{
	/* Disable interrupts */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Cleanup potential discovery resources */
	lpfc_els_flush_rscn(phba);
	lpfc_els_flush_cmd(phba);
	lpfc_disc_flush_list(phba);

	return (0);
}

/************************************************************************/
/* */
/* lpfc_hba_down_post */
/* This routine will do uninitialization after the HBA is reset */
/* when bringing down the SLI Layer. */
/* This routine returns 0 on success. Any other return value */
/* indicates an error. */
/* */
/************************************************************************/
int
lpfc_hba_down_post(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	int i;

	/* Cleanup preposted buffers on the ELS ring */
	pring = &psli->ring[LPFC_ELS_RING];
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		list_del(&mp->list);
		pring->postbufq_cnt--;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		lpfc_sli_abort_iocb_ring(phba, pring);
	}

	return 0;
}
/************************************************************************/
/* */
/* lpfc_handle_eratt */
/* This routine will handle processing a Host Attention */
/* Error Status event. This will be initialized */
/* as a SLI layer callback routine. */
/* */
/************************************************************************/
void
lpfc_handle_eratt(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway. */
	if (pci_channel_offline(phba->pcidev))
		return;

	if (phba->work_hs & HS_FFER6 ||
	    phba->work_hs & HS_FFER5) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"%d:1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->brd_no, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag |= FC_ESTABLISH_LINK;
		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);

		/*
		 * The firmware stops when it has triggered an error attention
		 * with HS_FFER6, which may cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/O) on the txcmplq and let
		 * the SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
			return;
		}
	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->brd_no, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);
		event_data = FC_REG_DUMP_EVENT;
		fc_host_post_vendor_event(phba->host, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		lpfc_offline(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		lpfc_hba_down_post(phba);
	}
}
/************************************************************************/
/* */
/* lpfc_handle_latt */
/* This routine will handle processing a Host Attention */
/* Link Status event. This will be initialized */
/* as a SLI layer callback routine. */
/* */
/************************************************************************/
void
lpfc_handle_latt(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = -ENOMEM;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		goto lpfc_handle_latt_err_exit;

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp)
		goto lpfc_handle_latt_free_pmb;

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt)
		goto lpfc_handle_latt_free_mp;

	rc = -EIO;

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_la(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
	rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED)
		goto lpfc_handle_latt_free_mbuf;

	/* Clear Link Attention in HA REG */
	spin_lock_irq(phba->host->host_lock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;

lpfc_handle_latt_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);
	lpfc_linkdown(phba);
	phba->hba_state = LPFC_HBA_ERROR;

	/* The other case is an error from issue_mbox */
	if (rc == -ENOMEM)
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_MBOX,
				"%d:0300 READ_LA: no buffers\n",
				phba->brd_no);

	return;
}
/************************************************************************/
/* */
/* lpfc_parse_vpd */
/* This routine will parse the VPD data */
/* */
/************************************************************************/
static int
lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
{
	uint8_t lenlo, lenhi;
	uint32_t Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_INIT,
			"%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
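
	/* Walk the PCI VPD image one resource tag at a time: 0x82 (identifier
	 * string) and 0x91 (vendor read-write data) are skipped over, the
	 * SN and V1-V4 keywords inside the 0x90 read-only (VPD-R) section
	 * are extracted below, and 0x78 marks the end of the VPD data.
	 */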
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while(i--) {
						phba->SerialNumber[j++] = vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while(i--) {
						phba->ModelDesc[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while(i--) {
						phba->ModelName[j++] = vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while(i--) {
						phba->ProgramType[j++] = vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				}
				else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while(i--) {
						phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					phba->Port[j] = 0;
					continue;
				}
				else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}
static void
lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	struct {
		char * name;
		int    max_speed;
		char * bus;
	} m = {"<Unknown>", 0, ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP7000E", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", max_speed, "PCI"};
		else
			m = (typeof(m)){"LP9000", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", max_speed, "PCIe"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", max_speed, "PCI-X"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", max_speed,
			"PCI-X2"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", max_speed,
			"PCIe"};
		break;
	default:
		m = (typeof(m)){ NULL };
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79,"%s", m.name);
	if (descp && descp[0] == '\0')
		snprintf(descp, 255,
			 "Emulex %s %dGb %s Fibre Channel Adapter",
			 m.name, m.max_speed, m.bus);
}
/**************************************************/
/* lpfc_post_buffer */
/* */
/* This routine will post count buffers to the */
/* ring with the QUE_RING_BUF_CN command. This */
/* allows 3 buffers / command to be posted. */
/* Returns the number of buffers NOT posted. */
/**************************************************/
int
lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
		 int type)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		spin_lock_irq(phba->host->host_lock);
		iocb = lpfc_sli_get_iocbq(phba);
		spin_unlock_irq(phba->host->host_lock);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						    &mp1->phys);
		if (mp1 == 0 || mp1->virt == 0) {
			kfree(mp1);
			spin_lock_irq(phba->host->host_lock);
			lpfc_sli_release_iocbq(phba, iocb);
			spin_unlock_irq(phba->host->host_lock);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (mp2 == 0 || mp2->virt == 0) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				spin_lock_irq(phba->host->host_lock);
				lpfc_sli_release_iocbq(phba, iocb);
				spin_unlock_irq(phba->host->host_lock);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		spin_lock_irq(phba->host->host_lock);
		if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			spin_unlock_irq(phba->host->host_lock);
			return cnt;
		}
		spin_unlock_irq(phba->host->host_lock);
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
		}
	}
	pring->missbufcnt = 0;
	return 0;
}
/************************************************************************/
/* */
/* lpfc_post_rcv_buf */
/* This routine posts initial rcv buffers to the configured rings */
/* */
/************************************************************************/
static int
lpfc_post_rcv_buf(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
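/* S(N,V) rotates the 32-bit value V left by N bit positions (the rotate
 * used by the SHA-1 rounds below), e.g. S(5, A) is A rotated left by 5.
 */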
/************************************************************************/
/* */
/* lpfc_sha_init */
/* */
/************************************************************************/
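/* The five constants below are the standard SHA-1 initial hash values
 * (H0..H4 from FIPS 180-1); they seed the digest that lpfc_hba_init()
 * computes over the challenge working buffer.
 */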
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/************************************************************************/
/* */
/* lpfc_sha_iterate */
/* */
/************************************************************************/
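/* One SHA-1 compression pass over an 80-word working buffer: words 16-79
 * are expanded from words 0-15 (XOR plus a one-bit rotate), then the four
 * groups of twenty rounds mix A-E with the standard round constants
 * (0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6), and the result is
 * added back into HashResultPointer.
 */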
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}

/************************************************************************/
/* */
/* lpfc_challenge_key */
/* */
/************************************************************************/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/************************************************************************/
/* */
/* lpfc_hba_init */
/* */
/************************************************************************/
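/* Derives the 5-word (20-byte) hbainit digest: the 80-word working buffer
 * is seeded with the node WWN, XOR-folded with the adapter RandomData
 * challenge saved by lpfc_config_port_prep(), and then run through the
 * SHA-1 helpers above.
 */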
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = phba->wwnn;

	HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	memset(HashWorking, 0, (80 * sizeof(uint32_t)));
	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);

	kfree(HashWorking);
}

static void
lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	/* clean up phba - lpfc specific */
	lpfc_can_disctmo(phba);
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				 nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				 nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	INIT_LIST_HEAD(&phba->fc_nlpmap_list);
	INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
	INIT_LIST_HEAD(&phba->fc_unused_list);
	INIT_LIST_HEAD(&phba->fc_plogi_list);
	INIT_LIST_HEAD(&phba->fc_adisc_list);
	INIT_LIST_HEAD(&phba->fc_reglogin_list);
	INIT_LIST_HEAD(&phba->fc_prli_list);
	INIT_LIST_HEAD(&phba->fc_npr_list);

	phba->fc_map_cnt = 0;
	phba->fc_unmap_cnt = 0;
	phba->fc_plogi_cnt = 0;
	phba->fc_adisc_cnt = 0;
	phba->fc_reglogin_cnt = 0;
	phba->fc_prli_cnt = 0;
	phba->fc_npr_cnt = 0;
	phba->fc_unused_cnt= 0;
	return;
}
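
/* Timer callback for fc_estabtmo: the link re-establishment window armed
 * after an error attention (see lpfc_handle_eratt()) has expired, so log
 * it and clear FC_ESTABLISH_LINK.
 */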
static void
lpfc_establish_link_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long iflag;

	/* Re-establishing Link, timer expired */
	lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"%d:1300 Re-establishing Link, timer expired "
			"Data: x%x x%x\n",
			phba->brd_no, phba->fc_flag, phba->hba_state);
	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag &= ~FC_ESTABLISH_LINK;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static int
lpfc_stop_timer(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Instead of a timer, this has been converted to a
	 * deferred processing list.
	 */
	while (!list_empty(&phba->freebufList)) {
		struct lpfc_dmabuf *mp = NULL;

		list_remove_head((&phba->freebufList), mp,
				 struct lpfc_dmabuf, list);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	del_timer_sync(&phba->fcp_poll_timer);
	del_timer_sync(&phba->fc_estabtmo);
	del_timer_sync(&phba->fc_disctmo);
	del_timer_sync(&phba->fc_fdmitmo);
	del_timer_sync(&phba->els_tmofunc);
	psli = &phba->sli;
	del_timer_sync(&psli->mbox_tmo);
	return(1);
}
int
lpfc_online(struct lpfc_hba * phba)
{
	if (!phba)
		return 0;

	if (!(phba->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba,
			KERN_WARNING,
			LOG_INIT,
			"%d:0458 Bring Adapter online\n",
			phba->brd_no);

	if (!lpfc_sli_queue_setup(phba))
		return 1;

	if (lpfc_sli_hba_setup(phba))	/* Initialize the HBA */
		return 1;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_OFFLINE_MODE;
	spin_unlock_irq(phba->host->host_lock);

	return 0;
}

int
lpfc_offline(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli;
	unsigned long iflag;
	int i;
	int cnt = 0;

	if (!phba)
		return 0;

	if (phba->fc_flag & FC_OFFLINE_MODE)
		return 0;

	psli = &phba->sli;

	lpfc_linkdown(phba);
	lpfc_sli_flush_mbox_queue(phba);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		/* The linkdown event takes 30 seconds to timeout. */
		while (pring->txcmplq_cnt) {
			mdelay(10);
			if (cnt++ > 3000) {
				lpfc_printf_log(phba,
						KERN_WARNING, LOG_INIT,
						"%d:0466 Outstanding IO when "
						"bringing Adapter offline\n",
						phba->brd_no);
				break;
			}
		}
	}

	/* stop all timers associated with this hba */
	lpfc_stop_timer(phba);
	phba->work_hba_events = 0;
	phba->work_ha = 0;

	lpfc_printf_log(phba,
			KERN_WARNING,
			LOG_INIT,
			"%d:0460 Bring Adapter offline\n",
			phba->brd_no);

	/* Bring down the SLI Layer and cleanup. The HBA is offline
	   now. */
	lpfc_sli_hba_down(phba);
	lpfc_cleanup(phba, 1);
	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag |= FC_OFFLINE_MODE;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return 0;
}
/******************************************************************************
* Function name: lpfc_scsi_free
*
* Description: Called from lpfc_pci_remove_one to free internal driver
*              resources.
*
******************************************************************************/
static int
lpfc_scsi_free(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(phba->host->host_lock);
	/* Release all the lpfc_scsi_bufs maintained by this host. */
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(phba->host->host_lock);
	return 0;
}
  1257. static int __devinit
  1258. lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
  1259. {
  1260. struct Scsi_Host *host;
  1261. struct lpfc_hba *phba;
  1262. struct lpfc_sli *psli;
  1263. struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
  1264. unsigned long bar0map_len, bar2map_len;
  1265. int error = -ENODEV, retval;
  1266. int i;
  1267. uint16_t iotag;
  1268. if (pci_enable_device(pdev))
  1269. goto out;
  1270. if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
  1271. goto out_disable_device;
  1272. host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba));
  1273. if (!host)
  1274. goto out_release_regions;
  1275. phba = (struct lpfc_hba*)host->hostdata;
  1276. memset(phba, 0, sizeof (struct lpfc_hba));
  1277. phba->host = host;
  1278. phba->fc_flag |= FC_LOADING;
  1279. phba->pcidev = pdev;
  1280. /* Assign an unused board number */
  1281. if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
  1282. goto out_put_host;
  1283. error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
  1284. if (error)
  1285. goto out_put_host;
  1286. host->unique_id = phba->brd_no;
  1287. INIT_LIST_HEAD(&phba->ctrspbuflist);
  1288. INIT_LIST_HEAD(&phba->rnidrspbuflist);
  1289. INIT_LIST_HEAD(&phba->freebufList);
  1290. /* Initialize timers used by driver */
  1291. init_timer(&phba->fc_estabtmo);
  1292. phba->fc_estabtmo.function = lpfc_establish_link_tmo;
  1293. phba->fc_estabtmo.data = (unsigned long)phba;
  1294. init_timer(&phba->fc_disctmo);
  1295. phba->fc_disctmo.function = lpfc_disc_timeout;
  1296. phba->fc_disctmo.data = (unsigned long)phba;
  1297. init_timer(&phba->fc_fdmitmo);
  1298. phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
  1299. phba->fc_fdmitmo.data = (unsigned long)phba;
  1300. init_timer(&phba->els_tmofunc);
  1301. phba->els_tmofunc.function = lpfc_els_timeout;
  1302. phba->els_tmofunc.data = (unsigned long)phba;
  1303. psli = &phba->sli;
  1304. init_timer(&psli->mbox_tmo);
  1305. psli->mbox_tmo.function = lpfc_mbox_timeout;
  1306. psli->mbox_tmo.data = (unsigned long)phba;
  1307. init_timer(&phba->fcp_poll_timer);
  1308. phba->fcp_poll_timer.function = lpfc_poll_timeout;
  1309. phba->fcp_poll_timer.data = (unsigned long)phba;
  1310. /*
  1311. * Get all the module params for configuring this host and then
  1312. * establish the host parameters.
  1313. */
  1314. lpfc_get_cfgparam(phba);
  1315. host->max_id = LPFC_MAX_TARGET;
  1316. host->max_lun = phba->cfg_max_luns;
  1317. host->this_id = -1;
  1318. /* Initialize all internally managed lists. */
  1319. INIT_LIST_HEAD(&phba->fc_nlpmap_list);
  1320. INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
  1321. INIT_LIST_HEAD(&phba->fc_unused_list);
  1322. INIT_LIST_HEAD(&phba->fc_plogi_list);
  1323. INIT_LIST_HEAD(&phba->fc_adisc_list);
  1324. INIT_LIST_HEAD(&phba->fc_reglogin_list);
  1325. INIT_LIST_HEAD(&phba->fc_prli_list);
  1326. INIT_LIST_HEAD(&phba->fc_npr_list);
  1327. pci_set_master(pdev);
  1328. retval = pci_set_mwi(pdev);
  1329. if (retval)
  1330. dev_printk(KERN_WARNING, &pdev->dev,
  1331. "Warning: pci_set_mwi returned %d\n", retval);
  1332. if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
  1333. if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
  1334. goto out_idr_remove;
  1335. /*
  1336. * Get the bus address of Bar0 and Bar2 and the number of bytes
  1337. * required by each mapping.
  1338. */
  1339. phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
  1340. bar0map_len = pci_resource_len(phba->pcidev, 0);
  1341. phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
  1342. bar2map_len = pci_resource_len(phba->pcidev, 2);
  1343. /* Map HBA SLIM to a kernel virtual address. */
  1344. phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
  1345. if (!phba->slim_memmap_p) {
  1346. error = -ENODEV;
  1347. dev_printk(KERN_ERR, &pdev->dev,
  1348. "ioremap failed for SLIM memory.\n");
  1349. goto out_idr_remove;
  1350. }
  1351. /* Map HBA Control Registers to a kernel virtual address. */
  1352. phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
  1353. if (!phba->ctrl_regs_memmap_p) {
  1354. error = -ENODEV;
  1355. dev_printk(KERN_ERR, &pdev->dev,
  1356. "ioremap failed for HBA control registers.\n");
  1357. goto out_iounmap_slim;
  1358. }
  1359. /* Allocate memory for SLI-2 structures */
  1360. phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
  1361. &phba->slim2p_mapping, GFP_KERNEL);
  1362. if (!phba->slim2p)
  1363. goto out_iounmap;
  1364. memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
  1365. /* Initialize the SLI Layer to run with lpfc HBAs. */
  1366. lpfc_sli_setup(phba);
  1367. lpfc_sli_queue_setup(phba);
  1368. error = lpfc_mem_alloc(phba);
  1369. if (error)
  1370. goto out_free_slim;
  1371. /* Initialize and populate the iocb list per host. */
  1372. INIT_LIST_HEAD(&phba->lpfc_iocb_list);
  1373. for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
  1374. iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
  1375. if (iocbq_entry == NULL) {
  1376. printk(KERN_ERR "%s: only allocated %d iocbs of "
  1377. "expected %d count. Unloading driver.\n",
  1378. __FUNCTION__, i, LPFC_IOCB_LIST_CNT);
  1379. error = -ENOMEM;
  1380. goto out_free_iocbq;
  1381. }
  1382. memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n",
				__FUNCTION__);
			error = -ENOMEM;
			goto out_free_iocbq;
		}

		spin_lock_irq(phba->host->host_lock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(phba->host->host_lock);
	}
	/* Initialize HBA structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	INIT_LIST_HEAD(&phba->work_list);
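	/*
	 * Host attention events the worker thread handles: error attention,
	 * mailbox completions, link attention, and receive activity on the
	 * ELS ring.
	 */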
	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		goto out_free_iocbq;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	host->can_queue = phba->cfg_hba_queue_depth - 10;

	/* Tell the midlayer we support 16 byte commands */
	host->max_cmd_len = 16;

	/* Initialize the list of scsi buffers used by driver for scsi IO. */
	spin_lock_init(&phba->scsi_buf_list_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);

	host->transportt = lpfc_transport_template;
	pci_set_drvdata(pdev, host);
	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto out_kthread_stop;

	error = lpfc_alloc_sysfs_attr(phba);
	if (error)
		goto out_remove_host;
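	/*
	 * Try MSI if configured; if it cannot be enabled, log it and fall
	 * back to the legacy (shared) interrupt line requested below.
	 */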
	if (phba->cfg_use_msi) {
		error = pci_enable_msi(phba->pcidev);
		if (error)
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
					"Enable MSI failed, continuing with "
					"IRQ\n", phba->brd_no);
	}
	error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
			    LPFC_DRIVER_NAME, phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0451 Enable interrupt handler failed\n",
				phba->brd_no);
		goto out_free_sysfs_attr;
	}
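	/*
	 * Cache kernel virtual addresses for the mailbox SLIM area and the
	 * host attention, chip attention, host status, and host control
	 * registers before bringing up the SLI layer.
	 */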
	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	error = lpfc_sli_hba_setup(phba);
	if (error) {
		error = -ENODEV;
		goto out_free_irq;
	}
	/*
	 * hba setup may have changed the hba_queue_depth so we need to adjust
	 * the value of can_queue.
	 */
	host->can_queue = phba->cfg_hba_queue_depth - 10;

	lpfc_discovery_wait(phba);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(phba->host->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(phba->host->host_lock);
	}

	/*
	 * Set fixed host attributes.  This must be done after
	 * lpfc_sli_hba_setup().
	 */
	fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
	fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
	fc_host_supported_classes(host) = FC_COS_CLASS3;
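	/* Report the FC-4 protocol types this port supports to the FC transport. */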
	memset(fc_host_supported_fc4s(host), 0,
	       sizeof(fc_host_supported_fc4s(host)));
	fc_host_supported_fc4s(host)[2] = 1;
	fc_host_supported_fc4s(host)[7] = 1;

	lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));

	fc_host_supported_speeds(host) = 0;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(host) |= FC_PORTSPEED_1GBIT;
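	/*
	 * The maximum receive frame size comes from the buffer-to-buffer
	 * receive size in the login service parameters; only the low four
	 * bits of the MSB are significant.
	 */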
	fc_host_maxframe_size(host) =
		((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(host), 0,
	       sizeof(fc_host_active_fc4s(host)));
	fc_host_active_fc4s(host)[2] = 1;
	fc_host_active_fc4s(host)[7] = 1;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LOADING;
	spin_unlock_irq(phba->host->host_lock);
	return 0;
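	/* Error paths: unwind the setup above in reverse order. */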
out_free_irq:
	lpfc_stop_timer(phba);
	phba->work_hba_events = 0;
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(phba);
out_remove_host:
	fc_remove_host(phba->host);
	scsi_remove_host(phba->host);
out_kthread_stop:
	kthread_stop(phba->worker_thread);
out_free_iocbq:
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		spin_lock_irq(phba->host->host_lock);
		/* Unlink the entry from the list before freeing it. */
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
		spin_unlock_irq(phba->host->host_lock);
	}
	lpfc_mem_free(phba);
out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
			  phba->slim2p_mapping);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out_idr_remove:
	idr_remove(&lpfc_hba_index, phba->brd_no);
out_put_host:
	phba->host = NULL;
	scsi_host_put(host);
out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	pci_set_drvdata(pdev, NULL);
	return error;
}
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
	unsigned long iflag;

	lpfc_free_sysfs_attr(phba);

	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag |= FC_UNLOADING;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
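	/*
	 * Detach from the FC transport and the SCSI midlayer, then stop the
	 * worker thread before tearing down the hardware.
	 */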
	fc_remove_host(phba->host);
	scsi_remove_host(phba->host);

	kthread_stop(phba->worker_thread);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */
	lpfc_sli_hba_down(phba);
	lpfc_sli_brdrestart(phba);

	/* Release the irq reservation */
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);

	lpfc_cleanup(phba, 0);
	lpfc_stop_timer(phba);
	phba->work_hba_events = 0;

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free(phba);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p, phba->slim2p_mapping);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	pci_release_regions(phba->pcidev);
	pci_disable_device(phba->pcidev);

	idr_remove(&lpfc_hba_index, phba->brd_no);
	scsi_host_put(phba->host);

	pci_set_drvdata(pdev, NULL);
}
/**
 * lpfc_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	if (state == pci_channel_io_perm_failure) {
		lpfc_pci_remove_one(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_disable_device(pdev);
	/*
	 * There may be I/Os dropped by the firmware.
	 * Error out the iocbs on the txcmplq and let the SCSI layer
	 * retry them after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * lpfc_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_bars(pdev, bars)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	/* Re-establishing Link */
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_ESTABLISH_LINK;
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(phba->host->host_lock);

	/* Take device offline; this will perform cleanup */
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
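	/* Bring the HBA back online; on success, rearm the link re-establish timer. */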
	if (lpfc_online(phba) == 0) {
		mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
	}
}
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);
static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.err_handler	= &lpfc_err_handler,
};
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	lpfc_transport_template =
		fc_attach_transport(&lpfc_transport_functions);
	if (!lpfc_transport_template)
		return -ENOMEM;
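	/* Register the PCI driver; release the transport template if that fails. */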
	error = pci_register_driver(&lpfc_driver);
	if (error)
		fc_release_transport(lpfc_transport_template);

	return error;
}

static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);