lpfc_init.c
  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Enterprise Fibre Channel Host Bus Adapters. *
  4. * Refer to the README file included with this package for *
  5. * driver version and adapter support. *
  6. * Copyright (C) 2004 Emulex Corporation. *
  7. * www.emulex.com *
  8. * *
  9. * This program is free software; you can redistribute it and/or *
  10. * modify it under the terms of the GNU General Public License *
  11. * as published by the Free Software Foundation; either version 2 *
  12. * of the License, or (at your option) any later version. *
  13. * *
  14. * This program is distributed in the hope that it will be useful, *
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  17. * GNU General Public License for more details, a copy of which *
  18. * can be found in the file COPYING included with this package. *
  19. *******************************************************************/
  20. /*
  21. * $Id: lpfc_init.c 1.233 2005/04/13 11:59:09EDT sf_support Exp $
  22. */
  23. #include <linux/blkdev.h>
  24. #include <linux/delay.h>
  25. #include <linux/dma-mapping.h>
  26. #include <linux/idr.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/kthread.h>
  29. #include <linux/pci.h>
  30. #include <linux/spinlock.h>
  31. #include <scsi/scsi_device.h>
  32. #include <scsi/scsi_host.h>
  33. #include <scsi/scsi_transport_fc.h>
  34. #include "lpfc_hw.h"
  35. #include "lpfc_sli.h"
  36. #include "lpfc_disc.h"
  37. #include "lpfc_scsi.h"
  38. #include "lpfc.h"
  39. #include "lpfc_logmsg.h"
  40. #include "lpfc_crtn.h"
  41. #include "lpfc_version.h"
  42. static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *);
  43. static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
  44. static int lpfc_post_rcv_buf(struct lpfc_hba *);
  45. static struct scsi_transport_template *lpfc_transport_template = NULL;
  46. static DEFINE_IDR(lpfc_hba_index);
  47. /************************************************************************/
  48. /* */
  49. /* lpfc_config_port_prep */
  50. /* This routine will do LPFC initialization prior to the */
  51. /* CONFIG_PORT mailbox command. This will be initialized */
  52. /* as a SLI layer callback routine. */
  53. /* This routine returns 0 on success or -ERESTART if it wants */
  54. /* the SLI layer to reset the HBA and try again. Any */
  55. /* other return value indicates an error. */
  56. /* */
  57. /************************************************************************/
  58. int
  59. lpfc_config_port_prep(struct lpfc_hba * phba)
  60. {
  61. lpfc_vpd_t *vp = &phba->vpd;
  62. int i = 0, rc;
  63. LPFC_MBOXQ_t *pmb;
  64. MAILBOX_t *mb;
  65. char *lpfc_vpd_data = NULL;
  66. uint16_t offset = 0;
  67. static char licensed[56] =
  68. "key unlock for use with gnu public licensed code only\0";
  69. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  70. if (!pmb) {
  71. phba->hba_state = LPFC_HBA_ERROR;
  72. return -ENOMEM;
  73. }
  74. mb = &pmb->mb;
  75. phba->hba_state = LPFC_INIT_MBX_CMDS;
  76. if (lpfc_is_LC_HBA(phba->pcidev->device)) {
  77. uint32_t *ptext = (uint32_t *) licensed;
  78. for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
  79. *ptext = cpu_to_be32(*ptext);
  80. lpfc_read_nv(phba, pmb);
  81. memset((char*)mb->un.varRDnvp.rsvd3, 0,
  82. sizeof (mb->un.varRDnvp.rsvd3));
  83. memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
  84. sizeof (licensed));
  85. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  86. if (rc != MBX_SUCCESS) {
  87. lpfc_printf_log(phba,
  88. KERN_ERR,
  89. LOG_MBOX,
  90. "%d:0324 Config Port initialization "
  91. "error, mbxCmd x%x READ_NVPARM, "
  92. "mbxStatus x%x\n",
  93. phba->brd_no,
  94. mb->mbxCommand, mb->mbxStatus);
  95. mempool_free(pmb, phba->mbox_mem_pool);
  96. return -ERESTART;
  97. }
  98. memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
  99. sizeof (mb->un.varRDnvp.nodename));
  100. }
  101. /* Setup and issue mailbox READ REV command */
  102. lpfc_read_rev(phba, pmb);
  103. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  104. if (rc != MBX_SUCCESS) {
  105. lpfc_printf_log(phba,
  106. KERN_ERR,
  107. LOG_INIT,
  108. "%d:0439 Adapter failed to init, mbxCmd x%x "
  109. "READ_REV, mbxStatus x%x\n",
  110. phba->brd_no,
  111. mb->mbxCommand, mb->mbxStatus);
  112. mempool_free( pmb, phba->mbox_mem_pool);
  113. return -ERESTART;
  114. }
  115. /* The HBA's current state is provided by the ProgType and rr fields.
  116. * Read and check the value of these fields before continuing to config
  117. * this port.
  118. */
  119. if (mb->un.varRdRev.rr == 0 || mb->un.varRdRev.un.b.ProgType != 2) {
  120. /* Old firmware */
  121. vp->rev.rBit = 0;
  122. lpfc_printf_log(phba,
  123. KERN_ERR,
  124. LOG_INIT,
  125. "%d:0440 Adapter failed to init, mbxCmd x%x "
  126. "READ_REV detected outdated firmware"
  127. "Data: x%x\n",
  128. phba->brd_no,
  129. mb->mbxCommand, 0);
  130. mempool_free(pmb, phba->mbox_mem_pool);
  131. return -ERESTART;
  132. } else {
  133. vp->rev.rBit = 1;
  134. vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
  135. memcpy(vp->rev.sli1FwName,
  136. (char*)mb->un.varRdRev.sli1FwName, 16);
  137. vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
  138. memcpy(vp->rev.sli2FwName,
  139. (char *)mb->un.varRdRev.sli2FwName, 16);
  140. }
  141. /* Save information as VPD data */
  142. vp->rev.biuRev = mb->un.varRdRev.biuRev;
  143. vp->rev.smRev = mb->un.varRdRev.smRev;
  144. vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
  145. vp->rev.endecRev = mb->un.varRdRev.endecRev;
  146. vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
  147. vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
  148. vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
  149. vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
  150. vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
  151. vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
  152. if (lpfc_is_LC_HBA(phba->pcidev->device))
  153. memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
  154. sizeof (phba->RandomData));
  155. /* Get the default values for Model Name and Description */
  156. lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
  157. /* Get adapter VPD information */
  158. pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
  159. if (!pmb->context2)
  160. goto out_free_mbox;
  161. lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
  162. if (!lpfc_vpd_data)
  163. goto out_free_context2;
  164. do {
  165. lpfc_dump_mem(phba, pmb, offset);
  166. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  167. if (rc != MBX_SUCCESS) {
  168. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  169. "%d:0441 VPD not present on adapter, "
  170. "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
  171. phba->brd_no,
  172. mb->mbxCommand, mb->mbxStatus);
  173. kfree(lpfc_vpd_data);
  174. lpfc_vpd_data = NULL;
  175. break;
  176. }
  177. lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
  178. mb->un.varDmp.word_cnt);
  179. offset += mb->un.varDmp.word_cnt;
  180. } while (mb->un.varDmp.word_cnt);
  181. lpfc_parse_vpd(phba, lpfc_vpd_data);
  182. kfree(lpfc_vpd_data);
  183. out_free_context2:
  184. kfree(pmb->context2);
  185. out_free_mbox:
  186. mempool_free(pmb, phba->mbox_mem_pool);
  187. return 0;
  188. }
  189. /************************************************************************/
  190. /* */
  191. /* lpfc_config_port_post */
  192. /* This routine will do LPFC initialization after the */
  193. /* CONFIG_PORT mailbox command. This will be initialized */
  194. /* as a SLI layer callback routine. */
  195. /* This routine returns 0 on success. Any other return value */
  196. /* indicates an error. */
  197. /* */
  198. /************************************************************************/
  199. int
  200. lpfc_config_port_post(struct lpfc_hba * phba)
  201. {
  202. LPFC_MBOXQ_t *pmb;
  203. MAILBOX_t *mb;
  204. struct lpfc_dmabuf *mp;
  205. struct lpfc_sli *psli = &phba->sli;
  206. uint32_t status, timeout;
  207. int i, j, rc;
  208. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  209. if (!pmb) {
  210. phba->hba_state = LPFC_HBA_ERROR;
  211. return -ENOMEM;
  212. }
  213. mb = &pmb->mb;
  214. lpfc_config_link(phba, pmb);
  215. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  216. if (rc != MBX_SUCCESS) {
  217. lpfc_printf_log(phba,
  218. KERN_ERR,
  219. LOG_INIT,
  220. "%d:0447 Adapter failed init, mbxCmd x%x "
  221. "CONFIG_LINK mbxStatus x%x\n",
  222. phba->brd_no,
  223. mb->mbxCommand, mb->mbxStatus);
  224. phba->hba_state = LPFC_HBA_ERROR;
  225. mempool_free( pmb, phba->mbox_mem_pool);
  226. return -EIO;
  227. }
  228. /* Get login parameters for NID. */
  229. lpfc_read_sparam(phba, pmb);
  230. if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
  231. lpfc_printf_log(phba,
  232. KERN_ERR,
  233. LOG_INIT,
  234. "%d:0448 Adapter failed init, mbxCmd x%x "
  235. "READ_SPARM mbxStatus x%x\n",
  236. phba->brd_no,
  237. mb->mbxCommand, mb->mbxStatus);
  238. phba->hba_state = LPFC_HBA_ERROR;
  239. mp = (struct lpfc_dmabuf *) pmb->context1;
  240. mempool_free( pmb, phba->mbox_mem_pool);
  241. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  242. kfree(mp);
  243. return -EIO;
  244. }
  245. mp = (struct lpfc_dmabuf *) pmb->context1;
  246. memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
  247. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  248. kfree(mp);
  249. pmb->context1 = NULL;
  250. memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
  251. sizeof (struct lpfc_name));
  252. memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
  253. sizeof (struct lpfc_name));
  254. /* If no serial number in VPD data, use low 6 bytes of WWNN */
  255. /* This should be consolidated into parse_vpd ? - mr */
  256. if (phba->SerialNumber[0] == 0) {
  257. uint8_t *outptr;
  258. outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
  259. for (i = 0; i < 12; i++) {
  260. status = *outptr++;
  261. j = ((status & 0xf0) >> 4);
  262. if (j <= 9)
  263. phba->SerialNumber[i] =
  264. (char)((uint8_t) 0x30 + (uint8_t) j);
  265. else
  266. phba->SerialNumber[i] =
  267. (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
  268. i++;
  269. j = (status & 0xf);
  270. if (j <= 9)
  271. phba->SerialNumber[i] =
  272. (char)((uint8_t) 0x30 + (uint8_t) j);
  273. else
  274. phba->SerialNumber[i] =
  275. (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
  276. }
  277. }
  278. /* This should turn on DELAYED ABTS for ELS timeouts */
  279. lpfc_set_slim(phba, pmb, 0x052198, 0x1);
  280. if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
  281. phba->hba_state = LPFC_HBA_ERROR;
  282. mempool_free( pmb, phba->mbox_mem_pool);
  283. return -EIO;
  284. }
  285. lpfc_read_config(phba, pmb);
  286. if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
  287. lpfc_printf_log(phba,
  288. KERN_ERR,
  289. LOG_INIT,
  290. "%d:0453 Adapter failed to init, mbxCmd x%x "
  291. "READ_CONFIG, mbxStatus x%x\n",
  292. phba->brd_no,
  293. mb->mbxCommand, mb->mbxStatus);
  294. phba->hba_state = LPFC_HBA_ERROR;
  295. mempool_free( pmb, phba->mbox_mem_pool);
  296. return -EIO;
  297. }
  298. /* Reset the DFT_HBA_Q_DEPTH to the max xri */
  299. if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
  300. phba->cfg_hba_queue_depth =
  301. mb->un.varRdConfig.max_xri + 1;
  302. phba->lmt = mb->un.varRdConfig.lmt;
  303. /* If the HBA is not 4GB capable, or not 2GB capable,
  304. don't let the configured link speed request it */
  305. if ((((phba->lmt & LMT_4250_10bit) != LMT_4250_10bit) &&
  306. (phba->cfg_link_speed > LINK_SPEED_2G)) ||
  307. (((phba->lmt & LMT_2125_10bit) != LMT_2125_10bit) &&
  308. (phba->cfg_link_speed > LINK_SPEED_1G))) {
  309. /* Reset link speed to auto. 1G/2GB HBA cfg'd for 4G */
  310. lpfc_printf_log(phba,
  311. KERN_WARNING,
  312. LOG_LINK_EVENT,
  313. "%d:1302 Invalid speed for this board: "
  314. "Reset link speed to auto: x%x\n",
  315. phba->brd_no,
  316. phba->cfg_link_speed);
  317. phba->cfg_link_speed = LINK_SPEED_AUTO;
  318. }
  319. phba->hba_state = LPFC_LINK_DOWN;
  320. /* Only process IOCBs on ring 0 till hba_state is READY */
  321. if (psli->ring[psli->ip_ring].cmdringaddr)
  322. psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
  323. if (psli->ring[psli->fcp_ring].cmdringaddr)
  324. psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
  325. if (psli->ring[psli->next_ring].cmdringaddr)
  326. psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
  327. /* Post receive buffers for desired rings */
  328. lpfc_post_rcv_buf(phba);
  329. /* Enable appropriate host interrupts */
  330. spin_lock_irq(phba->host->host_lock);
  331. status = readl(phba->HCregaddr);
  332. status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
  333. if (psli->num_rings > 0)
  334. status |= HC_R0INT_ENA;
  335. if (psli->num_rings > 1)
  336. status |= HC_R1INT_ENA;
  337. if (psli->num_rings > 2)
  338. status |= HC_R2INT_ENA;
  339. if (psli->num_rings > 3)
  340. status |= HC_R3INT_ENA;
  341. writel(status, phba->HCregaddr);
  342. readl(phba->HCregaddr); /* flush */
  343. spin_unlock_irq(phba->host->host_lock);
  344. /*
  345. * Setup the ring 0 (els) timeout handler
  346. */
  347. timeout = phba->fc_ratov << 1;
  348. phba->els_tmofunc.expires = jiffies + HZ * timeout;
  349. add_timer(&phba->els_tmofunc);
  350. lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
  351. pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  352. if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) {
  353. lpfc_printf_log(phba,
  354. KERN_ERR,
  355. LOG_INIT,
  356. "%d:0454 Adapter failed to init, mbxCmd x%x "
  357. "INIT_LINK, mbxStatus x%x\n",
  358. phba->brd_no,
  359. mb->mbxCommand, mb->mbxStatus);
  360. /* Clear all interrupt enable conditions */
  361. writel(0, phba->HCregaddr);
  362. readl(phba->HCregaddr); /* flush */
  363. /* Clear all pending interrupts */
  364. writel(0xffffffff, phba->HAregaddr);
  365. readl(phba->HAregaddr); /* flush */
  366. phba->hba_state = LPFC_HBA_ERROR;
  367. mempool_free(pmb, phba->mbox_mem_pool);
  368. return -EIO;
  369. }
  370. /* MBOX buffer will be freed in mbox compl */
  371. i = 0;
  372. while ((phba->hba_state != LPFC_HBA_READY) ||
  373. (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
  374. ((phba->fc_map_cnt == 0) && (i<2)) ||
  375. (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
  376. /* Check every second for 30 retries. */
  377. i++;
  378. if (i > 30) {
  379. break;
  380. }
  381. if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
  382. /* The link is down. Set linkdown timeout */
  383. break;
  384. }
  385. /* Delay for 1 second to give discovery time to complete. */
  386. msleep(1000);
  387. }
  388. /* Since num_disc_nodes keys off of PLOGI, delay a bit to let
  389. * any potential PRLIs flush through the SLI sub-system.
  390. */
  391. msleep(50);
  392. return (0);
  393. }
  394. /************************************************************************/
  395. /* */
  396. /* lpfc_hba_down_prep */
  397. /* This routine will do LPFC uninitialization before the */
  398. /* HBA is reset when bringing down the SLI Layer. This will be */
  399. /* initialized as a SLI layer callback routine. */
  400. /* This routine returns 0 on success. Any other return value */
  401. /* indicates an error. */
  402. /* */
  403. /************************************************************************/
  404. int
  405. lpfc_hba_down_prep(struct lpfc_hba * phba)
  406. {
  407. /* Disable interrupts */
  408. writel(0, phba->HCregaddr);
  409. readl(phba->HCregaddr); /* flush */
  410. /* Cleanup potential discovery resources */
  411. lpfc_els_flush_rscn(phba);
  412. lpfc_els_flush_cmd(phba);
  413. lpfc_disc_flush_list(phba);
  414. return (0);
  415. }
  416. /************************************************************************/
  417. /* */
  418. /* lpfc_handle_eratt */
  419. /* This routine will handle processing a Host Attention */
  420. /* Error Status event. This will be initialized */
  421. /* as a SLI layer callback routine. */
  422. /* */
  423. /************************************************************************/
  424. void
  425. lpfc_handle_eratt(struct lpfc_hba * phba)
  426. {
  427. struct lpfc_sli *psli = &phba->sli;
  428. struct lpfc_sli_ring *pring;
  429. /*
  430. * If a reset is sent to the HBA restore PCI configuration registers.
  431. */
  432. if ( phba->hba_state == LPFC_INIT_START ) {
  433. mdelay(1);
  434. readl(phba->HCregaddr); /* flush */
  435. writel(0, phba->HCregaddr);
  436. readl(phba->HCregaddr); /* flush */
  437. /* Restore PCI cmd register */
  438. pci_write_config_word(phba->pcidev,
  439. PCI_COMMAND, phba->pci_cfg_value);
  440. }
  441. if (phba->work_hs & HS_FFER6) {
  442. /* Re-establishing Link */
  443. lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
  444. "%d:1301 Re-establishing Link "
  445. "Data: x%x x%x x%x\n",
  446. phba->brd_no, phba->work_hs,
  447. phba->work_status[0], phba->work_status[1]);
  448. spin_lock_irq(phba->host->host_lock);
  449. phba->fc_flag |= FC_ESTABLISH_LINK;
  450. spin_unlock_irq(phba->host->host_lock);
  451. /*
  452. * Firmware stops when it triggers an error attention with HS_FFER6.
  453. * That could cause I/Os to be dropped by the firmware.
  454. * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
  455. * retry them after re-establishing the link.
  456. */
  457. pring = &psli->ring[psli->fcp_ring];
  458. lpfc_sli_abort_iocb_ring(phba, pring);
  459. /*
  460. * There was a firmware error. Take the hba offline and then
  461. * attempt to restart it.
  462. */
  463. lpfc_offline(phba);
  464. if (lpfc_online(phba) == 0) { /* Initialize the HBA */
  465. mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
  466. return;
  467. }
  468. } else {
  469. /* The if clause above forces this code path when the status
  470. * failure is a value other than FFER6. Do not call lpfc_offline
  471. * twice. This is the adapter hardware error path.
  472. */
  473. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  474. "%d:0457 Adapter Hardware Error "
  475. "Data: x%x x%x x%x\n",
  476. phba->brd_no, phba->work_hs,
  477. phba->work_status[0], phba->work_status[1]);
  478. lpfc_offline(phba);
  479. /*
  480. * Restart all traffic to this host. Since the fc_transport
  481. * block functions (future) were not called in lpfc_offline,
  482. * don't call them here.
  483. */
  484. scsi_unblock_requests(phba->host);
  485. }
  486. }
  487. /************************************************************************/
  488. /* */
  489. /* lpfc_handle_latt */
  490. /* This routine will handle processing a Host Attention */
  491. /* Link Status event. This will be initialized */
  492. /* as a SLI layer callback routine. */
  493. /* */
  494. /************************************************************************/
  495. void
  496. lpfc_handle_latt(struct lpfc_hba * phba)
  497. {
  498. struct lpfc_sli *psli = &phba->sli;
  499. LPFC_MBOXQ_t *pmb;
  500. volatile uint32_t control;
  501. struct lpfc_dmabuf *mp;
  502. int rc = -ENOMEM;
  503. pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  504. if (!pmb)
  505. goto lpfc_handle_latt_err_exit;
  506. mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  507. if (!mp)
  508. goto lpfc_handle_latt_free_pmb;
  509. mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
  510. if (!mp->virt)
  511. goto lpfc_handle_latt_free_mp;
  512. rc = -EIO;
  513. psli->slistat.link_event++;
  514. lpfc_read_la(phba, pmb, mp);
  515. pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
  516. rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
  517. if (rc == MBX_NOT_FINISHED)
  518. goto lpfc_handle_latt_free_mp;
  519. /* Clear Link Attention in HA REG */
  520. spin_lock_irq(phba->host->host_lock);
  521. writel(HA_LATT, phba->HAregaddr);
  522. readl(phba->HAregaddr); /* flush */
  523. spin_unlock_irq(phba->host->host_lock);
  524. return;
  525. lpfc_handle_latt_free_mp:
  526. kfree(mp);
  527. lpfc_handle_latt_free_pmb:
  528. mempool_free(pmb, phba->mbox_mem_pool);
  529. lpfc_handle_latt_err_exit:
  530. /* Enable Link attention interrupts */
  531. spin_lock_irq(phba->host->host_lock);
  532. psli->sli_flag |= LPFC_PROCESS_LA;
  533. control = readl(phba->HCregaddr);
  534. control |= HC_LAINT_ENA;
  535. writel(control, phba->HCregaddr);
  536. readl(phba->HCregaddr); /* flush */
  537. /* Clear Link Attention in HA REG */
  538. writel(HA_LATT, phba->HAregaddr);
  539. readl(phba->HAregaddr); /* flush */
  540. spin_unlock_irq(phba->host->host_lock);
  541. lpfc_linkdown(phba);
  542. phba->hba_state = LPFC_HBA_ERROR;
  543. /* The other case is an error from issue_mbox */
  544. if (rc == -ENOMEM)
  545. lpfc_printf_log(phba,
  546. KERN_WARNING,
  547. LOG_MBOX,
  548. "%d:0300 READ_LA: no buffers\n",
  549. phba->brd_no);
  550. return;
  551. }
  552. /************************************************************************/
  553. /* */
  554. /* lpfc_parse_vpd */
  555. /* This routine will parse the VPD data */
  556. /* */
  557. /************************************************************************/
  558. static int
  559. lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd)
  560. {
  561. uint8_t lenlo, lenhi;
  562. uint32_t Length;
  563. int i, j;
  564. int finished = 0;
  565. int index = 0;
  566. if (!vpd)
  567. return 0;
  568. /* Vital Product */
  569. lpfc_printf_log(phba,
  570. KERN_INFO,
  571. LOG_INIT,
  572. "%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
  573. phba->brd_no,
  574. (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
  575. (uint32_t) vpd[3]);
  576. do {
  577. switch (vpd[index]) {
  578. case 0x82:
  579. index += 1;
  580. lenlo = vpd[index];
  581. index += 1;
  582. lenhi = vpd[index];
  583. index += 1;
  584. i = ((((unsigned short)lenhi) << 8) + lenlo);
  585. index += i;
  586. break;
  587. case 0x90:
  588. index += 1;
  589. lenlo = vpd[index];
  590. index += 1;
  591. lenhi = vpd[index];
  592. index += 1;
  593. Length = ((((unsigned short)lenhi) << 8) + lenlo);
  594. while (Length > 0) {
  595. /* Look for Serial Number */
  596. if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
  597. index += 2;
  598. i = vpd[index];
  599. index += 1;
  600. j = 0;
  601. Length -= (3+i);
  602. while(i--) {
  603. phba->SerialNumber[j++] = vpd[index++];
  604. if (j == 31)
  605. break;
  606. }
  607. phba->SerialNumber[j] = 0;
  608. continue;
  609. }
  610. else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
  611. phba->vpd_flag |= VPD_MODEL_DESC;
  612. index += 2;
  613. i = vpd[index];
  614. index += 1;
  615. j = 0;
  616. Length -= (3+i);
  617. while(i--) {
  618. phba->ModelDesc[j++] = vpd[index++];
  619. if (j == 255)
  620. break;
  621. }
  622. phba->ModelDesc[j] = 0;
  623. continue;
  624. }
  625. else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
  626. phba->vpd_flag |= VPD_MODEL_NAME;
  627. index += 2;
  628. i = vpd[index];
  629. index += 1;
  630. j = 0;
  631. Length -= (3+i);
  632. while(i--) {
  633. phba->ModelName[j++] = vpd[index++];
  634. if (j == 79)
  635. break;
  636. }
  637. phba->ModelName[j] = 0;
  638. continue;
  639. }
  640. else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
  641. phba->vpd_flag |= VPD_PROGRAM_TYPE;
  642. index += 2;
  643. i = vpd[index];
  644. index += 1;
  645. j = 0;
  646. Length -= (3+i);
  647. while(i--) {
  648. phba->ProgramType[j++] = vpd[index++];
  649. if (j == 255)
  650. break;
  651. }
  652. phba->ProgramType[j] = 0;
  653. continue;
  654. }
  655. else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
  656. phba->vpd_flag |= VPD_PORT;
  657. index += 2;
  658. i = vpd[index];
  659. index += 1;
  660. j = 0;
  661. Length -= (3+i);
  662. while(i--) {
  663. phba->Port[j++] = vpd[index++];
  664. if (j == 19)
  665. break;
  666. }
  667. phba->Port[j] = 0;
  668. continue;
  669. }
  670. else {
  671. index += 2;
  672. i = vpd[index];
  673. index += 1;
  674. index += i;
  675. Length -= (3 + i);
  676. }
  677. }
  678. finished = 0;
  679. break;
  680. case 0x78:
  681. finished = 1;
  682. break;
  683. default:
  684. index ++;
  685. break;
  686. }
  687. } while (!finished && (index < 108));
  688. return(1);
  689. }
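/* lpfc_get_hba_model_desc: derive the default Model Name and Model
 * Description strings from the PCI device ID and, where needed, the
 * BIU revision. */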
  690. static void
  691. lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
  692. {
  693. lpfc_vpd_t *vp;
  694. uint32_t id;
  695. char str[16];
  696. vp = &phba->vpd;
  697. pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id);
  698. switch ((id >> 16) & 0xffff) {
  699. case PCI_DEVICE_ID_SUPERFLY:
  700. if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
  701. strcpy(str, "LP7000 1");
  702. else
  703. strcpy(str, "LP7000E 1");
  704. break;
  705. case PCI_DEVICE_ID_DRAGONFLY:
  706. strcpy(str, "LP8000 1");
  707. break;
  708. case PCI_DEVICE_ID_CENTAUR:
  709. if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
  710. strcpy(str, "LP9002 2");
  711. else
  712. strcpy(str, "LP9000 1");
  713. break;
  714. case PCI_DEVICE_ID_RFLY:
  715. strcpy(str, "LP952 2");
  716. break;
  717. case PCI_DEVICE_ID_PEGASUS:
  718. strcpy(str, "LP9802 2");
  719. break;
  720. case PCI_DEVICE_ID_THOR:
  721. strcpy(str, "LP10000 2");
  722. break;
  723. case PCI_DEVICE_ID_VIPER:
  724. strcpy(str, "LPX1000 10");
  725. break;
  726. case PCI_DEVICE_ID_PFLY:
  727. strcpy(str, "LP982 2");
  728. break;
  729. case PCI_DEVICE_ID_TFLY:
  730. strcpy(str, "LP1050 2");
  731. break;
  732. case PCI_DEVICE_ID_HELIOS:
  733. strcpy(str, "LP11000 4");
  734. break;
  735. case PCI_DEVICE_ID_BMID:
  736. strcpy(str, "LP1150 4");
  737. break;
  738. case PCI_DEVICE_ID_BSMB:
  739. strcpy(str, "LP111 4");
  740. break;
  741. case PCI_DEVICE_ID_ZEPHYR:
  742. strcpy(str, "LP11000e 4");
  743. break;
  744. case PCI_DEVICE_ID_ZMID:
  745. strcpy(str, "LP1150e 4");
  746. break;
  747. case PCI_DEVICE_ID_ZSMB:
  748. strcpy(str, "LP111e 4");
  749. break;
  750. case PCI_DEVICE_ID_LP101:
  751. strcpy(str, "LP101 2");
  752. break;
  753. case PCI_DEVICE_ID_LP10000S:
  754. strcpy(str, "LP10000-S 2");
  755. break;
  756. }
  757. if (mdp)
  758. sscanf(str, "%s", mdp);
  759. if (descp)
  760. sprintf(descp, "Emulex LightPulse %s Gigabit PCI Fibre "
  761. "Channel Adapter", str);
  762. }
  763. /**************************************************/
  764. /* lpfc_post_buffer */
  765. /* */
  766. /* This routine will post count buffers to the */
  767. /* ring with the QUE_RING_BUF_CN command. This */
  768. /* allows 3 buffers / command to be posted. */
  769. /* Returns the number of buffers NOT posted. */
  770. /**************************************************/
  771. int
  772. lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
  773. int type)
  774. {
  775. IOCB_t *icmd;
  776. struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
  777. struct lpfc_iocbq *iocb = NULL;
  778. struct lpfc_dmabuf *mp1, *mp2;
  779. cnt += pring->missbufcnt;
  780. /* While there are buffers to post */
  781. while (cnt > 0) {
  782. /* Allocate buffer for command iocb */
  783. spin_lock_irq(phba->host->host_lock);
  784. list_remove_head(lpfc_iocb_list, iocb, struct lpfc_iocbq, list);
  785. spin_unlock_irq(phba->host->host_lock);
  786. if (iocb == NULL) {
  787. pring->missbufcnt = cnt;
  788. return cnt;
  789. }
  790. memset(iocb, 0, sizeof (struct lpfc_iocbq));
  791. icmd = &iocb->iocb;
  792. /* 2 buffers can be posted per command */
  793. /* Allocate buffer to post */
  794. mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
  795. if (mp1)
  796. mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
  797. &mp1->phys);
  798. if (!mp1 || !mp1->virt) {
  799. if (mp1)
  800. kfree(mp1);
  801. spin_lock_irq(phba->host->host_lock);
  802. list_add_tail(&iocb->list, lpfc_iocb_list);
  803. spin_unlock_irq(phba->host->host_lock);
  804. pring->missbufcnt = cnt;
  805. return cnt;
  806. }
  807. INIT_LIST_HEAD(&mp1->list);
  808. /* Allocate buffer to post */
  809. if (cnt > 1) {
  810. mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
  811. if (mp2)
  812. mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
  813. &mp2->phys);
  814. if (!mp2 || !mp2->virt) {
  815. if (mp2)
  816. kfree(mp2);
  817. lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
  818. kfree(mp1);
  819. spin_lock_irq(phba->host->host_lock);
  820. list_add_tail(&iocb->list, lpfc_iocb_list);
  821. spin_unlock_irq(phba->host->host_lock);
  822. pring->missbufcnt = cnt;
  823. return cnt;
  824. }
  825. INIT_LIST_HEAD(&mp2->list);
  826. } else {
  827. mp2 = NULL;
  828. }
  829. icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
  830. icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
  831. icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
  832. icmd->ulpBdeCount = 1;
  833. cnt--;
  834. if (mp2) {
  835. icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
  836. icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
  837. icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
  838. cnt--;
  839. icmd->ulpBdeCount = 2;
  840. }
  841. icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
  842. icmd->ulpLe = 1;
  843. spin_lock_irq(phba->host->host_lock);
  844. if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
  845. lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
  846. kfree(mp1);
  847. cnt++;
  848. if (mp2) {
  849. lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
  850. kfree(mp2);
  851. cnt++;
  852. }
  853. list_add_tail(&iocb->list, lpfc_iocb_list);
  854. pring->missbufcnt = cnt;
  855. spin_unlock_irq(phba->host->host_lock);
  856. return cnt;
  857. }
  858. spin_unlock_irq(phba->host->host_lock);
  859. lpfc_sli_ringpostbuf_put(phba, pring, mp1);
  860. if (mp2) {
  861. lpfc_sli_ringpostbuf_put(phba, pring, mp2);
  862. }
  863. }
  864. pring->missbufcnt = 0;
  865. return 0;
  866. }
  867. /************************************************************************/
  868. /* */
  869. /* lpfc_post_rcv_buf */
  870. /* This routine post initial rcv buffers to the configured rings */
  871. /* */
  872. /************************************************************************/
  873. static int
  874. lpfc_post_rcv_buf(struct lpfc_hba * phba)
  875. {
  876. struct lpfc_sli *psli = &phba->sli;
  877. /* Ring 0, ELS / CT buffers */
  878. lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
  879. /* Ring 2 - FCP no buffers needed */
  880. return 0;
  881. }
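/* S(N,V) rotates the 32-bit value V left by N bits; used by the SHA-1 rounds below. */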
  882. #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
  883. /************************************************************************/
  884. /* */
  885. /* lpfc_sha_init */
  886. /* */
  887. /************************************************************************/
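/* Seed the hash state with the standard SHA-1 initial constants. */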
  888. static void
  889. lpfc_sha_init(uint32_t * HashResultPointer)
  890. {
  891. HashResultPointer[0] = 0x67452301;
  892. HashResultPointer[1] = 0xEFCDAB89;
  893. HashResultPointer[2] = 0x98BADCFE;
  894. HashResultPointer[3] = 0x10325476;
  895. HashResultPointer[4] = 0xC3D2E1F0;
  896. }
  897. /************************************************************************/
  898. /* */
  899. /* lpfc_sha_iterate */
  900. /* */
  901. /************************************************************************/
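/* One 80-round SHA-1 block transform: expand the 16 seeded words of the
 * working buffer to 80, then mix the result into the running hash state. */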
  902. static void
  903. lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
  904. {
  905. int t;
  906. uint32_t TEMP;
  907. uint32_t A, B, C, D, E;
  908. t = 16;
  909. do {
  910. HashWorkingPointer[t] =
  911. S(1,
  912. HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
  913. 8] ^
  914. HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
  915. } while (++t <= 79);
  916. t = 0;
  917. A = HashResultPointer[0];
  918. B = HashResultPointer[1];
  919. C = HashResultPointer[2];
  920. D = HashResultPointer[3];
  921. E = HashResultPointer[4];
  922. do {
  923. if (t < 20) {
  924. TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
  925. } else if (t < 40) {
  926. TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
  927. } else if (t < 60) {
  928. TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
  929. } else {
  930. TEMP = (B ^ C ^ D) + 0xCA62C1D6;
  931. }
  932. TEMP += S(5, A) + E + HashWorkingPointer[t];
  933. E = D;
  934. D = C;
  935. C = S(30, B);
  936. B = A;
  937. A = TEMP;
  938. } while (++t <= 79);
  939. HashResultPointer[0] += A;
  940. HashResultPointer[1] += B;
  941. HashResultPointer[2] += C;
  942. HashResultPointer[3] += D;
  943. HashResultPointer[4] += E;
  944. }
  945. /************************************************************************/
  946. /* */
  947. /* lpfc_challenge_key */
  948. /* */
  949. /************************************************************************/
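/* XOR one word of the adapter's random challenge into the hash working buffer. */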
  950. static void
  951. lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
  952. {
  953. *HashWorking = (*RandomChallenge ^ *HashWorking);
  954. }
  955. /************************************************************************/
  956. /* */
  957. /* lpfc_hba_init */
  958. /* */
  959. /************************************************************************/
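/* Build the HBAINIT authentication words: seed an 80-word working buffer
 * with the WWNN, fold in the adapter's RandomData challenge, and run SHA-1
 * over it to fill hbainit. */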
  960. void
  961. lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
  962. {
  963. int t;
  964. uint32_t *HashWorking;
  965. uint32_t *pwwnn = phba->wwnn;
  966. HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
  967. if (!HashWorking)
  968. return;
  969. memset(HashWorking, 0, (80 * sizeof(uint32_t)));
  970. HashWorking[0] = HashWorking[78] = *pwwnn++;
  971. HashWorking[1] = HashWorking[79] = *pwwnn;
  972. for (t = 0; t < 7; t++)
  973. lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
  974. lpfc_sha_init(hbainit);
  975. lpfc_sha_iterate(hbainit, HashWorking);
  976. kfree(HashWorking);
  977. }
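/* lpfc_cleanup: release every nodelist entry on the discovery lists and
 * reset the per-state node counters. Called when the HBA goes offline or
 * is removed. */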
  978. static void
  979. lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
  980. {
  981. struct lpfc_nodelist *ndlp, *next_ndlp;
  982. /* clean up phba - lpfc specific */
  983. lpfc_can_disctmo(phba);
  984. list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
  985. nlp_listp) {
  986. lpfc_nlp_remove(phba, ndlp);
  987. }
  988. list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
  989. nlp_listp) {
  990. lpfc_nlp_remove(phba, ndlp);
  991. }
  992. list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
  993. nlp_listp) {
  994. lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
  995. }
  996. list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
  997. nlp_listp) {
  998. lpfc_nlp_remove(phba, ndlp);
  999. }
  1000. list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
  1001. nlp_listp) {
  1002. lpfc_nlp_remove(phba, ndlp);
  1003. }
  1004. list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
  1005. nlp_listp) {
  1006. lpfc_nlp_remove(phba, ndlp);
  1007. }
  1008. list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
  1009. nlp_listp) {
  1010. lpfc_nlp_remove(phba, ndlp);
  1011. }
  1012. list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
  1013. nlp_listp) {
  1014. lpfc_nlp_remove(phba, ndlp);
  1015. }
  1016. INIT_LIST_HEAD(&phba->fc_nlpmap_list);
  1017. INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
  1018. INIT_LIST_HEAD(&phba->fc_unused_list);
  1019. INIT_LIST_HEAD(&phba->fc_plogi_list);
  1020. INIT_LIST_HEAD(&phba->fc_adisc_list);
  1021. INIT_LIST_HEAD(&phba->fc_reglogin_list);
  1022. INIT_LIST_HEAD(&phba->fc_prli_list);
  1023. INIT_LIST_HEAD(&phba->fc_npr_list);
  1024. phba->fc_map_cnt = 0;
  1025. phba->fc_unmap_cnt = 0;
  1026. phba->fc_plogi_cnt = 0;
  1027. phba->fc_adisc_cnt = 0;
  1028. phba->fc_reglogin_cnt = 0;
  1029. phba->fc_prli_cnt = 0;
  1030. phba->fc_npr_cnt = 0;
  1031. phba->fc_unused_cnt= 0;
  1032. return;
  1033. }
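/* lpfc_establish_link_tmo: timer callback armed after an HS_FFER6 recovery;
 * clears FC_ESTABLISH_LINK if the link has not been re-established in time. */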
  1034. static void
  1035. lpfc_establish_link_tmo(unsigned long ptr)
  1036. {
  1037. struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
  1038. unsigned long iflag;
  1039. /* Re-establishing Link, timer expired */
  1040. lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
  1041. "%d:1300 Re-establishing Link, timer expired "
  1042. "Data: x%x x%x\n",
  1043. phba->brd_no, phba->fc_flag, phba->hba_state);
  1044. spin_lock_irqsave(phba->host->host_lock, iflag);
  1045. phba->fc_flag &= ~FC_ESTABLISH_LINK;
  1046. spin_unlock_irqrestore(phba->host->host_lock, iflag);
  1047. }
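/* lpfc_stop_timer: drain the deferred free-buffer list and stop all timers
 * owned by this HBA. */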
  1048. static int
  1049. lpfc_stop_timer(struct lpfc_hba * phba)
  1050. {
  1051. struct lpfc_sli *psli = &phba->sli;
  1052. /* Instead of a timer, this has been converted to a
  1053. * deferred processing list.
  1054. */
  1055. while (!list_empty(&phba->freebufList)) {
  1056. struct lpfc_dmabuf *mp = NULL;
  1057. list_remove_head((&phba->freebufList), mp,
  1058. struct lpfc_dmabuf, list);
  1059. if (mp) {
  1060. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  1061. kfree(mp);
  1062. }
  1063. }
  1064. del_timer_sync(&phba->fc_estabtmo);
  1065. del_timer_sync(&phba->fc_disctmo);
  1066. del_timer_sync(&phba->fc_fdmitmo);
  1067. del_timer_sync(&phba->els_tmofunc);
  1068. psli = &phba->sli;
  1069. del_timer_sync(&psli->mbox_tmo);
  1070. return(1);
  1071. }
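/* lpfc_online: bring a previously offlined adapter back up by re-running SLI
 * queue and HBA setup, then unblock SCSI requests. */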
  1072. int
  1073. lpfc_online(struct lpfc_hba * phba)
  1074. {
  1075. if (!phba)
  1076. return 0;
  1077. if (!(phba->fc_flag & FC_OFFLINE_MODE))
  1078. return 0;
  1079. lpfc_printf_log(phba,
  1080. KERN_WARNING,
  1081. LOG_INIT,
  1082. "%d:0458 Bring Adapter online\n",
  1083. phba->brd_no);
  1084. if (!lpfc_sli_queue_setup(phba))
  1085. return 1;
  1086. if (lpfc_sli_hba_setup(phba)) /* Initialize the HBA */
  1087. return 1;
  1088. spin_lock_irq(phba->host->host_lock);
  1089. phba->fc_flag &= ~FC_OFFLINE_MODE;
  1090. spin_unlock_irq(phba->host->host_lock);
  1091. /*
  1092. * Restart all traffic to this host. Since the fc_transport block
  1093. * functions (future) were not called in lpfc_offline, don't call them
  1094. * here.
  1095. */
  1096. scsi_unblock_requests(phba->host);
  1097. return 0;
  1098. }
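/* lpfc_offline: block new SCSI requests, take the link down, wait for
 * outstanding FCP commands to drain, stop timers, and bring the SLI layer
 * down. */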
  1099. int
  1100. lpfc_offline(struct lpfc_hba * phba)
  1101. {
  1102. struct lpfc_sli_ring *pring;
  1103. struct lpfc_sli *psli;
  1104. unsigned long iflag;
  1105. int i = 0;
  1106. if (!phba)
  1107. return 0;
  1108. if (phba->fc_flag & FC_OFFLINE_MODE)
  1109. return 0;
  1110. /*
  1111. * Don't call the fc_transport block api (future). The device is
  1112. * going offline, and causing a timer to fire in the midlayer is
  1113. * unproductive. Just block all new requests until the driver
  1114. * comes back online.
  1115. */
  1116. scsi_block_requests(phba->host);
  1117. psli = &phba->sli;
  1118. pring = &psli->ring[psli->fcp_ring];
  1119. lpfc_linkdown(phba);
  1120. /* The linkdown event takes 30 seconds to timeout. */
  1121. while (pring->txcmplq_cnt) {
  1122. mdelay(10);
  1123. if (i++ > 3000)
  1124. break;
  1125. }
  1126. /* stop all timers associated with this hba */
  1127. lpfc_stop_timer(phba);
  1128. phba->work_hba_events = 0;
  1129. lpfc_printf_log(phba,
  1130. KERN_WARNING,
  1131. LOG_INIT,
  1132. "%d:0460 Bring Adapter offline\n",
  1133. phba->brd_no);
  1134. /* Bring down the SLI Layer and cleanup. The HBA is offline
  1135. now. */
  1136. lpfc_sli_hba_down(phba);
  1137. lpfc_cleanup(phba, 1);
  1138. spin_lock_irqsave(phba->host->host_lock, iflag);
  1139. phba->fc_flag |= FC_OFFLINE_MODE;
  1140. spin_unlock_irqrestore(phba->host->host_lock, iflag);
  1141. return 0;
  1142. }
  1143. /******************************************************************************
  1144. * Function name: lpfc_scsi_free
  1145. *
  1146. * Description: Called from lpfc_pci_remove_one to free internal driver resources
  1147. *
  1148. ******************************************************************************/
  1149. static int
  1150. lpfc_scsi_free(struct lpfc_hba * phba)
  1151. {
  1152. struct lpfc_scsi_buf *sb, *sb_next;
  1153. struct lpfc_iocbq *io, *io_next;
  1154. spin_lock_irq(phba->host->host_lock);
  1155. /* Release all the lpfc_scsi_bufs maintained by this host. */
  1156. list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
  1157. list_del(&sb->list);
  1158. pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
  1159. sb->dma_handle);
  1160. kfree(sb);
  1161. phba->total_scsi_bufs--;
  1162. }
  1163. /* Release all the lpfc_iocbq entries maintained by this host. */
  1164. list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
  1165. list_del(&io->list);
  1166. kfree(io);
  1167. phba->total_iocbq_bufs--;
  1168. }
  1169. spin_unlock_irq(phba->host->host_lock);
  1170. return 0;
  1171. }
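/* lpfc_pci_probe_one: PCI probe entry point. Allocates the Scsi_Host and
 * lpfc_hba, initializes timers and node lists, maps the SLIM and control
 * register BARs, allocates SLI-2 DMA memory and the iocb pool, starts the
 * worker thread, registers the interrupt handler, initializes the HBA via
 * lpfc_sli_hba_setup, and publishes the FC transport attributes. */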
  1172. static int __devinit
  1173. lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
  1174. {
  1175. struct Scsi_Host *host;
  1176. struct lpfc_hba *phba;
  1177. struct lpfc_sli *psli;
  1178. struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
  1179. unsigned long bar0map_len, bar2map_len;
  1180. int error = -ENODEV, retval;
  1181. int i;
  1182. u64 wwname;
  1183. if (pci_enable_device(pdev))
  1184. goto out;
  1185. if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
  1186. goto out_disable_device;
  1187. host = scsi_host_alloc(&lpfc_template,
  1188. sizeof (struct lpfc_hba) + sizeof (unsigned long));
  1189. if (!host)
  1190. goto out_release_regions;
  1191. phba = (struct lpfc_hba*)host->hostdata;
  1192. memset(phba, 0, sizeof (struct lpfc_hba));
  1193. phba->link_stats = (void *)&phba[1];
  1194. phba->host = host;
  1195. phba->fc_flag |= FC_LOADING;
  1196. phba->pcidev = pdev;
  1197. /* Assign an unused board number */
  1198. if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
  1199. goto out_put_host;
  1200. error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
  1201. if (error)
  1202. goto out_put_host;
  1203. host->unique_id = phba->brd_no;
  1204. INIT_LIST_HEAD(&phba->ctrspbuflist);
  1205. INIT_LIST_HEAD(&phba->rnidrspbuflist);
  1206. INIT_LIST_HEAD(&phba->freebufList);
  1207. /* Initialize timers used by driver */
  1208. init_timer(&phba->fc_estabtmo);
  1209. phba->fc_estabtmo.function = lpfc_establish_link_tmo;
  1210. phba->fc_estabtmo.data = (unsigned long)phba;
  1211. init_timer(&phba->fc_disctmo);
  1212. phba->fc_disctmo.function = lpfc_disc_timeout;
  1213. phba->fc_disctmo.data = (unsigned long)phba;
  1214. init_timer(&phba->fc_fdmitmo);
  1215. phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
  1216. phba->fc_fdmitmo.data = (unsigned long)phba;
  1217. init_timer(&phba->els_tmofunc);
  1218. phba->els_tmofunc.function = lpfc_els_timeout;
  1219. phba->els_tmofunc.data = (unsigned long)phba;
  1220. psli = &phba->sli;
  1221. init_timer(&psli->mbox_tmo);
  1222. psli->mbox_tmo.function = lpfc_mbox_timeout;
  1223. psli->mbox_tmo.data = (unsigned long)phba;
  1224. /*
  1225. * Get all the module params for configuring this host and then
  1226. * establish the host parameters.
  1227. */
  1228. lpfc_get_cfgparam(phba);
  1229. host->max_id = LPFC_MAX_TARGET;
  1230. host->max_lun = phba->cfg_max_luns;
  1231. host->this_id = -1;
  1232. /* Initialize all internally managed lists. */
  1233. INIT_LIST_HEAD(&phba->fc_nlpmap_list);
  1234. INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
  1235. INIT_LIST_HEAD(&phba->fc_unused_list);
  1236. INIT_LIST_HEAD(&phba->fc_plogi_list);
  1237. INIT_LIST_HEAD(&phba->fc_adisc_list);
  1238. INIT_LIST_HEAD(&phba->fc_reglogin_list);
  1239. INIT_LIST_HEAD(&phba->fc_prli_list);
  1240. INIT_LIST_HEAD(&phba->fc_npr_list);
  1241. pci_set_master(pdev);
  1242. retval = pci_set_mwi(pdev);
  1243. if (retval)
  1244. dev_printk(KERN_WARNING, &pdev->dev,
  1245. "Warning: pci_set_mwi returned %d\n", retval);
  1246. if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
  1247. if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
  1248. goto out_idr_remove;
  1249. /*
  1250. * Get the bus address of Bar0 and Bar2 and the number of bytes
  1251. * required by each mapping.
  1252. */
  1253. phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
  1254. bar0map_len = pci_resource_len(phba->pcidev, 0);
  1255. phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
  1256. bar2map_len = pci_resource_len(phba->pcidev, 2);
  1257. /* Map HBA SLIM and Control Registers to a kernel virtual address. */
  1258. phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
  1259. phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
  1260. /* Allocate memory for SLI-2 structures */
  1261. phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
  1262. &phba->slim2p_mapping, GFP_KERNEL);
  1263. if (!phba->slim2p)
  1264. goto out_iounmap;
  1265. /* Initialize the SLI Layer to run with lpfc HBAs. */
  1266. lpfc_sli_setup(phba);
  1267. lpfc_sli_queue_setup(phba);
  1268. error = lpfc_mem_alloc(phba);
  1269. if (error)
  1270. goto out_free_slim;
  1271. /* Initialize and populate the iocb list per host. */
  1272. INIT_LIST_HEAD(&phba->lpfc_iocb_list);
  1273. for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
  1274. iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
  1275. if (iocbq_entry == NULL) {
  1276. printk(KERN_ERR "%s: only allocated %d iocbs of "
  1277. "expected %d count. Unloading driver.\n",
  1278. __FUNCTION__, i, LPFC_IOCB_LIST_CNT);
  1279. error = -ENOMEM;
  1280. goto out_free_iocbq;
  1281. }
  1282. memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
  1283. spin_lock_irq(phba->host->host_lock);
  1284. list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
  1285. phba->total_iocbq_bufs++;
  1286. spin_unlock_irq(phba->host->host_lock);
  1287. }
  1288. /* Initialize HBA structure */
  1289. phba->fc_edtov = FF_DEF_EDTOV;
  1290. phba->fc_ratov = FF_DEF_RATOV;
  1291. phba->fc_altov = FF_DEF_ALTOV;
  1292. phba->fc_arbtov = FF_DEF_ARBTOV;
  1293. INIT_LIST_HEAD(&phba->work_list);
  1294. phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
  1295. phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
  1296. /* Startup the kernel thread for this host adapter. */
  1297. phba->worker_thread = kthread_run(lpfc_do_work, phba,
  1298. "lpfc_worker_%d", phba->brd_no);
  1299. if (IS_ERR(phba->worker_thread)) {
  1300. error = PTR_ERR(phba->worker_thread);
  1301. goto out_free_iocbq;
  1302. }
  1303. /* We can rely on a queue depth attribute only after SLI HBA setup */
  1304. host->can_queue = phba->cfg_hba_queue_depth - 10;
  1305. /* Tell the midlayer we support 16 byte commands */
  1306. host->max_cmd_len = 16;
  1307. /* Initialize the list of scsi buffers used by driver for scsi IO. */
  1308. INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
  1309. host->transportt = lpfc_transport_template;
  1310. host->hostdata[0] = (unsigned long)phba;
  1311. pci_set_drvdata(pdev, host);
  1312. error = scsi_add_host(host, &pdev->dev);
  1313. if (error)
  1314. goto out_kthread_stop;
  1315. error = lpfc_alloc_sysfs_attr(phba);
  1316. if (error)
  1317. goto out_kthread_stop;
  1318. error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
  1319. LPFC_DRIVER_NAME, phba);
  1320. if (error) {
  1321. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1322. "%d:0451 Enable interrupt handler failed\n",
  1323. phba->brd_no);
  1324. goto out_free_sysfs_attr;
  1325. }
  1326. phba->MBslimaddr = phba->slim_memmap_p;
  1327. phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
  1328. phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
  1329. phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
  1330. phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
  1331. error = lpfc_sli_hba_setup(phba);
  1332. if (error)
  1333. goto out_free_irq;
  1334. /*
  1335. * set fixed host attributes
  1336. * Must be done after lpfc_sli_hba_setup()
  1337. */
  1338. memcpy(&wwname, &phba->fc_nodename, sizeof(u64));
  1339. fc_host_node_name(host) = be64_to_cpu(wwname);
  1340. memcpy(&wwname, &phba->fc_portname, sizeof(u64));
  1341. fc_host_port_name(host) = be64_to_cpu(wwname);
  1342. fc_host_supported_classes(host) = FC_COS_CLASS3;
  1343. memset(fc_host_supported_fc4s(host), 0,
  1344. sizeof(fc_host_supported_fc4s(host)));
  1345. fc_host_supported_fc4s(host)[2] = 1;
  1346. fc_host_supported_fc4s(host)[7] = 1;
  1347. lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host));
  1348. fc_host_supported_speeds(host) = 0;
  1349. switch (FC_JEDEC_ID(phba->vpd.rev.biuRev)) {
  1350. case VIPER_JEDEC_ID:
  1351. fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT;
  1352. break;
  1353. case HELIOS_JEDEC_ID:
  1354. fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT;
  1355. /* Fall through */
  1356. case CENTAUR_2G_JEDEC_ID:
  1357. case PEGASUS_JEDEC_ID:
  1358. case THOR_JEDEC_ID:
  1359. fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT;
  1360. /* Fall through */
  1361. default:
  1362. fc_host_supported_speeds(host) = FC_PORTSPEED_1GBIT;
  1363. }
  1364. fc_host_maxframe_size(host) =
  1365. ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
  1366. (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
  1367. /* This value is also unchanging */
  1368. memset(fc_host_active_fc4s(host), 0,
  1369. sizeof(fc_host_active_fc4s(host)));
  1370. fc_host_active_fc4s(host)[2] = 1;
  1371. fc_host_active_fc4s(host)[7] = 1;
  1372. spin_lock_irq(phba->host->host_lock);
  1373. phba->fc_flag &= ~FC_LOADING;
  1374. spin_unlock_irq(phba->host->host_lock);
  1375. return 0;
  1376. out_free_irq:
  1377. lpfc_stop_timer(phba);
  1378. phba->work_hba_events = 0;
  1379. free_irq(phba->pcidev->irq, phba);
  1380. out_free_sysfs_attr:
  1381. lpfc_free_sysfs_attr(phba);
  1382. out_kthread_stop:
  1383. kthread_stop(phba->worker_thread);
  1384. out_free_iocbq:
  1385. list_for_each_entry_safe(iocbq_entry, iocbq_next,
  1386. &phba->lpfc_iocb_list, list) {
  1387. spin_lock_irq(phba->host->host_lock);
  1388. kfree(iocbq_entry);
  1389. phba->total_iocbq_bufs--;
  1390. spin_unlock_irq(phba->host->host_lock);
  1391. }
  1392. lpfc_mem_free(phba);
  1393. out_free_slim:
  1394. dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
  1395. phba->slim2p_mapping);
  1396. out_iounmap:
  1397. iounmap(phba->ctrl_regs_memmap_p);
  1398. iounmap(phba->slim_memmap_p);
  1399. out_idr_remove:
  1400. idr_remove(&lpfc_hba_index, phba->brd_no);
  1401. out_put_host:
  1402. scsi_host_put(host);
  1403. out_release_regions:
  1404. pci_release_regions(pdev);
  1405. out_disable_device:
  1406. pci_disable_device(pdev);
  1407. out:
  1408. return error;
  1409. }
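/* lpfc_pci_remove_one: PCI remove entry point. Undoes everything the probe
 * routine set up: sysfs attributes, the SCSI host, the worker thread, the
 * SLI layer, the interrupt, driver memory pools, the SLI-2 DMA area, and
 * the BAR mappings. */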
  1410. static void __devexit
  1411. lpfc_pci_remove_one(struct pci_dev *pdev)
  1412. {
  1413. struct Scsi_Host *host = pci_get_drvdata(pdev);
  1414. struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata[0];
  1415. unsigned long iflag;
  1416. lpfc_free_sysfs_attr(phba);
  1417. spin_lock_irqsave(phba->host->host_lock, iflag);
  1418. phba->fc_flag |= FC_UNLOADING;
  1419. spin_unlock_irqrestore(phba->host->host_lock, iflag);
  1420. fc_remove_host(phba->host);
  1421. scsi_remove_host(phba->host);
  1422. kthread_stop(phba->worker_thread);
  1423. /*
  1424. * Bring down the SLI Layer. This step disables all interrupts,
  1425. * clears the rings, discards all mailbox commands, and resets
  1426. * the HBA.
  1427. */
  1428. lpfc_sli_hba_down(phba);
  1429. /* Release the irq reservation */
  1430. free_irq(phba->pcidev->irq, phba);
  1431. lpfc_cleanup(phba, 0);
  1432. lpfc_stop_timer(phba);
  1433. phba->work_hba_events = 0;
  1434. /*
  1435. * Call scsi_free before mem_free since scsi bufs are released to their
  1436. * corresponding pools here.
  1437. */
  1438. lpfc_scsi_free(phba);
  1439. lpfc_mem_free(phba);
  1440. /* Free resources associated with SLI2 interface */
  1441. dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
  1442. phba->slim2p, phba->slim2p_mapping);
  1443. /* unmap adapter SLIM and Control Registers */
  1444. iounmap(phba->ctrl_regs_memmap_p);
  1445. iounmap(phba->slim_memmap_p);
  1446. pci_release_regions(phba->pcidev);
  1447. pci_disable_device(phba->pcidev);
  1448. idr_remove(&lpfc_hba_index, phba->brd_no);
  1449. scsi_host_put(phba->host);
  1450. pci_set_drvdata(pdev, NULL);
  1451. }
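/* PCI vendor/device IDs claimed by this driver. */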
  1452. static struct pci_device_id lpfc_id_table[] = {
  1453. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
  1454. PCI_ANY_ID, PCI_ANY_ID, },
  1455. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
  1456. PCI_ANY_ID, PCI_ANY_ID, },
  1457. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
  1458. PCI_ANY_ID, PCI_ANY_ID, },
  1459. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
  1460. PCI_ANY_ID, PCI_ANY_ID, },
  1461. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
  1462. PCI_ANY_ID, PCI_ANY_ID, },
  1463. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
  1464. PCI_ANY_ID, PCI_ANY_ID, },
  1465. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
  1466. PCI_ANY_ID, PCI_ANY_ID, },
  1467. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
  1468. PCI_ANY_ID, PCI_ANY_ID, },
  1469. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
  1470. PCI_ANY_ID, PCI_ANY_ID, },
  1471. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
  1472. PCI_ANY_ID, PCI_ANY_ID, },
  1473. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
  1474. PCI_ANY_ID, PCI_ANY_ID, },
  1475. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
  1476. PCI_ANY_ID, PCI_ANY_ID, },
  1477. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
  1478. PCI_ANY_ID, PCI_ANY_ID, },
  1479. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
  1480. PCI_ANY_ID, PCI_ANY_ID, },
  1481. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
  1482. PCI_ANY_ID, PCI_ANY_ID, },
  1483. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
  1484. PCI_ANY_ID, PCI_ANY_ID, },
  1485. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
  1486. PCI_ANY_ID, PCI_ANY_ID, },
  1487. { 0 }
  1488. };
  1489. MODULE_DEVICE_TABLE(pci, lpfc_id_table);
  1490. static struct pci_driver lpfc_driver = {
  1491. .name = LPFC_DRIVER_NAME,
  1492. .id_table = lpfc_id_table,
  1493. .probe = lpfc_pci_probe_one,
  1494. .remove = __devexit_p(lpfc_pci_remove_one),
  1495. };
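/* lpfc_init: module entry point; registers the FC transport template and the PCI driver. */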
  1496. static int __init
  1497. lpfc_init(void)
  1498. {
  1499. int error = 0;
  1500. printk(LPFC_MODULE_DESC "\n");
  1501. lpfc_transport_template =
  1502. fc_attach_transport(&lpfc_transport_functions);
  1503. if (!lpfc_transport_template)
  1504. return -ENOMEM;
  1505. error = pci_register_driver(&lpfc_driver);
  1506. if (error)
  1507. fc_release_transport(lpfc_transport_template);
  1508. return error;
  1509. }
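/* lpfc_exit: module exit point; unregisters the PCI driver and releases the transport template. */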
  1510. static void __exit
  1511. lpfc_exit(void)
  1512. {
  1513. pci_unregister_driver(&lpfc_driver);
  1514. fc_release_transport(lpfc_transport_template);
  1515. }
  1516. module_init(lpfc_init);
  1517. module_exit(lpfc_exit);
  1518. MODULE_LICENSE("GPL");
  1519. MODULE_DESCRIPTION(LPFC_MODULE_DESC);
  1520. MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
  1521. MODULE_VERSION("0:" LPFC_DRIVER_VERSION);