lpfc_init.c

  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2004-2007 Emulex. All rights reserved. *
  5. * EMULEX and SLI are trademarks of Emulex. *
  6. * www.emulex.com *
  7. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  8. * *
  9. * This program is free software; you can redistribute it and/or *
  10. * modify it under the terms of version 2 of the GNU General *
  11. * Public License as published by the Free Software Foundation. *
  12. * This program is distributed in the hope that it will be useful. *
  13. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  14. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  15. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  16. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  17. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  18. * more details, a copy of which can be found in the file COPYING *
  19. * included with this package. *
  20. *******************************************************************/
  21. #include <linux/blkdev.h>
  22. #include <linux/delay.h>
  23. #include <linux/dma-mapping.h>
  24. #include <linux/idr.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/kthread.h>
  27. #include <linux/pci.h>
  28. #include <linux/spinlock.h>
  29. #include <linux/ctype.h>
  30. #include <scsi/scsi.h>
  31. #include <scsi/scsi_device.h>
  32. #include <scsi/scsi_host.h>
  33. #include <scsi/scsi_transport_fc.h>
  34. #include "lpfc_hw.h"
  35. #include "lpfc_sli.h"
  36. #include "lpfc_disc.h"
  37. #include "lpfc_scsi.h"
  38. #include "lpfc.h"
  39. #include "lpfc_logmsg.h"
  40. #include "lpfc_crtn.h"
  41. #include "lpfc_vport.h"
  42. #include "lpfc_version.h"
  44. static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
  45. static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
  46. static int lpfc_post_rcv_buf(struct lpfc_hba *);
  47. static struct scsi_transport_template *lpfc_transport_template = NULL;
  48. static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
  49. static DEFINE_IDR(lpfc_hba_index);
  50. /************************************************************************/
  51. /* */
  52. /* lpfc_config_port_prep */
  53. /* This routine will do LPFC initialization prior to the */
  54. /* CONFIG_PORT mailbox command. This will be initialized */
  55. /* as a SLI layer callback routine. */
  56. /* This routine returns 0 on success or -ERESTART if it wants */
  57. /* the SLI layer to reset the HBA and try again. Any */
  58. /* other return value indicates an error. */
  59. /* */
  60. /************************************************************************/
  61. int
  62. lpfc_config_port_prep(struct lpfc_hba *phba)
  63. {
  64. lpfc_vpd_t *vp = &phba->vpd;
  65. int i = 0, rc;
  66. LPFC_MBOXQ_t *pmb;
  67. MAILBOX_t *mb;
  68. char *lpfc_vpd_data = NULL;
  69. uint16_t offset = 0;
  70. static char licensed[56] =
  71. "key unlock for use with gnu public licensed code only\0";
  72. static int init_key = 1;
  73. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  74. if (!pmb) {
  75. phba->link_state = LPFC_HBA_ERROR;
  76. return -ENOMEM;
  77. }
  78. mb = &pmb->mb;
  79. phba->link_state = LPFC_INIT_MBX_CMDS;
  80. if (lpfc_is_LC_HBA(phba->pcidev->device)) {
  81. if (init_key) {
  82. uint32_t *ptext = (uint32_t *) licensed;
  83. for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
  84. *ptext = cpu_to_be32(*ptext);
  85. init_key = 0;
  86. }
  87. lpfc_read_nv(phba, pmb);
  88. memset((char*)mb->un.varRDnvp.rsvd3, 0,
  89. sizeof (mb->un.varRDnvp.rsvd3));
  90. memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
  91. sizeof (licensed));
  92. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  93. if (rc != MBX_SUCCESS) {
  94. lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
  95. "0324 Config Port initialization "
  96. "error, mbxCmd x%x READ_NVPARM, "
  97. "mbxStatus x%x\n",
  98. mb->mbxCommand, mb->mbxStatus);
  99. mempool_free(pmb, phba->mbox_mem_pool);
  100. return -ERESTART;
  101. }
  102. memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
  103. sizeof(phba->wwnn));
  104. memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
  105. sizeof(phba->wwpn));
  106. }
  107. phba->sli3_options = 0x0;
  108. /* Setup and issue mailbox READ REV command */
  109. lpfc_read_rev(phba, pmb);
  110. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  111. if (rc != MBX_SUCCESS) {
  112. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  113. "0439 Adapter failed to init, mbxCmd x%x "
  114. "READ_REV, mbxStatus x%x\n",
  115. mb->mbxCommand, mb->mbxStatus);
  116. mempool_free( pmb, phba->mbox_mem_pool);
  117. return -ERESTART;
  118. }
  119. /*
  120. * The value of rr must be 1 since the driver set the cv field to 1.
  121. * This setting requires the FW to set all revision fields.
  122. */
  123. if (mb->un.varRdRev.rr == 0) {
  124. vp->rev.rBit = 0;
  125. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  126. "0440 Adapter failed to init, READ_REV has "
  127. "missing revision information.\n");
  128. mempool_free(pmb, phba->mbox_mem_pool);
  129. return -ERESTART;
  130. }
  131. if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
  132. mempool_free(pmb, phba->mbox_mem_pool); return -EINVAL; }
  133. /* Save information as VPD data */
  134. vp->rev.rBit = 1;
  135. memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
  136. vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
  137. memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
  138. vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
  139. memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
  140. vp->rev.biuRev = mb->un.varRdRev.biuRev;
  141. vp->rev.smRev = mb->un.varRdRev.smRev;
  142. vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
  143. vp->rev.endecRev = mb->un.varRdRev.endecRev;
  144. vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
  145. vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
  146. vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
  147. vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
  148. vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
  149. vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
  150. /* If the sli feature level is less than 9, we must
  151. * tear down all RPIs and VPIs on link down if NPIV
  152. * is enabled.
  153. */
  154. if (vp->rev.feaLevelHigh < 9)
  155. phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
  156. if (lpfc_is_LC_HBA(phba->pcidev->device))
  157. memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
  158. sizeof (phba->RandomData));
  159. /* Get adapter VPD information */
  160. pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
  161. if (!pmb->context2)
  162. goto out_free_mbox;
  163. lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
  164. if (!lpfc_vpd_data)
  165. goto out_free_context2;
  166. do {
  167. lpfc_dump_mem(phba, pmb, offset);
  168. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
  169. if (rc != MBX_SUCCESS) {
  170. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  171. "0441 VPD not present on adapter, "
  172. "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
  173. mb->mbxCommand, mb->mbxStatus);
  174. mb->un.varDmp.word_cnt = 0;
  175. }
  176. if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
  177. mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
  178. lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
  179. mb->un.varDmp.word_cnt);
  180. offset += mb->un.varDmp.word_cnt;
  181. } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
  182. lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
  183. kfree(lpfc_vpd_data);
  184. out_free_context2:
  185. kfree(pmb->context2);
  186. out_free_mbox:
  187. mempool_free(pmb, phba->mbox_mem_pool);
  188. return 0;
  189. }
  190. /************************************************************************/
  191. /* */
  192. /* lpfc_config_port_post */
  193. /* This routine will do LPFC initialization after the */
  194. /* CONFIG_PORT mailbox command. This will be initialized */
  195. /* as a SLI layer callback routine. */
  196. /* This routine returns 0 on success. Any other return value */
  197. /* indicates an error. */
  198. /* */
  199. /************************************************************************/
  200. int
  201. lpfc_config_port_post(struct lpfc_hba *phba)
  202. {
  203. struct lpfc_vport *vport = phba->pport;
  204. LPFC_MBOXQ_t *pmb;
  205. MAILBOX_t *mb;
  206. struct lpfc_dmabuf *mp;
  207. struct lpfc_sli *psli = &phba->sli;
  208. uint32_t status, timeout;
  209. int i, j;
  210. int rc;
  211. pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  212. if (!pmb) {
  213. phba->link_state = LPFC_HBA_ERROR;
  214. return -ENOMEM;
  215. }
  216. mb = &pmb->mb;
  217. /* Get login parameters for NID. */
  218. lpfc_read_sparam(phba, pmb, 0);
  219. pmb->vport = vport;
  220. if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
  221. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  222. "0448 Adapter failed init, mbxCmd x%x "
  223. "READ_SPARM mbxStatus x%x\n",
  224. mb->mbxCommand, mb->mbxStatus);
  225. phba->link_state = LPFC_HBA_ERROR;
  226. mp = (struct lpfc_dmabuf *) pmb->context1;
  227. mempool_free( pmb, phba->mbox_mem_pool);
  228. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  229. kfree(mp);
  230. return -EIO;
  231. }
  232. mp = (struct lpfc_dmabuf *) pmb->context1;
  233. memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
  234. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  235. kfree(mp);
  236. pmb->context1 = NULL;
  237. if (phba->cfg_soft_wwnn)
  238. u64_to_wwn(phba->cfg_soft_wwnn,
  239. vport->fc_sparam.nodeName.u.wwn);
  240. if (phba->cfg_soft_wwpn)
  241. u64_to_wwn(phba->cfg_soft_wwpn,
  242. vport->fc_sparam.portName.u.wwn);
  243. memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
  244. sizeof (struct lpfc_name));
  245. memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
  246. sizeof (struct lpfc_name));
  247. /* If no serial number in VPD data, use low 6 bytes of WWNN */
  248. /* This should be consolidated into parse_vpd ? - mr */
  249. if (phba->SerialNumber[0] == 0) {
  250. uint8_t *outptr;
  251. outptr = &vport->fc_nodename.u.s.IEEE[0];
  252. for (i = 0; i < 12; i++) {
  253. status = *outptr++;
  254. j = ((status & 0xf0) >> 4);
  255. if (j <= 9)
  256. phba->SerialNumber[i] =
  257. (char)((uint8_t) 0x30 + (uint8_t) j);
  258. else
  259. phba->SerialNumber[i] =
  260. (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
  261. i++;
  262. j = (status & 0xf);
  263. if (j <= 9)
  264. phba->SerialNumber[i] =
  265. (char)((uint8_t) 0x30 + (uint8_t) j);
  266. else
  267. phba->SerialNumber[i] =
  268. (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
  269. }
  270. }
  271. lpfc_read_config(phba, pmb);
  272. pmb->vport = vport;
  273. if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
  274. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  275. "0453 Adapter failed to init, mbxCmd x%x "
  276. "READ_CONFIG, mbxStatus x%x\n",
  277. mb->mbxCommand, mb->mbxStatus);
  278. phba->link_state = LPFC_HBA_ERROR;
  279. mempool_free( pmb, phba->mbox_mem_pool);
  280. return -EIO;
  281. }
  282. /* Reset the DFT_HBA_Q_DEPTH to the max xri */
  283. if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
  284. phba->cfg_hba_queue_depth =
  285. mb->un.varRdConfig.max_xri + 1;
  286. phba->lmt = mb->un.varRdConfig.lmt;
  287. /* Get the default values for Model Name and Description */
  288. lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
  289. if ((phba->cfg_link_speed > LINK_SPEED_10G)
  290. || ((phba->cfg_link_speed == LINK_SPEED_1G)
  291. && !(phba->lmt & LMT_1Gb))
  292. || ((phba->cfg_link_speed == LINK_SPEED_2G)
  293. && !(phba->lmt & LMT_2Gb))
  294. || ((phba->cfg_link_speed == LINK_SPEED_4G)
  295. && !(phba->lmt & LMT_4Gb))
  296. || ((phba->cfg_link_speed == LINK_SPEED_8G)
  297. && !(phba->lmt & LMT_8Gb))
  298. || ((phba->cfg_link_speed == LINK_SPEED_10G)
  299. && !(phba->lmt & LMT_10Gb))) {
  300. /* Reset link speed to auto */
  301. lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
  302. "1302 Invalid speed for this board: "
  303. "Reset link speed to auto: x%x\n",
  304. phba->cfg_link_speed);
  305. phba->cfg_link_speed = LINK_SPEED_AUTO;
  306. }
  307. phba->link_state = LPFC_LINK_DOWN;
  308. /* Only process IOCBs on ring 0 till hba_state is READY */
  309. if (psli->ring[psli->extra_ring].cmdringaddr)
  310. psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
  311. if (psli->ring[psli->fcp_ring].cmdringaddr)
  312. psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
  313. if (psli->ring[psli->next_ring].cmdringaddr)
  314. psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
  315. /* Post receive buffers for desired rings */
  316. if (phba->sli_rev != 3)
  317. lpfc_post_rcv_buf(phba);
  318. /* Enable appropriate host interrupts */
  319. spin_lock_irq(&phba->hbalock);
  320. status = readl(phba->HCregaddr);
  321. status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
  322. if (psli->num_rings > 0)
  323. status |= HC_R0INT_ENA;
  324. if (psli->num_rings > 1)
  325. status |= HC_R1INT_ENA;
  326. if (psli->num_rings > 2)
  327. status |= HC_R2INT_ENA;
  328. if (psli->num_rings > 3)
  329. status |= HC_R3INT_ENA;
  330. if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
  331. (phba->cfg_poll & DISABLE_FCP_RING_INT))
  332. status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
  333. writel(status, phba->HCregaddr);
  334. readl(phba->HCregaddr); /* flush */
  335. spin_unlock_irq(&phba->hbalock);
  336. /*
  337. * Setup the ring 0 (els) timeout handler
  338. */
  339. timeout = phba->fc_ratov << 1;
  340. mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
  341. mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
  342. phba->hb_outstanding = 0;
  343. phba->last_completion_time = jiffies;
  344. lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
  345. pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  346. pmb->vport = vport;
  347. rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  348. lpfc_set_loopback_flag(phba);
  349. if (rc != MBX_SUCCESS) {
  350. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  351. "0454 Adapter failed to init, mbxCmd x%x "
  352. "INIT_LINK, mbxStatus x%x\n",
  353. mb->mbxCommand, mb->mbxStatus);
  354. /* Clear all interrupt enable conditions */
  355. writel(0, phba->HCregaddr);
  356. readl(phba->HCregaddr); /* flush */
  357. /* Clear all pending interrupts */
  358. writel(0xffffffff, phba->HAregaddr);
  359. readl(phba->HAregaddr); /* flush */
  360. phba->link_state = LPFC_HBA_ERROR;
  361. if (rc != MBX_BUSY)
  362. mempool_free(pmb, phba->mbox_mem_pool);
  363. return -EIO;
  364. }
  365. /* MBOX buffer will be freed in mbox compl */
  366. return (0);
  367. }
  368. /************************************************************************/
  369. /* */
  370. /* lpfc_hba_down_prep */
  371. /* This routine will do LPFC uninitialization before the */
  372. /* HBA is reset when bringing down the SLI Layer. This will be */
  373. /* initialized as a SLI layer callback routine. */
  374. /* This routine returns 0 on success. Any other return value */
  375. /* indicates an error. */
  376. /* */
  377. /************************************************************************/
  378. int
  379. lpfc_hba_down_prep(struct lpfc_hba *phba)
  380. {
  381. /* Disable interrupts */
  382. writel(0, phba->HCregaddr);
  383. readl(phba->HCregaddr); /* flush */
  384. lpfc_cleanup_discovery_resources(phba->pport);
  385. return 0;
  386. }
  387. /************************************************************************/
  388. /* */
  389. /* lpfc_hba_down_post */
  390. /* This routine will do uninitialization after the HBA is reset */
  391. /* when bringing down the SLI Layer. */
  392. /* This routine returns 0 on success. Any other return value */
  393. /* indicates an error. */
  394. /* */
  395. /************************************************************************/
  396. int
  397. lpfc_hba_down_post(struct lpfc_hba *phba)
  398. {
  399. struct lpfc_sli *psli = &phba->sli;
  400. struct lpfc_sli_ring *pring;
  401. struct lpfc_dmabuf *mp, *next_mp;
  402. int i;
  403. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
  404. lpfc_sli_hbqbuf_free_all(phba);
  405. else {
  406. /* Cleanup preposted buffers on the ELS ring */
  407. pring = &psli->ring[LPFC_ELS_RING];
  408. list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
  409. list_del(&mp->list);
  410. pring->postbufq_cnt--;
  411. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  412. kfree(mp);
  413. }
  414. }
  415. for (i = 0; i < psli->num_rings; i++) {
  416. pring = &psli->ring[i];
  417. lpfc_sli_abort_iocb_ring(phba, pring);
  418. }
  419. return 0;
  420. }
  421. /* HBA heart beat timeout handler */
  422. void
  423. lpfc_hb_timeout(unsigned long ptr)
  424. {
  425. struct lpfc_hba *phba;
  426. unsigned long iflag;
  427. phba = (struct lpfc_hba *)ptr;
  428. spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
  429. if (!(phba->pport->work_port_events & WORKER_HB_TMO))
  430. phba->pport->work_port_events |= WORKER_HB_TMO;
  431. spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
  432. if (phba->work_wait)
  433. wake_up(phba->work_wait);
  434. return;
  435. }
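/*
 * Completion handler for the heartbeat mailbox command: clear the
 * hb_outstanding flag, free the mailbox, and re-arm the heartbeat
 * timer unless the port is offline, in error, or unloading.
 */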
  436. static void
  437. lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
  438. {
  439. unsigned long drvr_flag;
  440. spin_lock_irqsave(&phba->hbalock, drvr_flag);
  441. phba->hb_outstanding = 0;
  442. spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
  443. mempool_free(pmboxq, phba->mbox_mem_pool);
  444. if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
  445. !(phba->link_state == LPFC_HBA_ERROR) &&
  446. !(phba->pport->load_flag & FC_UNLOADING))
  447. mod_timer(&phba->hb_tmofunc,
  448. jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
  449. return;
  450. }
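/*
 * Heartbeat timeout worker. If I/O completions have been seen within
 * the heartbeat interval, just re-arm the timer; otherwise issue a
 * heartbeat mailbox command. If the previous heartbeat is still
 * outstanding, the adapter is assumed dead and the port is taken
 * offline.
 */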
  451. void
  452. lpfc_hb_timeout_handler(struct lpfc_hba *phba)
  453. {
  454. LPFC_MBOXQ_t *pmboxq;
  455. int retval;
  456. struct lpfc_sli *psli = &phba->sli;
  457. if ((phba->link_state == LPFC_HBA_ERROR) ||
  458. (phba->pport->load_flag & FC_UNLOADING) ||
  459. (phba->pport->fc_flag & FC_OFFLINE_MODE))
  460. return;
  461. spin_lock_irq(&phba->pport->work_port_lock);
  462. /* If the timer is already canceled do nothing */
  463. if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
  464. spin_unlock_irq(&phba->pport->work_port_lock);
  465. return;
  466. }
  467. if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
  468. jiffies)) {
  469. spin_unlock_irq(&phba->pport->work_port_lock);
  470. if (!phba->hb_outstanding)
  471. mod_timer(&phba->hb_tmofunc,
  472. jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
  473. else
  474. mod_timer(&phba->hb_tmofunc,
  475. jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
  476. return;
  477. }
  478. spin_unlock_irq(&phba->pport->work_port_lock);
  479. /* If there is no heart beat outstanding, issue a heartbeat command */
  480. if (!phba->hb_outstanding) {
  481. pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
  482. if (!pmboxq) {
  483. mod_timer(&phba->hb_tmofunc,
  484. jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
  485. return;
  486. }
  487. lpfc_heart_beat(phba, pmboxq);
  488. pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
  489. pmboxq->vport = phba->pport;
  490. retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
  491. if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
  492. mempool_free(pmboxq, phba->mbox_mem_pool);
  493. mod_timer(&phba->hb_tmofunc,
  494. jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
  495. return;
  496. }
  497. mod_timer(&phba->hb_tmofunc,
  498. jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
  499. phba->hb_outstanding = 1;
  500. return;
  501. } else {
  502. /*
  503. * If the heartbeat timeout fires while hb_outstanding is set, the
  504. * previous heartbeat never completed; take the HBA offline.
  505. */
  506. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  507. "0459 Adapter heartbeat failure, taking "
  508. "this port offline.\n");
  509. spin_lock_irq(&phba->hbalock);
  510. psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
  511. spin_unlock_irq(&phba->hbalock);
  512. lpfc_offline_prep(phba);
  513. lpfc_offline(phba);
  514. lpfc_unblock_mgmt_io(phba);
  515. phba->link_state = LPFC_HBA_ERROR;
  516. lpfc_hba_down_post(phba);
  517. }
  518. }
  519. /************************************************************************/
  520. /* */
  521. /* lpfc_handle_eratt */
  522. /* This routine will handle processing a Host Attention */
  523. /* Error Status event. This will be initialized */
  524. /* as a SLI layer callback routine. */
  525. /* */
  526. /************************************************************************/
  527. void
  528. lpfc_handle_eratt(struct lpfc_hba *phba)
  529. {
  530. struct lpfc_vport *vport = phba->pport;
  531. struct lpfc_sli *psli = &phba->sli;
  532. struct lpfc_sli_ring *pring;
  533. struct lpfc_vport **vports;
  534. uint32_t event_data;
  535. struct Scsi_Host *shost;
  536. int i;
  537. /* If the pci channel is offline, ignore possible errors,
  538. * since we cannot communicate with the pci card anyway. */
  539. if (pci_channel_offline(phba->pcidev))
  540. return;
  541. if (phba->work_hs & HS_FFER6 ||
  542. phba->work_hs & HS_FFER5) {
  543. /* Re-establishing Link */
  544. lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
  545. "1301 Re-establishing Link "
  546. "Data: x%x x%x x%x\n",
  547. phba->work_hs,
  548. phba->work_status[0], phba->work_status[1]);
  549. vports = lpfc_create_vport_work_array(phba);
  550. if (vports != NULL)
  551. for(i = 0;
  552. i < LPFC_MAX_VPORTS && vports[i] != NULL;
  553. i++){
  554. shost = lpfc_shost_from_vport(vports[i]);
  555. spin_lock_irq(shost->host_lock);
  556. vports[i]->fc_flag |= FC_ESTABLISH_LINK;
  557. spin_unlock_irq(shost->host_lock);
  558. }
  559. lpfc_destroy_vport_work_array(vports);
  560. spin_lock_irq(&phba->hbalock);
  561. psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
  562. spin_unlock_irq(&phba->hbalock);
  563. /*
  564. * Firmware stops when it triggers an error attention with HS_FFER6.
  565. * This may cause outstanding I/Os to be dropped by the firmware.
  566. * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
  567. * retry them after the link is re-established.
  568. */
  569. pring = &psli->ring[psli->fcp_ring];
  570. lpfc_sli_abort_iocb_ring(phba, pring);
  571. /*
  572. * There was a firmware error. Take the hba offline and then
  573. * attempt to restart it.
  574. */
  575. lpfc_offline_prep(phba);
  576. lpfc_offline(phba);
  577. lpfc_sli_brdrestart(phba);
  578. if (lpfc_online(phba) == 0) { /* Initialize the HBA */
  579. mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
  580. lpfc_unblock_mgmt_io(phba);
  581. return;
  582. }
  583. lpfc_unblock_mgmt_io(phba);
  584. } else {
  585. /* The if clause above forces this code path when the status
  586. * failure is a value other than FFER5 or FFER6. Do not call
  587. * lpfc_offline() twice. This is the adapter hardware error path.
  588. */
  589. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  590. "0457 Adapter Hardware Error "
  591. "Data: x%x x%x x%x\n",
  592. phba->work_hs,
  593. phba->work_status[0], phba->work_status[1]);
  594. event_data = FC_REG_DUMP_EVENT;
  595. shost = lpfc_shost_from_vport(vport);
  596. fc_host_post_vendor_event(shost, fc_get_event_number(),
  597. sizeof(event_data), (char *) &event_data,
  598. SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
  599. spin_lock_irq(&phba->hbalock);
  600. psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
  601. spin_unlock_irq(&phba->hbalock);
  602. lpfc_offline_prep(phba);
  603. lpfc_offline(phba);
  604. lpfc_unblock_mgmt_io(phba);
  605. phba->link_state = LPFC_HBA_ERROR;
  606. lpfc_hba_down_post(phba);
  607. }
  608. }
  609. /************************************************************************/
  610. /* */
  611. /* lpfc_handle_latt */
  612. /* This routine will handle processing a Host Attention */
  613. /* Link Status event. This will be initialized */
  614. /* as a SLI layer callback routine. */
  615. /* */
  616. /************************************************************************/
  617. void
  618. lpfc_handle_latt(struct lpfc_hba *phba)
  619. {
  620. struct lpfc_vport *vport = phba->pport;
  621. struct lpfc_sli *psli = &phba->sli;
  622. LPFC_MBOXQ_t *pmb;
  623. volatile uint32_t control;
  624. struct lpfc_dmabuf *mp;
  625. int rc = -ENOMEM;
  626. pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  627. if (!pmb)
  628. goto lpfc_handle_latt_err_exit;
  629. mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  630. if (!mp)
  631. goto lpfc_handle_latt_free_pmb;
  632. mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
  633. if (!mp->virt)
  634. goto lpfc_handle_latt_free_mp;
  635. rc = -EIO;
  636. /* Cleanup any outstanding ELS commands */
  637. lpfc_els_flush_all_cmd(phba);
  638. psli->slistat.link_event++;
  639. lpfc_read_la(phba, pmb, mp);
  640. pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
  641. pmb->vport = vport;
  642. rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
  643. if (rc == MBX_NOT_FINISHED)
  644. goto lpfc_handle_latt_free_mbuf;
  645. /* Clear Link Attention in HA REG */
  646. spin_lock_irq(&phba->hbalock);
  647. writel(HA_LATT, phba->HAregaddr);
  648. readl(phba->HAregaddr); /* flush */
  649. spin_unlock_irq(&phba->hbalock);
  650. return;
  651. lpfc_handle_latt_free_mbuf:
  652. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  653. lpfc_handle_latt_free_mp:
  654. kfree(mp);
  655. lpfc_handle_latt_free_pmb:
  656. mempool_free(pmb, phba->mbox_mem_pool);
  657. lpfc_handle_latt_err_exit:
  658. /* Enable Link attention interrupts */
  659. spin_lock_irq(&phba->hbalock);
  660. psli->sli_flag |= LPFC_PROCESS_LA;
  661. control = readl(phba->HCregaddr);
  662. control |= HC_LAINT_ENA;
  663. writel(control, phba->HCregaddr);
  664. readl(phba->HCregaddr); /* flush */
  665. /* Clear Link Attention in HA REG */
  666. writel(HA_LATT, phba->HAregaddr);
  667. readl(phba->HAregaddr); /* flush */
  668. spin_unlock_irq(&phba->hbalock);
  669. lpfc_linkdown(phba);
  670. phba->link_state = LPFC_HBA_ERROR;
  671. /* The other case is an error from issue_mbox */
  672. if (rc == -ENOMEM)
  673. lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
  674. "0300 READ_LA: no buffers\n");
  675. return;
  676. }
  677. /************************************************************************/
  678. /* */
  679. /* lpfc_parse_vpd */
  680. /* This routine will parse the VPD data */
  681. /* */
  682. /************************************************************************/
  683. static int
  684. lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
  685. {
  686. uint8_t lenlo, lenhi;
  687. int Length;
  688. int i, j;
  689. int finished = 0;
  690. int index = 0;
  691. if (!vpd)
  692. return 0;
  693. /* Vital Product */
  694. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  695. "0455 Vital Product Data: x%x x%x x%x x%x\n",
  696. (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
  697. (uint32_t) vpd[3]);
  698. while (!finished && (index < (len - 4))) {
  699. switch (vpd[index]) {
  700. case 0x82:
  701. case 0x91:
  702. index += 1;
  703. lenlo = vpd[index];
  704. index += 1;
  705. lenhi = vpd[index];
  706. index += 1;
  707. i = ((((unsigned short)lenhi) << 8) + lenlo);
  708. index += i;
  709. break;
  710. case 0x90:
  711. index += 1;
  712. lenlo = vpd[index];
  713. index += 1;
  714. lenhi = vpd[index];
  715. index += 1;
  716. Length = ((((unsigned short)lenhi) << 8) + lenlo);
  717. if (Length > len - index)
  718. Length = len - index;
  719. while (Length > 0) {
  720. /* Look for Serial Number */
  721. if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
  722. index += 2;
  723. i = vpd[index];
  724. index += 1;
  725. j = 0;
  726. Length -= (3+i);
  727. while(i--) {
  728. phba->SerialNumber[j++] = vpd[index++];
  729. if (j == 31)
  730. break;
  731. }
  732. phba->SerialNumber[j] = 0;
  733. continue;
  734. }
  735. else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
  736. phba->vpd_flag |= VPD_MODEL_DESC;
  737. index += 2;
  738. i = vpd[index];
  739. index += 1;
  740. j = 0;
  741. Length -= (3+i);
  742. while(i--) {
  743. phba->ModelDesc[j++] = vpd[index++];
  744. if (j == 255)
  745. break;
  746. }
  747. phba->ModelDesc[j] = 0;
  748. continue;
  749. }
  750. else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
  751. phba->vpd_flag |= VPD_MODEL_NAME;
  752. index += 2;
  753. i = vpd[index];
  754. index += 1;
  755. j = 0;
  756. Length -= (3+i);
  757. while(i--) {
  758. phba->ModelName[j++] = vpd[index++];
  759. if (j == 79)
  760. break;
  761. }
  762. phba->ModelName[j] = 0;
  763. continue;
  764. }
  765. else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
  766. phba->vpd_flag |= VPD_PROGRAM_TYPE;
  767. index += 2;
  768. i = vpd[index];
  769. index += 1;
  770. j = 0;
  771. Length -= (3+i);
  772. while(i--) {
  773. phba->ProgramType[j++] = vpd[index++];
  774. if (j == 255)
  775. break;
  776. }
  777. phba->ProgramType[j] = 0;
  778. continue;
  779. }
  780. else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
  781. phba->vpd_flag |= VPD_PORT;
  782. index += 2;
  783. i = vpd[index];
  784. index += 1;
  785. j = 0;
  786. Length -= (3+i);
  787. while(i--) {
  788. phba->Port[j++] = vpd[index++];
  789. if (j == 19)
  790. break;
  791. }
  792. phba->Port[j] = 0;
  793. continue;
  794. }
  795. else {
  796. index += 2;
  797. i = vpd[index];
  798. index += 1;
  799. index += i;
  800. Length -= (3 + i);
  801. }
  802. }
  803. finished = 0;
  804. break;
  805. case 0x78:
  806. finished = 1;
  807. break;
  808. default:
  809. index ++;
  810. break;
  811. }
  812. }
  813. return(1);
  814. }
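/*
 * Fill in the adapter model name and description strings based on the
 * PCI device ID and the link-speed capability bits in phba->lmt.
 * Strings that are already non-empty (e.g. taken from VPD) are left
 * untouched.
 */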
  815. static void
  816. lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
  817. {
  818. lpfc_vpd_t *vp;
  819. uint16_t dev_id = phba->pcidev->device;
  820. int max_speed;
  821. struct {
  822. char * name;
  823. int max_speed;
  824. char * bus;
  825. } m = {"<Unknown>", 0, ""};
  826. if (mdp && mdp[0] != '\0'
  827. && descp && descp[0] != '\0')
  828. return;
  829. if (phba->lmt & LMT_10Gb)
  830. max_speed = 10;
  831. else if (phba->lmt & LMT_8Gb)
  832. max_speed = 8;
  833. else if (phba->lmt & LMT_4Gb)
  834. max_speed = 4;
  835. else if (phba->lmt & LMT_2Gb)
  836. max_speed = 2;
  837. else
  838. max_speed = 1;
  839. vp = &phba->vpd;
  840. switch (dev_id) {
  841. case PCI_DEVICE_ID_FIREFLY:
  842. m = (typeof(m)){"LP6000", max_speed, "PCI"};
  843. break;
  844. case PCI_DEVICE_ID_SUPERFLY:
  845. if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
  846. m = (typeof(m)){"LP7000", max_speed, "PCI"};
  847. else
  848. m = (typeof(m)){"LP7000E", max_speed, "PCI"};
  849. break;
  850. case PCI_DEVICE_ID_DRAGONFLY:
  851. m = (typeof(m)){"LP8000", max_speed, "PCI"};
  852. break;
  853. case PCI_DEVICE_ID_CENTAUR:
  854. if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
  855. m = (typeof(m)){"LP9002", max_speed, "PCI"};
  856. else
  857. m = (typeof(m)){"LP9000", max_speed, "PCI"};
  858. break;
  859. case PCI_DEVICE_ID_RFLY:
  860. m = (typeof(m)){"LP952", max_speed, "PCI"};
  861. break;
  862. case PCI_DEVICE_ID_PEGASUS:
  863. m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
  864. break;
  865. case PCI_DEVICE_ID_THOR:
  866. m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
  867. break;
  868. case PCI_DEVICE_ID_VIPER:
  869. m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
  870. break;
  871. case PCI_DEVICE_ID_PFLY:
  872. m = (typeof(m)){"LP982", max_speed, "PCI-X"};
  873. break;
  874. case PCI_DEVICE_ID_TFLY:
  875. m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
  876. break;
  877. case PCI_DEVICE_ID_HELIOS:
  878. m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
  879. break;
  880. case PCI_DEVICE_ID_HELIOS_SCSP:
  881. m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
  882. break;
  883. case PCI_DEVICE_ID_HELIOS_DCSP:
  884. m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
  885. break;
  886. case PCI_DEVICE_ID_NEPTUNE:
  887. m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
  888. break;
  889. case PCI_DEVICE_ID_NEPTUNE_SCSP:
  890. m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
  891. break;
  892. case PCI_DEVICE_ID_NEPTUNE_DCSP:
  893. m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
  894. break;
  895. case PCI_DEVICE_ID_BMID:
  896. m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
  897. break;
  898. case PCI_DEVICE_ID_BSMB:
  899. m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
  900. break;
  901. case PCI_DEVICE_ID_ZEPHYR:
  902. m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
  903. break;
  904. case PCI_DEVICE_ID_ZEPHYR_SCSP:
  905. m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
  906. break;
  907. case PCI_DEVICE_ID_ZEPHYR_DCSP:
  908. m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
  909. break;
  910. case PCI_DEVICE_ID_ZMID:
  911. m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
  912. break;
  913. case PCI_DEVICE_ID_ZSMB:
  914. m = (typeof(m)){"LPe111", max_speed, "PCIe"};
  915. break;
  916. case PCI_DEVICE_ID_LP101:
  917. m = (typeof(m)){"LP101", max_speed, "PCI-X"};
  918. break;
  919. case PCI_DEVICE_ID_LP10000S:
  920. m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
  921. break;
  922. case PCI_DEVICE_ID_LP11000S:
  923. m = (typeof(m)){"LP11000-S", max_speed,
  924. "PCI-X2"};
  925. break;
  926. case PCI_DEVICE_ID_LPE11000S:
  927. m = (typeof(m)){"LPe11000-S", max_speed,
  928. "PCIe"};
  929. break;
  930. case PCI_DEVICE_ID_SAT:
  931. m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
  932. break;
  933. case PCI_DEVICE_ID_SAT_MID:
  934. m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
  935. break;
  936. case PCI_DEVICE_ID_SAT_SMB:
  937. m = (typeof(m)){"LPe121", max_speed, "PCIe"};
  938. break;
  939. case PCI_DEVICE_ID_SAT_DCSP:
  940. m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
  941. break;
  942. case PCI_DEVICE_ID_SAT_SCSP:
  943. m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
  944. break;
  945. case PCI_DEVICE_ID_SAT_S:
  946. m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
  947. break;
  948. default:
  949. m = (typeof(m)){ NULL };
  950. break;
  951. }
  952. if (mdp && mdp[0] == '\0')
  953. snprintf(mdp, 79,"%s", m.name);
  954. if (descp && descp[0] == '\0')
  955. snprintf(descp, 255,
  956. "Emulex %s %dGb %s Fibre Channel Adapter",
  957. m.name, m.max_speed, m.bus);
  958. }
  959. /**************************************************/
  960. /* lpfc_post_buffer */
  961. /* */
  962. /* This routine will post count buffers to the */
  963. /* ring with the QUE_RING_BUF64_CN command. This */
  964. /* allows 2 buffers / command to be posted. */
  965. /* Returns the number of buffers NOT posted. */
  966. /**************************************************/
  967. int
  968. lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
  969. int type)
  970. {
  971. IOCB_t *icmd;
  972. struct lpfc_iocbq *iocb;
  973. struct lpfc_dmabuf *mp1, *mp2;
  974. cnt += pring->missbufcnt;
  975. /* While there are buffers to post */
  976. while (cnt > 0) {
  977. /* Allocate buffer for command iocb */
  978. iocb = lpfc_sli_get_iocbq(phba);
  979. if (iocb == NULL) {
  980. pring->missbufcnt = cnt;
  981. return cnt;
  982. }
  983. icmd = &iocb->iocb;
  984. /* 2 buffers can be posted per command */
  985. /* Allocate buffer to post */
  986. mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
  987. if (mp1)
  988. mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
  989. &mp1->phys);
  990. if (mp1 == 0 || mp1->virt == 0) {
  991. kfree(mp1);
  992. lpfc_sli_release_iocbq(phba, iocb);
  993. pring->missbufcnt = cnt;
  994. return cnt;
  995. }
  996. INIT_LIST_HEAD(&mp1->list);
  997. /* Allocate buffer to post */
  998. if (cnt > 1) {
  999. mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
  1000. if (mp2)
  1001. mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
  1002. &mp2->phys);
  1003. if (mp2 == 0 || mp2->virt == 0) {
  1004. kfree(mp2);
  1005. lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
  1006. kfree(mp1);
  1007. lpfc_sli_release_iocbq(phba, iocb);
  1008. pring->missbufcnt = cnt;
  1009. return cnt;
  1010. }
  1011. INIT_LIST_HEAD(&mp2->list);
  1012. } else {
  1013. mp2 = NULL;
  1014. }
  1015. icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
  1016. icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
  1017. icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
  1018. icmd->ulpBdeCount = 1;
  1019. cnt--;
  1020. if (mp2) {
  1021. icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
  1022. icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
  1023. icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
  1024. cnt--;
  1025. icmd->ulpBdeCount = 2;
  1026. }
  1027. icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
  1028. icmd->ulpLe = 1;
  1029. if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
  1030. lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
  1031. kfree(mp1);
  1032. cnt++;
  1033. if (mp2) {
  1034. lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
  1035. kfree(mp2);
  1036. cnt++;
  1037. }
  1038. lpfc_sli_release_iocbq(phba, iocb);
  1039. pring->missbufcnt = cnt;
  1040. return cnt;
  1041. }
  1042. lpfc_sli_ringpostbuf_put(phba, pring, mp1);
  1043. if (mp2)
  1044. lpfc_sli_ringpostbuf_put(phba, pring, mp2);
  1045. }
  1046. pring->missbufcnt = 0;
  1047. return 0;
  1048. }
  1049. /************************************************************************/
  1050. /* */
  1051. /* lpfc_post_rcv_buf */
  1052. /* This routine post initial rcv buffers to the configured rings */
  1053. /* */
  1054. /************************************************************************/
  1055. static int
  1056. lpfc_post_rcv_buf(struct lpfc_hba *phba)
  1057. {
  1058. struct lpfc_sli *psli = &phba->sli;
  1059. /* Ring 0, ELS / CT buffers */
  1060. lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
  1061. /* Ring 2 - FCP no buffers needed */
  1062. return 0;
  1063. }
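/* S(N,V) rotates the 32-bit value V left by N bits (the SHA-1 circular
 * left shift). */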
  1064. #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
  1065. /************************************************************************/
  1066. /* */
  1067. /* lpfc_sha_init */
  1068. /* */
  1069. /************************************************************************/
  1070. static void
  1071. lpfc_sha_init(uint32_t * HashResultPointer)
  1072. {
  1073. HashResultPointer[0] = 0x67452301;
  1074. HashResultPointer[1] = 0xEFCDAB89;
  1075. HashResultPointer[2] = 0x98BADCFE;
  1076. HashResultPointer[3] = 0x10325476;
  1077. HashResultPointer[4] = 0xC3D2E1F0;
  1078. }
  1079. /************************************************************************/
  1080. /* */
  1081. /* lpfc_sha_iterate */
  1082. /* */
  1083. /************************************************************************/
  1084. static void
  1085. lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
  1086. {
  1087. int t;
  1088. uint32_t TEMP;
  1089. uint32_t A, B, C, D, E;
  1090. t = 16;
  1091. do {
  1092. HashWorkingPointer[t] =
  1093. S(1,
  1094. HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
  1095. HashWorkingPointer[t - 14] ^
  1096. HashWorkingPointer[t - 16]);
  1097. } while (++t <= 79);
  1098. t = 0;
  1099. A = HashResultPointer[0];
  1100. B = HashResultPointer[1];
  1101. C = HashResultPointer[2];
  1102. D = HashResultPointer[3];
  1103. E = HashResultPointer[4];
  1104. do {
  1105. if (t < 20) {
  1106. TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
  1107. } else if (t < 40) {
  1108. TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
  1109. } else if (t < 60) {
  1110. TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
  1111. } else {
  1112. TEMP = (B ^ C ^ D) + 0xCA62C1D6;
  1113. }
  1114. TEMP += S(5, A) + E + HashWorkingPointer[t];
  1115. E = D;
  1116. D = C;
  1117. C = S(30, B);
  1118. B = A;
  1119. A = TEMP;
  1120. } while (++t <= 79);
  1121. HashResultPointer[0] += A;
  1122. HashResultPointer[1] += B;
  1123. HashResultPointer[2] += C;
  1124. HashResultPointer[3] += D;
  1125. HashResultPointer[4] += E;
  1126. }
  1127. /************************************************************************/
  1128. /* */
  1129. /* lpfc_challenge_key */
  1130. /* */
  1131. /************************************************************************/
  1132. static void
  1133. lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
  1134. {
  1135. *HashWorking = (*RandomChallenge ^ *HashWorking);
  1136. }
  1137. /************************************************************************/
  1138. /* */
  1139. /* lpfc_hba_init */
  1140. /* */
  1141. /************************************************************************/
  1142. void
  1143. lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
  1144. {
  1145. int t;
  1146. uint32_t *HashWorking;
  1147. uint32_t *pwwnn = (uint32_t *) phba->wwnn;
  1148. HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
  1149. if (!HashWorking)
  1150. return;
  1151. memset(HashWorking, 0, (80 * sizeof(uint32_t)));
  1152. HashWorking[0] = HashWorking[78] = *pwwnn++;
  1153. HashWorking[1] = HashWorking[79] = *pwwnn;
  1154. for (t = 0; t < 7; t++)
  1155. lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
  1156. lpfc_sha_init(hbainit);
  1157. lpfc_sha_iterate(hbainit, HashWorking);
  1158. kfree(HashWorking);
  1159. }
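/*
 * Release a vport's discovery state: cancel the discovery timeout and
 * drop the reference on every node on the vport's fc_nodes list.
 */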
  1160. static void
  1161. lpfc_cleanup(struct lpfc_vport *vport)
  1162. {
  1163. struct lpfc_nodelist *ndlp, *next_ndlp;
  1164. /* clean up phba - lpfc specific */
  1165. lpfc_can_disctmo(vport);
  1166. list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
  1167. lpfc_nlp_put(ndlp);
  1168. return;
  1169. }
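/*
 * Timer handler for the link re-establishment window: once the timer
 * expires, clear the FC_ESTABLISH_LINK flag on every vport.
 */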
  1170. static void
  1171. lpfc_establish_link_tmo(unsigned long ptr)
  1172. {
  1173. struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
  1174. struct lpfc_vport **vports;
  1175. unsigned long iflag;
  1176. int i;
  1177. /* Re-establishing Link, timer expired */
  1178. lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
  1179. "1300 Re-establishing Link, timer expired "
  1180. "Data: x%x x%x\n",
  1181. phba->pport->fc_flag, phba->pport->port_state);
  1182. vports = lpfc_create_vport_work_array(phba);
  1183. if (vports != NULL)
  1184. for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
  1185. struct Scsi_Host *shost;
  1186. shost = lpfc_shost_from_vport(vports[i]);
  1187. spin_lock_irqsave(shost->host_lock, iflag);
  1188. vports[i]->fc_flag &= ~FC_ESTABLISH_LINK;
  1189. spin_unlock_irqrestore(shost->host_lock, iflag);
  1190. }
  1191. lpfc_destroy_vport_work_array(vports);
  1192. }
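/* Stop the per-vport timers: ELS timeout, FDMI timeout, and the
 * discovery timeout. */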
  1193. void
  1194. lpfc_stop_vport_timers(struct lpfc_vport *vport)
  1195. {
  1196. del_timer_sync(&vport->els_tmofunc);
  1197. del_timer_sync(&vport->fc_fdmitmo);
  1198. lpfc_can_disctmo(vport);
  1199. return;
  1200. }
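/*
 * Stop all timers owned by the HBA: the FCP polling timer, the link
 * re-establish timer, the physical port's vport timers, the mailbox
 * timeout, the fabric block timer, and the heartbeat timer.
 */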
  1201. static void
  1202. lpfc_stop_phba_timers(struct lpfc_hba *phba)
  1203. {
  1204. del_timer_sync(&phba->fcp_poll_timer);
  1205. del_timer_sync(&phba->fc_estabtmo);
  1206. lpfc_stop_vport_timers(phba->pport);
  1207. del_timer_sync(&phba->sli.mbox_tmo);
  1208. del_timer_sync(&phba->fabric_block_timer);
  1209. phba->hb_outstanding = 0;
  1210. del_timer_sync(&phba->hb_tmofunc);
  1211. return;
  1212. }
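/*
 * Bring a previously offlined adapter back online: rebuild the SLI
 * queues, reinitialize the HBA, and clear FC_OFFLINE_MODE on every
 * vport. Returns 0 on success (or if the port is already online) and
 * 1 if SLI queue or HBA setup fails.
 */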
  1213. int
  1214. lpfc_online(struct lpfc_hba *phba)
  1215. {
  1216. struct lpfc_vport *vport = phba->pport;
  1217. struct lpfc_vport **vports;
  1218. int i;
  1219. if (!phba)
  1220. return 0;
  1221. if (!(vport->fc_flag & FC_OFFLINE_MODE))
  1222. return 0;
  1223. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  1224. "0458 Bring Adapter online\n");
  1225. lpfc_block_mgmt_io(phba);
  1226. if (!lpfc_sli_queue_setup(phba)) {
  1227. lpfc_unblock_mgmt_io(phba);
  1228. return 1;
  1229. }
  1230. if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */
  1231. lpfc_unblock_mgmt_io(phba);
  1232. return 1;
  1233. }
  1234. vports = lpfc_create_vport_work_array(phba);
  1235. if (vports != NULL)
  1236. for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
  1237. struct Scsi_Host *shost;
  1238. shost = lpfc_shost_from_vport(vports[i]);
  1239. spin_lock_irq(shost->host_lock);
  1240. vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
  1241. if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
  1242. vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
  1243. spin_unlock_irq(shost->host_lock);
  1244. }
  1245. lpfc_destroy_vport_work_array(vports);
  1246. lpfc_unblock_mgmt_io(phba);
  1247. return 0;
  1248. }
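/* Set LPFC_BLOCK_MGMT_IO under the hbalock so that management commands
 * are held off while the port transitions offline/online. */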
  1249. void
  1250. lpfc_block_mgmt_io(struct lpfc_hba * phba)
  1251. {
  1252. unsigned long iflag;
  1253. spin_lock_irqsave(&phba->hbalock, iflag);
  1254. phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
  1255. spin_unlock_irqrestore(&phba->hbalock, iflag);
  1256. }
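/* Clear LPFC_BLOCK_MGMT_IO, allowing management commands again. */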
  1257. void
  1258. lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
  1259. {
  1260. unsigned long iflag;
  1261. spin_lock_irqsave(&phba->hbalock, iflag);
  1262. phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
  1263. spin_unlock_irqrestore(&phba->hbalock, iflag);
  1264. }
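/*
 * Prepare the port for offline: block management I/O, take the link
 * down, unregister the RPI for every node that is not in the unused
 * state, and flush the mailbox queue.
 */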
  1265. void
  1266. lpfc_offline_prep(struct lpfc_hba * phba)
  1267. {
  1268. struct lpfc_vport *vport = phba->pport;
  1269. struct lpfc_nodelist *ndlp, *next_ndlp;
  1270. if (vport->fc_flag & FC_OFFLINE_MODE)
  1271. return;
  1272. lpfc_block_mgmt_io(phba);
  1273. lpfc_linkdown(phba);
  1274. /* Issue an unreg_login to all nodes */
  1275. list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
  1276. if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
  1277. lpfc_unreg_rpi(vport, ndlp);
  1278. lpfc_sli_flush_mbox_queue(phba);
  1279. }
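/*
 * Take the HBA offline: stop all HBA and vport timers, bring down the
 * SLI layer, clean up each vport's node list, and mark every vport
 * FC_OFFLINE_MODE.
 */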
  1280. void
  1281. lpfc_offline(struct lpfc_hba *phba)
  1282. {
  1283. struct Scsi_Host *shost;
  1284. struct lpfc_vport **vports;
  1285. int i;
  1286. if (phba->pport->fc_flag & FC_OFFLINE_MODE)
  1287. return;
  1288. /* stop all timers associated with this hba */
  1289. lpfc_stop_phba_timers(phba);
  1290. vports = lpfc_create_vport_work_array(phba);
  1291. if (vports != NULL)
  1292. for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
  1293. lpfc_stop_vport_timers(vports[i]);
  1294. lpfc_destroy_vport_work_array(vports);
  1295. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  1296. "0460 Bring Adapter offline\n");
  1297. /* Bring down the SLI Layer and cleanup. The HBA is offline
  1298. now. */
  1299. lpfc_sli_hba_down(phba);
  1300. spin_lock_irq(&phba->hbalock);
  1301. phba->work_ha = 0;
  1302. spin_unlock_irq(&phba->hbalock);
  1303. vports = lpfc_create_vport_work_array(phba);
  1304. if (vports != NULL)
  1305. for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
  1306. shost = lpfc_shost_from_vport(vports[i]);
  1307. lpfc_cleanup(vports[i]);
  1308. spin_lock_irq(shost->host_lock);
  1309. vports[i]->work_port_events = 0;
  1310. vports[i]->fc_flag |= FC_OFFLINE_MODE;
  1311. spin_unlock_irq(shost->host_lock);
  1312. }
  1313. lpfc_destroy_vport_work_array(vports);
  1314. }
  1315. /******************************************************************************
  1316. * Function name: lpfc_scsi_free
  1317. *
  1318. * Description: Called from lpfc_pci_remove_one to free internal driver resources
  1319. *
  1320. ******************************************************************************/
  1321. static int
  1322. lpfc_scsi_free(struct lpfc_hba *phba)
  1323. {
  1324. struct lpfc_scsi_buf *sb, *sb_next;
  1325. struct lpfc_iocbq *io, *io_next;
  1326. spin_lock_irq(&phba->hbalock);
  1327. /* Release all the lpfc_scsi_bufs maintained by this host. */
  1328. list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
  1329. list_del(&sb->list);
  1330. pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
  1331. sb->dma_handle);
  1332. kfree(sb);
  1333. phba->total_scsi_bufs--;
  1334. }
  1335. /* Release all the lpfc_iocbq entries maintained by this host. */
  1336. list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
  1337. list_del(&io->list);
  1338. kfree(io);
  1339. phba->total_iocbq_bufs--;
  1340. }
  1341. spin_unlock_irq(&phba->hbalock);
  1342. return 0;
  1343. }
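/*
 * Allocate and set up a Scsi_Host for a new physical or NPIV port,
 * initialize the vport's node list and timers, register the host with
 * the SCSI midlayer, and link the vport onto the HBA's port_list.
 * Returns the new vport, or NULL on failure.
 */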
  1344. struct lpfc_vport *
  1345. lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
  1346. {
  1347. struct lpfc_vport *vport;
  1348. struct Scsi_Host *shost;
  1349. int error = 0;
  1350. if (dev != &phba->pcidev->dev)
  1351. shost = scsi_host_alloc(&lpfc_vport_template,
  1352. sizeof(struct lpfc_vport));
  1353. else
  1354. shost = scsi_host_alloc(&lpfc_template,
  1355. sizeof(struct lpfc_vport));
  1356. if (!shost)
  1357. goto out;
  1358. vport = (struct lpfc_vport *) shost->hostdata;
  1359. vport->phba = phba;
  1360. vport->load_flag |= FC_LOADING;
  1361. vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
  1362. lpfc_get_vport_cfgparam(vport);
  1363. shost->unique_id = instance;
  1364. shost->max_id = LPFC_MAX_TARGET;
  1365. shost->max_lun = vport->cfg_max_luns;
  1366. shost->this_id = -1;
  1367. shost->max_cmd_len = 16;
  1368. /*
  1369. * Set initial can_queue value since 0 is no longer supported and
  1370. * scsi_add_host will fail. This will be adjusted later based on the
  1371. * max xri value determined in hba setup.
  1372. */
  1373. shost->can_queue = phba->cfg_hba_queue_depth - 10;
  1374. if (dev != &phba->pcidev->dev) {
  1375. shost->transportt = lpfc_vport_transport_template;
  1376. vport->port_type = LPFC_NPIV_PORT;
  1377. } else {
  1378. shost->transportt = lpfc_transport_template;
  1379. vport->port_type = LPFC_PHYSICAL_PORT;
  1380. }
  1381. /* Initialize all internally managed lists. */
  1382. INIT_LIST_HEAD(&vport->fc_nodes);
  1383. spin_lock_init(&vport->work_port_lock);
  1384. init_timer(&vport->fc_disctmo);
  1385. vport->fc_disctmo.function = lpfc_disc_timeout;
  1386. vport->fc_disctmo.data = (unsigned long)vport;
  1387. init_timer(&vport->fc_fdmitmo);
  1388. vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
  1389. vport->fc_fdmitmo.data = (unsigned long)vport;
  1390. init_timer(&vport->els_tmofunc);
  1391. vport->els_tmofunc.function = lpfc_els_timeout;
  1392. vport->els_tmofunc.data = (unsigned long)vport;
  1393. error = scsi_add_host(shost, dev);
  1394. if (error)
  1395. goto out_put_shost;
  1396. spin_lock_irq(&phba->hbalock);
  1397. list_add_tail(&vport->listentry, &phba->port_list);
  1398. spin_unlock_irq(&phba->hbalock);
  1399. return vport;
  1400. out_put_shost:
  1401. scsi_host_put(shost);
  1402. out:
  1403. return NULL;
  1404. }
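/**
 * destroy_port - tear down a vport created by lpfc_create_port
 * @vport: Pointer to the vport to destroy
 *
 * Frees the vport name, removes its debugfs entries, unregisters the FC
 * and SCSI hosts, unlinks the vport from the HBA's port list, and cleans
 * up its node list.
 */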
  1405. void
  1406. destroy_port(struct lpfc_vport *vport)
  1407. {
  1408. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1409. struct lpfc_hba *phba = vport->phba;
  1410. kfree(vport->vname);
  1411. lpfc_debugfs_terminate(vport);
  1412. fc_remove_host(shost);
  1413. scsi_remove_host(shost);
  1414. spin_lock_irq(&phba->hbalock);
  1415. list_del_init(&vport->listentry);
  1416. spin_unlock_irq(&phba->hbalock);
  1417. lpfc_cleanup(vport);
  1418. return;
  1419. }
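/**
 * lpfc_get_instance - allocate a unique adapter instance number
 *
 * Reserves an unused number from the lpfc_hba_index IDR.
 *
 * Returns the allocated instance number, or -1 on allocation failure.
 */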
  1420. int
  1421. lpfc_get_instance(void)
  1422. {
  1423. int instance = 0;
  1424. /* Assign an unused number */
  1425. if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
  1426. return -1;
  1427. if (idr_get_new(&lpfc_hba_index, NULL, &instance))
  1428. return -1;
  1429. return instance;
  1430. }
  1431. /*
  1432. * Note: there is no scan_start function as adapter initialization
  1433. * will have asynchronously kicked off the link initialization.
  1434. */
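/**
 * lpfc_scan_finished - tell the SCSI midlayer whether the scan may complete
 * @shost: Pointer to the SCSI host being scanned
 * @time: elapsed scan time, in jiffies
 *
 * Returns 1 once discovery has settled, the driver is unloading, or the
 * 30-second scan / 15-second link-down limits have been exceeded; returns
 * 0 while discovery is still in progress.
 */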
  1435. int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
  1436. {
  1437. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  1438. struct lpfc_hba *phba = vport->phba;
  1439. int stat = 0;
  1440. spin_lock_irq(shost->host_lock);
  1441. if (vport->load_flag & FC_UNLOADING) {
  1442. stat = 1;
  1443. goto finished;
  1444. }
  1445. if (time >= 30 * HZ) {
  1446. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  1447. "0461 Scanning longer than 30 "
  1448. "seconds. Continuing initialization\n");
  1449. stat = 1;
  1450. goto finished;
  1451. }
  1452. if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
  1453. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  1454. "0465 Link down longer than 15 "
  1455. "seconds. Continuing initialization\n");
  1456. stat = 1;
  1457. goto finished;
  1458. }
  1459. if (vport->port_state != LPFC_VPORT_READY)
  1460. goto finished;
  1461. if (vport->num_disc_nodes || vport->fc_prli_sent)
  1462. goto finished;
  1463. if (vport->fc_map_cnt == 0 && time < 2 * HZ)
  1464. goto finished;
  1465. if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
  1466. goto finished;
  1467. stat = 1;
  1468. finished:
  1469. spin_unlock_irq(shost->host_lock);
  1470. return stat;
  1471. }
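/**
 * lpfc_host_attrib_init - set the fixed fc_host attributes of a SCSI host
 * @shost: Pointer to the SCSI host to initialize
 *
 * Fills in the node and port names, supported classes and FC4 types,
 * supported speeds, maximum frame size and maximum NPIV vports, then
 * clears the FC_LOADING flag on the vport.
 */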
  1472. void lpfc_host_attrib_init(struct Scsi_Host *shost)
  1473. {
  1474. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  1475. struct lpfc_hba *phba = vport->phba;
  1476. /*
1477. * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
  1478. */
  1479. fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
  1480. fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
  1481. fc_host_supported_classes(shost) = FC_COS_CLASS3;
  1482. memset(fc_host_supported_fc4s(shost), 0,
  1483. sizeof(fc_host_supported_fc4s(shost)));
  1484. fc_host_supported_fc4s(shost)[2] = 1;
  1485. fc_host_supported_fc4s(shost)[7] = 1;
  1486. lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
  1487. sizeof fc_host_symbolic_name(shost));
  1488. fc_host_supported_speeds(shost) = 0;
  1489. if (phba->lmt & LMT_10Gb)
  1490. fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
  1491. if (phba->lmt & LMT_4Gb)
  1492. fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
  1493. if (phba->lmt & LMT_2Gb)
  1494. fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
  1495. if (phba->lmt & LMT_1Gb)
  1496. fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
  1497. fc_host_maxframe_size(shost) =
  1498. (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
  1499. (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
  1500. /* This value is also unchanging */
  1501. memset(fc_host_active_fc4s(shost), 0,
  1502. sizeof(fc_host_active_fc4s(shost)));
  1503. fc_host_active_fc4s(shost)[2] = 1;
  1504. fc_host_active_fc4s(shost)[7] = 1;
  1505. fc_host_max_npiv_vports(shost) = phba->max_vpi;
  1506. spin_lock_irq(shost->host_lock);
  1507. vport->load_flag &= ~FC_LOADING;
  1508. spin_unlock_irq(shost->host_lock);
  1509. }
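/**
 * lpfc_pci_probe_one - PCI probe entry point for an lpfc HBA
 * @pdev: Pointer to the PCI device being probed
 * @pid: matching entry from lpfc_id_table
 *
 * Enables the PCI device, allocates and initializes the lpfc_hba
 * structure and its timers, maps the SLIM and control-register BARs,
 * allocates the SLI-2 and HBQ DMA areas, populates the iocb list, starts
 * the worker thread, creates the physical port, sets up the interrupt
 * (MSI when configured), brings up the SLI layer, and starts the SCSI
 * scan.
 *
 * Returns 0 on success, or a negative errno after unwinding on failure.
 */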
  1510. static int __devinit
  1511. lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
  1512. {
  1513. struct lpfc_vport *vport = NULL;
  1514. struct lpfc_hba *phba;
  1515. struct lpfc_sli *psli;
  1516. struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
  1517. struct Scsi_Host *shost = NULL;
  1518. void *ptr;
  1519. unsigned long bar0map_len, bar2map_len;
  1520. int error = -ENODEV;
  1521. int i, hbq_count;
  1522. uint16_t iotag;
  1523. if (pci_enable_device(pdev))
  1524. goto out;
  1525. if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
  1526. goto out_disable_device;
  1527. phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
  1528. if (!phba)
  1529. goto out_release_regions;
  1530. spin_lock_init(&phba->hbalock);
  1531. phba->pcidev = pdev;
  1532. /* Assign an unused board number */
  1533. if ((phba->brd_no = lpfc_get_instance()) < 0)
  1534. goto out_free_phba;
  1535. INIT_LIST_HEAD(&phba->port_list);
  1536. /*
  1537. * Get all the module params for configuring this host and then
  1538. * establish the host.
  1539. */
  1540. lpfc_get_cfgparam(phba);
  1541. phba->max_vpi = LPFC_MAX_VPI;
  1542. /* Initialize timers used by driver */
  1543. init_timer(&phba->fc_estabtmo);
  1544. phba->fc_estabtmo.function = lpfc_establish_link_tmo;
  1545. phba->fc_estabtmo.data = (unsigned long)phba;
  1546. init_timer(&phba->hb_tmofunc);
  1547. phba->hb_tmofunc.function = lpfc_hb_timeout;
  1548. phba->hb_tmofunc.data = (unsigned long)phba;
  1549. psli = &phba->sli;
  1550. init_timer(&psli->mbox_tmo);
  1551. psli->mbox_tmo.function = lpfc_mbox_timeout;
  1552. psli->mbox_tmo.data = (unsigned long) phba;
  1553. init_timer(&phba->fcp_poll_timer);
  1554. phba->fcp_poll_timer.function = lpfc_poll_timeout;
  1555. phba->fcp_poll_timer.data = (unsigned long) phba;
  1556. init_timer(&phba->fabric_block_timer);
  1557. phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
  1558. phba->fabric_block_timer.data = (unsigned long) phba;
  1559. pci_set_master(pdev);
  1560. pci_try_set_mwi(pdev);
  1561. if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
  1562. if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
  1563. goto out_idr_remove;
  1564. /*
  1565. * Get the bus address of Bar0 and Bar2 and the number of bytes
  1566. * required by each mapping.
  1567. */
  1568. phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
  1569. bar0map_len = pci_resource_len(phba->pcidev, 0);
  1570. phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
  1571. bar2map_len = pci_resource_len(phba->pcidev, 2);
  1572. /* Map HBA SLIM to a kernel virtual address. */
  1573. phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
  1574. if (!phba->slim_memmap_p) {
  1575. error = -ENODEV;
  1576. dev_printk(KERN_ERR, &pdev->dev,
  1577. "ioremap failed for SLIM memory.\n");
  1578. goto out_idr_remove;
  1579. }
  1580. /* Map HBA Control Registers to a kernel virtual address. */
  1581. phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
  1582. if (!phba->ctrl_regs_memmap_p) {
  1583. error = -ENODEV;
  1584. dev_printk(KERN_ERR, &pdev->dev,
  1585. "ioremap failed for HBA control registers.\n");
  1586. goto out_iounmap_slim;
  1587. }
  1588. /* Allocate memory for SLI-2 structures */
  1589. phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
  1590. &phba->slim2p_mapping, GFP_KERNEL);
  1591. if (!phba->slim2p)
  1592. goto out_iounmap;
  1593. memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
  1594. phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
  1595. lpfc_sli_hbq_size(),
  1596. &phba->hbqslimp.phys,
  1597. GFP_KERNEL);
  1598. if (!phba->hbqslimp.virt)
  1599. goto out_free_slim;
  1600. hbq_count = lpfc_sli_hbq_count();
  1601. ptr = phba->hbqslimp.virt;
  1602. for (i = 0; i < hbq_count; ++i) {
  1603. phba->hbqs[i].hbq_virt = ptr;
  1604. INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
  1605. ptr += (lpfc_hbq_defs[i]->entry_count *
  1606. sizeof(struct lpfc_hbq_entry));
  1607. }
  1608. phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
  1609. phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
  1610. memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
  1611. /* Initialize the SLI Layer to run with lpfc HBAs. */
  1612. lpfc_sli_setup(phba);
  1613. lpfc_sli_queue_setup(phba);
  1614. error = lpfc_mem_alloc(phba);
  1615. if (error)
  1616. goto out_free_hbqslimp;
  1617. /* Initialize and populate the iocb list per host. */
  1618. INIT_LIST_HEAD(&phba->lpfc_iocb_list);
  1619. for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
  1620. iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
  1621. if (iocbq_entry == NULL) {
  1622. printk(KERN_ERR "%s: only allocated %d iocbs of "
  1623. "expected %d count. Unloading driver.\n",
  1624. __FUNCTION__, i, LPFC_IOCB_LIST_CNT);
  1625. error = -ENOMEM;
  1626. goto out_free_iocbq;
  1627. }
  1628. iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
  1629. if (iotag == 0) {
1630. kfree(iocbq_entry);
  1631. printk(KERN_ERR "%s: failed to allocate IOTAG. "
  1632. "Unloading driver.\n",
  1633. __FUNCTION__);
  1634. error = -ENOMEM;
  1635. goto out_free_iocbq;
  1636. }
  1637. spin_lock_irq(&phba->hbalock);
  1638. list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
  1639. phba->total_iocbq_bufs++;
  1640. spin_unlock_irq(&phba->hbalock);
  1641. }
  1642. /* Initialize HBA structure */
  1643. phba->fc_edtov = FF_DEF_EDTOV;
  1644. phba->fc_ratov = FF_DEF_RATOV;
  1645. phba->fc_altov = FF_DEF_ALTOV;
  1646. phba->fc_arbtov = FF_DEF_ARBTOV;
  1647. INIT_LIST_HEAD(&phba->work_list);
  1648. phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
  1649. phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
  1650. /* Startup the kernel thread for this host adapter. */
  1651. phba->worker_thread = kthread_run(lpfc_do_work, phba,
  1652. "lpfc_worker_%d", phba->brd_no);
  1653. if (IS_ERR(phba->worker_thread)) {
  1654. error = PTR_ERR(phba->worker_thread);
  1655. goto out_free_iocbq;
  1656. }
  1657. /* Initialize the list of scsi buffers used by driver for scsi IO. */
  1658. spin_lock_init(&phba->scsi_buf_list_lock);
  1659. INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
  1660. /* Initialize list of fabric iocbs */
  1661. INIT_LIST_HEAD(&phba->fabric_iocb_list);
  1662. vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
  1663. if (!vport)
  1664. goto out_kthread_stop;
  1665. shost = lpfc_shost_from_vport(vport);
  1666. phba->pport = vport;
  1667. lpfc_debugfs_initialize(vport);
  1668. pci_set_drvdata(pdev, shost);
  1669. if (phba->cfg_use_msi) {
  1670. error = pci_enable_msi(phba->pcidev);
  1671. if (!error)
  1672. phba->using_msi = 1;
  1673. else
  1674. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  1675. "0452 Enable MSI failed, continuing "
  1676. "with IRQ\n");
  1677. }
  1678. error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
  1679. LPFC_DRIVER_NAME, phba);
  1680. if (error) {
  1681. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1682. "0451 Enable interrupt handler failed\n");
  1683. goto out_disable_msi;
  1684. }
  1685. phba->MBslimaddr = phba->slim_memmap_p;
  1686. phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
  1687. phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
  1688. phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
  1689. phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
  1690. if (lpfc_alloc_sysfs_attr(vport))
  1691. goto out_free_irq;
  1692. if (lpfc_sli_hba_setup(phba))
  1693. goto out_remove_device;
  1694. /*
  1695. * hba setup may have changed the hba_queue_depth so we need to adjust
  1696. * the value of can_queue.
  1697. */
  1698. shost->can_queue = phba->cfg_hba_queue_depth - 10;
  1699. lpfc_host_attrib_init(shost);
  1700. if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  1701. spin_lock_irq(shost->host_lock);
  1702. lpfc_poll_start_timer(phba);
  1703. spin_unlock_irq(shost->host_lock);
  1704. }
  1705. scsi_scan_host(shost);
  1706. return 0;
  1707. out_remove_device:
  1708. lpfc_free_sysfs_attr(vport);
  1709. spin_lock_irq(shost->host_lock);
  1710. vport->load_flag |= FC_UNLOADING;
  1711. spin_unlock_irq(shost->host_lock);
  1712. out_free_irq:
  1713. lpfc_stop_phba_timers(phba);
  1714. phba->pport->work_port_events = 0;
  1715. free_irq(phba->pcidev->irq, phba);
  1716. out_disable_msi:
  1717. if (phba->using_msi)
  1718. pci_disable_msi(phba->pcidev);
  1719. destroy_port(vport);
  1720. out_kthread_stop:
  1721. kthread_stop(phba->worker_thread);
  1722. out_free_iocbq:
  1723. list_for_each_entry_safe(iocbq_entry, iocbq_next,
  1724. &phba->lpfc_iocb_list, list) {
  1725. kfree(iocbq_entry);
  1726. phba->total_iocbq_bufs--;
  1727. }
  1728. lpfc_mem_free(phba);
  1729. out_free_hbqslimp:
  1730. dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
  1731. phba->hbqslimp.phys);
  1732. out_free_slim:
  1733. dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
  1734. phba->slim2p_mapping);
  1735. out_iounmap:
  1736. iounmap(phba->ctrl_regs_memmap_p);
  1737. out_iounmap_slim:
  1738. iounmap(phba->slim_memmap_p);
  1739. out_idr_remove:
  1740. idr_remove(&lpfc_hba_index, phba->brd_no);
  1741. out_free_phba:
  1742. kfree(phba);
  1743. out_release_regions:
  1744. pci_release_regions(pdev);
  1745. out_disable_device:
  1746. pci_disable_device(pdev);
  1747. out:
  1748. pci_set_drvdata(pdev, NULL);
  1749. if (shost)
  1750. scsi_host_put(shost);
  1751. return error;
  1752. }
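/**
 * lpfc_pci_remove_one - PCI remove entry point for an lpfc HBA
 * @pdev: Pointer to the PCI device being removed
 *
 * Marks the physical port as unloading, removes the FC and SCSI hosts,
 * brings down and restarts the SLI layer, stops the timers and worker
 * thread, frees the interrupt, and releases the DMA areas, register
 * mappings and PCI resources.
 */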
  1753. static void __devexit
  1754. lpfc_pci_remove_one(struct pci_dev *pdev)
  1755. {
  1756. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  1757. struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
  1758. struct lpfc_hba *phba = vport->phba;
  1759. spin_lock_irq(&phba->hbalock);
  1760. vport->load_flag |= FC_UNLOADING;
  1761. spin_unlock_irq(&phba->hbalock);
  1762. kfree(vport->vname);
  1763. lpfc_free_sysfs_attr(vport);
  1764. fc_remove_host(shost);
  1765. scsi_remove_host(shost);
  1766. /*
1767. * Bring down the SLI Layer. This step disables all interrupts,
  1768. * clears the rings, discards all mailbox commands, and resets
  1769. * the HBA.
  1770. */
  1771. lpfc_sli_hba_down(phba);
  1772. lpfc_sli_brdrestart(phba);
  1773. lpfc_stop_phba_timers(phba);
  1774. spin_lock_irq(&phba->hbalock);
  1775. list_del_init(&vport->listentry);
  1776. spin_unlock_irq(&phba->hbalock);
  1777. lpfc_debugfs_terminate(vport);
  1778. lpfc_cleanup(vport);
  1779. kthread_stop(phba->worker_thread);
  1780. /* Release the irq reservation */
  1781. free_irq(phba->pcidev->irq, phba);
  1782. if (phba->using_msi)
  1783. pci_disable_msi(phba->pcidev);
  1784. pci_set_drvdata(pdev, NULL);
  1785. scsi_host_put(shost);
  1786. /*
  1787. * Call scsi_free before mem_free since scsi bufs are released to their
  1788. * corresponding pools here.
  1789. */
  1790. lpfc_scsi_free(phba);
  1791. lpfc_mem_free(phba);
  1792. dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
  1793. phba->hbqslimp.phys);
  1794. /* Free resources associated with SLI2 interface */
  1795. dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
  1796. phba->slim2p, phba->slim2p_mapping);
  1797. /* unmap adapter SLIM and Control Registers */
  1798. iounmap(phba->ctrl_regs_memmap_p);
  1799. iounmap(phba->slim_memmap_p);
  1800. idr_remove(&lpfc_hba_index, phba->brd_no);
  1801. kfree(phba);
  1802. pci_release_regions(pdev);
  1803. pci_disable_device(pdev);
  1804. }
  1805. /**
1806. * lpfc_io_error_detected - called when a PCI error is detected
1807. * @pdev: Pointer to PCI device
1808. * @state: The current PCI connection state
  1809. *
  1810. * This function is called after a PCI bus error affecting
  1811. * this device has been detected.
  1812. */
  1813. static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
  1814. pci_channel_state_t state)
  1815. {
  1816. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  1817. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  1818. struct lpfc_sli *psli = &phba->sli;
  1819. struct lpfc_sli_ring *pring;
  1820. if (state == pci_channel_io_perm_failure)
  1821. return PCI_ERS_RESULT_DISCONNECT;
  1822. pci_disable_device(pdev);
  1823. /*
  1824. * There may be I/Os dropped by the firmware.
1825. * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
1826. * retry them after re-establishing the link.
  1827. */
  1828. pring = &psli->ring[psli->fcp_ring];
  1829. lpfc_sli_abort_iocb_ring(phba, pring);
  1830. /* Release the irq reservation */
  1831. free_irq(phba->pcidev->irq, phba);
  1832. if (phba->using_msi)
  1833. pci_disable_msi(phba->pcidev);
  1834. /* Request a slot reset. */
  1835. return PCI_ERS_RESULT_NEED_RESET;
  1836. }
  1837. /**
  1838. * lpfc_io_slot_reset - called after the pci bus has been reset.
  1839. * @pdev: Pointer to PCI device
  1840. *
  1841. * Restart the card from scratch, as if from a cold-boot.
  1842. */
  1843. static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
  1844. {
  1845. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  1846. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  1847. struct lpfc_sli *psli = &phba->sli;
  1848. int bars = pci_select_bars(pdev, IORESOURCE_MEM);
  1849. dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
  1850. if (pci_enable_device_bars(pdev, bars)) {
  1851. printk(KERN_ERR "lpfc: Cannot re-enable "
  1852. "PCI device after reset.\n");
  1853. return PCI_ERS_RESULT_DISCONNECT;
  1854. }
  1855. pci_set_master(pdev);
  1856. /* Re-establishing Link */
  1857. spin_lock_irq(shost->host_lock);
  1858. phba->pport->fc_flag |= FC_ESTABLISH_LINK;
  1859. spin_unlock_irq(shost->host_lock);
  1860. spin_lock_irq(&phba->hbalock);
  1861. psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
  1862. spin_unlock_irq(&phba->hbalock);
  1863. /* Take device offline; this will perform cleanup */
  1864. lpfc_offline(phba);
  1865. lpfc_sli_brdrestart(phba);
  1866. return PCI_ERS_RESULT_RECOVERED;
  1867. }
  1868. /**
  1869. * lpfc_io_resume - called when traffic can start flowing again.
  1870. * @pdev: Pointer to PCI device
  1871. *
  1872. * This callback is called when the error recovery driver tells us that
1873. * it's OK to resume normal operation.
  1874. */
  1875. static void lpfc_io_resume(struct pci_dev *pdev)
  1876. {
  1877. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  1878. struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
  1879. if (lpfc_online(phba) == 0) {
  1880. mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
  1881. }
  1882. }
  1883. static struct pci_device_id lpfc_id_table[] = {
  1884. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
  1885. PCI_ANY_ID, PCI_ANY_ID, },
  1886. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
  1887. PCI_ANY_ID, PCI_ANY_ID, },
  1888. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
  1889. PCI_ANY_ID, PCI_ANY_ID, },
  1890. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
  1891. PCI_ANY_ID, PCI_ANY_ID, },
  1892. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
  1893. PCI_ANY_ID, PCI_ANY_ID, },
  1894. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
  1895. PCI_ANY_ID, PCI_ANY_ID, },
  1896. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
  1897. PCI_ANY_ID, PCI_ANY_ID, },
  1898. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
  1899. PCI_ANY_ID, PCI_ANY_ID, },
  1900. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
  1901. PCI_ANY_ID, PCI_ANY_ID, },
  1902. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
  1903. PCI_ANY_ID, PCI_ANY_ID, },
  1904. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
  1905. PCI_ANY_ID, PCI_ANY_ID, },
  1906. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
  1907. PCI_ANY_ID, PCI_ANY_ID, },
  1908. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
  1909. PCI_ANY_ID, PCI_ANY_ID, },
  1910. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
  1911. PCI_ANY_ID, PCI_ANY_ID, },
  1912. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
  1913. PCI_ANY_ID, PCI_ANY_ID, },
  1914. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
  1915. PCI_ANY_ID, PCI_ANY_ID, },
  1916. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
  1917. PCI_ANY_ID, PCI_ANY_ID, },
  1918. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
  1919. PCI_ANY_ID, PCI_ANY_ID, },
  1920. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
  1921. PCI_ANY_ID, PCI_ANY_ID, },
  1922. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
  1923. PCI_ANY_ID, PCI_ANY_ID, },
  1924. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
  1925. PCI_ANY_ID, PCI_ANY_ID, },
  1926. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
  1927. PCI_ANY_ID, PCI_ANY_ID, },
  1928. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
  1929. PCI_ANY_ID, PCI_ANY_ID, },
  1930. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
  1931. PCI_ANY_ID, PCI_ANY_ID, },
  1932. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
  1933. PCI_ANY_ID, PCI_ANY_ID, },
  1934. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
  1935. PCI_ANY_ID, PCI_ANY_ID, },
  1936. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
  1937. PCI_ANY_ID, PCI_ANY_ID, },
  1938. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
  1939. PCI_ANY_ID, PCI_ANY_ID, },
  1940. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
  1941. PCI_ANY_ID, PCI_ANY_ID, },
  1942. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
  1943. PCI_ANY_ID, PCI_ANY_ID, },
  1944. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
  1945. PCI_ANY_ID, PCI_ANY_ID, },
  1946. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
  1947. PCI_ANY_ID, PCI_ANY_ID, },
  1948. {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
  1949. PCI_ANY_ID, PCI_ANY_ID, },
  1950. { 0 }
  1951. };
  1952. MODULE_DEVICE_TABLE(pci, lpfc_id_table);
  1953. static struct pci_error_handlers lpfc_err_handler = {
  1954. .error_detected = lpfc_io_error_detected,
  1955. .slot_reset = lpfc_io_slot_reset,
  1956. .resume = lpfc_io_resume,
  1957. };
  1958. static struct pci_driver lpfc_driver = {
  1959. .name = LPFC_DRIVER_NAME,
  1960. .id_table = lpfc_id_table,
  1961. .probe = lpfc_pci_probe_one,
  1962. .remove = __devexit_p(lpfc_pci_remove_one),
  1963. .err_handler = &lpfc_err_handler,
  1964. };
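/**
 * lpfc_init - lpfc module initialization
 *
 * Attaches the physical and vport FC transport templates and registers
 * the PCI driver.
 *
 * Returns 0 on success, -ENOMEM or the pci_register_driver() error on
 * failure.
 */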
  1965. static int __init
  1966. lpfc_init(void)
  1967. {
  1968. int error = 0;
  1969. printk(LPFC_MODULE_DESC "\n");
  1970. printk(LPFC_COPYRIGHT "\n");
  1971. lpfc_transport_template =
  1972. fc_attach_transport(&lpfc_transport_functions);
  1973. lpfc_vport_transport_template =
  1974. fc_attach_transport(&lpfc_vport_transport_functions);
  1975. if (!lpfc_transport_template || !lpfc_vport_transport_template)
  1976. return -ENOMEM;
  1977. error = pci_register_driver(&lpfc_driver);
  1978. if (error) {
  1979. fc_release_transport(lpfc_transport_template);
  1980. fc_release_transport(lpfc_vport_transport_template);
  1981. }
  1982. return error;
  1983. }
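/**
 * lpfc_exit - lpfc module removal
 *
 * Unregisters the PCI driver and releases both FC transport templates.
 */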
  1984. static void __exit
  1985. lpfc_exit(void)
  1986. {
  1987. pci_unregister_driver(&lpfc_driver);
  1988. fc_release_transport(lpfc_transport_template);
  1989. fc_release_transport(lpfc_vport_transport_template);
  1990. }
  1991. module_init(lpfc_init);
  1992. module_exit(lpfc_exit);
  1993. MODULE_LICENSE("GPL");
  1994. MODULE_DESCRIPTION(LPFC_MODULE_DESC);
  1995. MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
  1996. MODULE_VERSION("0:" LPFC_DRIVER_VERSION);