lpfc_nportdisc.c 61 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
66116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051
  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2004-2007 Emulex. All rights reserved. *
  5. * EMULEX and SLI are trademarks of Emulex. *
  6. * www.emulex.com *
  7. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  8. * *
  9. * This program is free software; you can redistribute it and/or *
  10. * modify it under the terms of version 2 of the GNU General *
  11. * Public License as published by the Free Software Foundation. *
  12. * This program is distributed in the hope that it will be useful. *
  13. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  14. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  15. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  16. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  17. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  18. * more details, a copy of which can be found in the file COPYING *
  19. * included with this package. *
  20. *******************************************************************/
  21. #include <linux/blkdev.h>
  22. #include <linux/pci.h>
  23. #include <linux/interrupt.h>
  24. #include <scsi/scsi.h>
  25. #include <scsi/scsi_device.h>
  26. #include <scsi/scsi_host.h>
  27. #include <scsi/scsi_transport_fc.h>
  28. #include "lpfc_hw.h"
  29. #include "lpfc_sli.h"
  30. #include "lpfc_disc.h"
  31. #include "lpfc_scsi.h"
  32. #include "lpfc.h"
  33. #include "lpfc_logmsg.h"
  34. #include "lpfc_crtn.h"
  35. #include "lpfc_vport.h"
  36. /* Called to verify a rcv'ed ADISC was intended for us. */
  37. static int
  38. lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  39. struct lpfc_name *nn, struct lpfc_name *pn)
  40. {
  41. /* Compare the ADISC rsp WWNN / WWPN matches our internal node
  42. * table entry for that node.
  43. */
  44. if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
  45. return 0;
  46. if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
  47. return 0;
  48. /* we match, return success */
  49. return 1;
  50. }
  51. int
  52. lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  53. struct serv_parm * sp, uint32_t class)
  54. {
  55. volatile struct serv_parm *hsp = &vport->fc_sparam;
  56. uint16_t hsp_value, ssp_value = 0;
  57. /*
  58. * The receive data field size and buffer-to-buffer receive data field
  59. * size entries are 16 bits but are represented as two 8-bit fields in
  60. * the driver data structure to account for rsvd bits and other control
  61. * bits. Reconstruct and compare the fields as a 16-bit values before
  62. * correcting the byte values.
  63. */
  64. if (sp->cls1.classValid) {
  65. hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
  66. hsp->cls1.rcvDataSizeLsb;
  67. ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
  68. sp->cls1.rcvDataSizeLsb;
  69. if (!ssp_value)
  70. goto bad_service_param;
  71. if (ssp_value > hsp_value) {
  72. sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
  73. sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
  74. }
  75. } else if (class == CLASS1) {
  76. goto bad_service_param;
  77. }
  78. if (sp->cls2.classValid) {
  79. hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
  80. hsp->cls2.rcvDataSizeLsb;
  81. ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
  82. sp->cls2.rcvDataSizeLsb;
  83. if (!ssp_value)
  84. goto bad_service_param;
  85. if (ssp_value > hsp_value) {
  86. sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
  87. sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
  88. }
  89. } else if (class == CLASS2) {
  90. goto bad_service_param;
  91. }
  92. if (sp->cls3.classValid) {
  93. hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
  94. hsp->cls3.rcvDataSizeLsb;
  95. ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
  96. sp->cls3.rcvDataSizeLsb;
  97. if (!ssp_value)
  98. goto bad_service_param;
  99. if (ssp_value > hsp_value) {
  100. sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
  101. sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
  102. }
  103. } else if (class == CLASS3) {
  104. goto bad_service_param;
  105. }
  106. /*
  107. * Preserve the upper four bits of the MSB from the PLOGI response.
  108. * These bits contain the Buffer-to-Buffer State Change Number
  109. * from the target and need to be passed to the FW.
  110. */
  111. hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
  112. ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
  113. if (ssp_value > hsp_value) {
  114. sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
  115. sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
  116. (hsp->cmn.bbRcvSizeMsb & 0x0F);
  117. }
  118. memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
  119. memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
  120. return 1;
  121. bad_service_param:
  122. lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
  123. "%d (%d):0207 Device %x "
  124. "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
  125. "invalid service parameters. Ignoring device.\n",
  126. vport->phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
  127. sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
  128. sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
  129. sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
  130. sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
  131. return 0;
  132. }
  133. static void *
  134. lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  135. struct lpfc_iocbq *rspiocb)
  136. {
  137. struct lpfc_dmabuf *pcmd, *prsp;
  138. uint32_t *lp;
  139. void *ptr = NULL;
  140. IOCB_t *irsp;
  141. irsp = &rspiocb->iocb;
  142. pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
  143. /* For lpfc_els_abort, context2 could be zero'ed to delay
  144. * freeing associated memory till after ABTS completes.
  145. */
  146. if (pcmd) {
  147. prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
  148. list);
  149. if (prsp) {
  150. lp = (uint32_t *) prsp->virt;
  151. ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
  152. }
  153. } else {
  154. /* Force ulpStatus error since we are returning NULL ptr */
  155. if (!(irsp->ulpStatus)) {
  156. irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
  157. irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
  158. }
  159. ptr = NULL;
  160. }
  161. return ptr;
  162. }
  163. /*
  164. * Free resources / clean up outstanding I/Os
  165. * associated with a LPFC_NODELIST entry. This
  166. * routine effectively results in a "software abort".
  167. */
  168. int
  169. lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
  170. {
  171. LIST_HEAD(completions);
  172. struct lpfc_sli *psli = &phba->sli;
  173. struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
  174. struct lpfc_iocbq *iocb, *next_iocb;
  175. IOCB_t *cmd;
  176. /* Abort outstanding I/O on NPort <nlp_DID> */
  177. lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
  178. "%d (%d):0205 Abort outstanding I/O on NPort x%x "
  179. "Data: x%x x%x x%x\n",
  180. phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
  181. ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
  182. lpfc_fabric_abort_nport(ndlp);
  183. /* First check the txq */
  184. spin_lock_irq(&phba->hbalock);
  185. list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
  186. /* Check to see if iocb matches the nport we are looking
  187. for */
  188. if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
  189. /* It matches, so deque and call compl with an
  190. error */
  191. list_move_tail(&iocb->list, &completions);
  192. pring->txq_cnt--;
  193. }
  194. }
  195. /* Next check the txcmplq */
  196. list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
  197. /* Check to see if iocb matches the nport we are looking
  198. for */
  199. if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
  200. lpfc_sli_issue_abort_iotag(phba, pring, iocb);
  201. }
  202. }
  203. spin_unlock_irq(&phba->hbalock);
  204. while (!list_empty(&completions)) {
  205. iocb = list_get_first(&completions, struct lpfc_iocbq, list);
  206. cmd = &iocb->iocb;
  207. list_del_init(&iocb->list);
  208. if (!iocb->iocb_cmpl)
  209. lpfc_sli_release_iocbq(phba, iocb);
  210. else {
  211. cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
  212. cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
  213. (iocb->iocb_cmpl) (phba, iocb, iocb);
  214. }
  215. }
  216. /* If we are delaying issuing an ELS command, cancel it */
  217. if (ndlp->nlp_flag & NLP_DELAY_TMO)
  218. lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
  219. return 0;
  220. }
  221. static int
  222. lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  223. struct lpfc_iocbq *cmdiocb)
  224. {
  225. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  226. struct lpfc_hba *phba = vport->phba;
  227. struct lpfc_dmabuf *pcmd;
  228. uint32_t *lp;
  229. IOCB_t *icmd;
  230. struct serv_parm *sp;
  231. LPFC_MBOXQ_t *mbox;
  232. struct ls_rjt stat;
  233. int rc;
  234. memset(&stat, 0, sizeof (struct ls_rjt));
  235. if (vport->port_state <= LPFC_FLOGI) {
  236. /* Before responding to PLOGI, check for pt2pt mode.
  237. * If we are pt2pt, with an outstanding FLOGI, abort
  238. * the FLOGI and resend it first.
  239. */
  240. if (vport->fc_flag & FC_PT2PT) {
  241. lpfc_els_abort_flogi(phba);
  242. if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
  243. /* If the other side is supposed to initiate
  244. * the PLOGI anyway, just ACC it now and
  245. * move on with discovery.
  246. */
  247. phba->fc_edtov = FF_DEF_EDTOV;
  248. phba->fc_ratov = FF_DEF_RATOV;
  249. /* Start discovery - this should just do
  250. CLEAR_LA */
  251. lpfc_disc_start(vport);
  252. } else
  253. lpfc_initial_flogi(vport);
  254. } else {
  255. stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
  256. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  257. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
  258. ndlp);
  259. return 0;
  260. }
  261. }
  262. pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
  263. lp = (uint32_t *) pcmd->virt;
  264. sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
  265. if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
  266. /* Reject this request because invalid parameters */
  267. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  268. stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
  269. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp);
  270. return 0;
  271. }
  272. icmd = &cmdiocb->iocb;
  273. /* PLOGI chkparm OK */
  274. lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
  275. "%d (%d):0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
  276. phba->brd_no, vport->vpi,
  277. ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
  278. ndlp->nlp_rpi);
  279. if (phba->cfg_fcp_class == 2 && sp->cls2.classValid)
  280. ndlp->nlp_fcp_info |= CLASS2;
  281. else
  282. ndlp->nlp_fcp_info |= CLASS3;
  283. ndlp->nlp_class_sup = 0;
  284. if (sp->cls1.classValid)
  285. ndlp->nlp_class_sup |= FC_COS_CLASS1;
  286. if (sp->cls2.classValid)
  287. ndlp->nlp_class_sup |= FC_COS_CLASS2;
  288. if (sp->cls3.classValid)
  289. ndlp->nlp_class_sup |= FC_COS_CLASS3;
  290. if (sp->cls4.classValid)
  291. ndlp->nlp_class_sup |= FC_COS_CLASS4;
  292. ndlp->nlp_maxframe =
  293. ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
  294. /* no need to reg_login if we are already in one of these states */
  295. switch (ndlp->nlp_state) {
  296. case NLP_STE_NPR_NODE:
  297. if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
  298. break;
  299. case NLP_STE_REG_LOGIN_ISSUE:
  300. case NLP_STE_PRLI_ISSUE:
  301. case NLP_STE_UNMAPPED_NODE:
  302. case NLP_STE_MAPPED_NODE:
  303. lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
  304. return 1;
  305. }
  306. if ((vport->fc_flag & FC_PT2PT) &&
  307. !(vport->fc_flag & FC_PT2PT_PLOGI)) {
  308. /* rcv'ed PLOGI decides what our NPortId will be */
  309. vport->fc_myDID = icmd->un.rcvels.parmRo;
  310. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  311. if (mbox == NULL)
  312. goto out;
  313. lpfc_config_link(phba, mbox);
  314. mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  315. mbox->vport = vport;
  316. rc = lpfc_sli_issue_mbox
  317. (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
  318. if (rc == MBX_NOT_FINISHED) {
  319. mempool_free(mbox, phba->mbox_mem_pool);
  320. goto out;
  321. }
  322. lpfc_can_disctmo(vport);
  323. }
  324. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  325. if (!mbox)
  326. goto out;
  327. rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
  328. (uint8_t *) sp, mbox, 0);
  329. if (rc) {
  330. mempool_free(mbox, phba->mbox_mem_pool);
  331. goto out;
  332. }
  333. /* ACC PLOGI rsp command needs to execute first,
  334. * queue this mbox command to be processed later.
  335. */
  336. mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
  337. /*
  338. * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
  339. * command issued in lpfc_cmpl_els_acc().
  340. */
  341. mbox->vport = vport;
  342. spin_lock_irq(shost->host_lock);
  343. ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
  344. spin_unlock_irq(shost->host_lock);
  345. /*
  346. * If there is an outstanding PLOGI issued, abort it before
  347. * sending ACC rsp for received PLOGI. If pending plogi
  348. * is not canceled here, the plogi will be rejected by
  349. * remote port and will be retried. On a configuration with
  350. * single discovery thread, this will cause a huge delay in
  351. * discovery. Also this will cause multiple state machines
  352. * running in parallel for this node.
  353. */
  354. if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
  355. /* software abort outstanding PLOGI */
  356. lpfc_els_abort(phba, ndlp);
  357. }
  358. lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
  359. return 1;
  360. out:
  361. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  362. stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
  363. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp);
  364. return 0;
  365. }
  366. static int
  367. lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  368. struct lpfc_iocbq *cmdiocb)
  369. {
  370. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  371. struct lpfc_dmabuf *pcmd;
  372. struct serv_parm *sp;
  373. struct lpfc_name *pnn, *ppn;
  374. struct ls_rjt stat;
  375. ADISC *ap;
  376. IOCB_t *icmd;
  377. uint32_t *lp;
  378. uint32_t cmd;
  379. pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
  380. lp = (uint32_t *) pcmd->virt;
  381. cmd = *lp++;
  382. if (cmd == ELS_CMD_ADISC) {
  383. ap = (ADISC *) lp;
  384. pnn = (struct lpfc_name *) & ap->nodeName;
  385. ppn = (struct lpfc_name *) & ap->portName;
  386. } else {
  387. sp = (struct serv_parm *) lp;
  388. pnn = (struct lpfc_name *) & sp->nodeName;
  389. ppn = (struct lpfc_name *) & sp->portName;
  390. }
  391. icmd = &cmdiocb->iocb;
  392. if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
  393. if (cmd == ELS_CMD_ADISC) {
  394. lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
  395. } else {
  396. lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
  397. NULL, 0);
  398. }
  399. return 1;
  400. }
  401. /* Reject this request because invalid parameters */
  402. stat.un.b.lsRjtRsvd0 = 0;
  403. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  404. stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
  405. stat.un.b.vendorUnique = 0;
  406. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp);
  407. /* 1 sec timeout */
  408. mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
  409. spin_lock_irq(shost->host_lock);
  410. ndlp->nlp_flag |= NLP_DELAY_TMO;
  411. spin_unlock_irq(shost->host_lock);
  412. ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
  413. ndlp->nlp_prev_state = ndlp->nlp_state;
  414. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  415. return 0;
  416. }
  417. static int
  418. lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  419. struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
  420. {
  421. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  422. /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
  423. /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
  424. * PLOGIs during LOGO storms from a device.
  425. */
  426. spin_lock_irq(shost->host_lock);
  427. ndlp->nlp_flag |= NLP_LOGO_ACC;
  428. spin_unlock_irq(shost->host_lock);
  429. if (els_cmd == ELS_CMD_PRLO)
  430. lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
  431. else
  432. lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
  433. if (!(ndlp->nlp_type & NLP_FABRIC) ||
  434. (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
  435. /* Only try to re-login if this is NOT a Fabric Node */
  436. mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
  437. spin_lock_irq(shost->host_lock);
  438. ndlp->nlp_flag |= NLP_DELAY_TMO;
  439. spin_unlock_irq(shost->host_lock);
  440. ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
  441. ndlp->nlp_prev_state = ndlp->nlp_state;
  442. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  443. } else {
  444. ndlp->nlp_prev_state = ndlp->nlp_state;
  445. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
  446. }
  447. spin_lock_irq(shost->host_lock);
  448. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  449. spin_unlock_irq(shost->host_lock);
  450. /* The driver has to wait until the ACC completes before it continues
  451. * processing the LOGO. The action will resume in
  452. * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
  453. * unreg_login, the driver waits so the ACC does not get aborted.
  454. */
  455. return 0;
  456. }
  457. static void
  458. lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  459. struct lpfc_iocbq *cmdiocb)
  460. {
  461. struct lpfc_dmabuf *pcmd;
  462. uint32_t *lp;
  463. PRLI *npr;
  464. struct fc_rport *rport = ndlp->rport;
  465. u32 roles;
  466. pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
  467. lp = (uint32_t *) pcmd->virt;
  468. npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
  469. ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
  470. ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
  471. if (npr->prliType == PRLI_FCP_TYPE) {
  472. if (npr->initiatorFunc)
  473. ndlp->nlp_type |= NLP_FCP_INITIATOR;
  474. if (npr->targetFunc)
  475. ndlp->nlp_type |= NLP_FCP_TARGET;
  476. if (npr->Retry)
  477. ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
  478. }
  479. if (rport) {
  480. /* We need to update the rport role values */
  481. roles = FC_RPORT_ROLE_UNKNOWN;
  482. if (ndlp->nlp_type & NLP_FCP_INITIATOR)
  483. roles |= FC_RPORT_ROLE_FCP_INITIATOR;
  484. if (ndlp->nlp_type & NLP_FCP_TARGET)
  485. roles |= FC_RPORT_ROLE_FCP_TARGET;
  486. fc_remote_port_rolechg(rport, roles);
  487. }
  488. }
  489. static uint32_t
  490. lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  491. {
  492. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  493. struct lpfc_hba *phba = vport->phba;
  494. /* Check config parameter use-adisc or FCP-2 */
  495. if ((phba->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
  496. ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
  497. spin_lock_irq(shost->host_lock);
  498. ndlp->nlp_flag |= NLP_NPR_ADISC;
  499. spin_unlock_irq(shost->host_lock);
  500. return 1;
  501. }
  502. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  503. lpfc_unreg_rpi(vport, ndlp);
  504. return 0;
  505. }
  506. static uint32_t
  507. lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  508. void *arg, uint32_t evt)
  509. {
  510. lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
  511. "%d (%d):0253 Illegal State Transition: node x%x "
  512. "event x%x, state x%x Data: x%x x%x\n",
  513. vport->phba->brd_no, vport->vpi,
  514. ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
  515. ndlp->nlp_flag);
  516. return ndlp->nlp_state;
  517. }
  518. /* Start of Discovery State Machine routines */
  519. static uint32_t
  520. lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  521. void *arg, uint32_t evt)
  522. {
  523. struct lpfc_iocbq *cmdiocb;
  524. cmdiocb = (struct lpfc_iocbq *) arg;
  525. if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
  526. ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
  527. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
  528. return ndlp->nlp_state;
  529. }
  530. lpfc_drop_node(vport, ndlp);
  531. return NLP_STE_FREED_NODE;
  532. }
  533. static uint32_t
  534. lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  535. void *arg, uint32_t evt)
  536. {
  537. lpfc_issue_els_logo(vport, ndlp, 0);
  538. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
  539. return ndlp->nlp_state;
  540. }
  541. static uint32_t
  542. lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  543. void *arg, uint32_t evt)
  544. {
  545. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  546. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  547. spin_lock_irq(shost->host_lock);
  548. ndlp->nlp_flag |= NLP_LOGO_ACC;
  549. spin_unlock_irq(shost->host_lock);
  550. lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
  551. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
  552. return ndlp->nlp_state;
  553. }
  554. static uint32_t
  555. lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  556. void *arg, uint32_t evt)
  557. {
  558. lpfc_drop_node(vport, ndlp);
  559. return NLP_STE_FREED_NODE;
  560. }
  561. static uint32_t
  562. lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  563. void *arg, uint32_t evt)
  564. {
  565. lpfc_drop_node(vport, ndlp);
  566. return NLP_STE_FREED_NODE;
  567. }
  568. static uint32_t
  569. lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  570. void *arg, uint32_t evt)
  571. {
  572. struct lpfc_hba *phba = vport->phba;
  573. struct lpfc_iocbq *cmdiocb = arg;
  574. struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
  575. uint32_t *lp = (uint32_t *) pcmd->virt;
  576. struct serv_parm *sp = (struct serv_parm *) (lp + 1);
  577. struct ls_rjt stat;
  578. int port_cmp;
  579. memset(&stat, 0, sizeof (struct ls_rjt));
  580. /* For a PLOGI, we only accept if our portname is less
  581. * than the remote portname.
  582. */
  583. phba->fc_stat.elsLogiCol++;
  584. port_cmp = memcmp(&vport->fc_portname, &sp->portName,
  585. sizeof(struct lpfc_name));
  586. if (port_cmp >= 0) {
  587. /* Reject this request because the remote node will accept
  588. ours */
  589. stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
  590. stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
  591. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp);
  592. } else {
  593. lpfc_rcv_plogi(vport, ndlp, cmdiocb);
  594. } /* If our portname was less */
  595. return ndlp->nlp_state;
  596. }
  597. static uint32_t
  598. lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  599. void *arg, uint32_t evt)
  600. {
  601. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  602. struct ls_rjt stat;
  603. memset(&stat, 0, sizeof (struct ls_rjt));
  604. stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
  605. stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
  606. lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp);
  607. return ndlp->nlp_state;
  608. }
  609. static uint32_t
  610. lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  611. void *arg, uint32_t evt)
  612. {
  613. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  614. /* software abort outstanding PLOGI */
  615. lpfc_els_abort(vport->phba, ndlp);
  616. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  617. return ndlp->nlp_state;
  618. }
  619. static uint32_t
  620. lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  621. void *arg, uint32_t evt)
  622. {
  623. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  624. struct lpfc_hba *phba = vport->phba;
  625. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  626. /* software abort outstanding PLOGI */
  627. lpfc_els_abort(phba, ndlp);
  628. if (evt == NLP_EVT_RCV_LOGO) {
  629. lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
  630. } else {
  631. lpfc_issue_els_logo(vport, ndlp, 0);
  632. }
  633. /* Put ndlp in npr state set plogi timer for 1 sec */
  634. mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
  635. spin_lock_irq(shost->host_lock);
  636. ndlp->nlp_flag |= NLP_DELAY_TMO;
  637. spin_unlock_irq(shost->host_lock);
  638. ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
  639. ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
  640. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  641. return ndlp->nlp_state;
  642. }
  643. static uint32_t
  644. lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
  645. struct lpfc_nodelist *ndlp,
  646. void *arg,
  647. uint32_t evt)
  648. {
  649. struct lpfc_hba *phba = vport->phba;
  650. struct lpfc_iocbq *cmdiocb, *rspiocb;
  651. struct lpfc_dmabuf *pcmd, *prsp, *mp;
  652. uint32_t *lp;
  653. IOCB_t *irsp;
  654. struct serv_parm *sp;
  655. LPFC_MBOXQ_t *mbox;
  656. cmdiocb = (struct lpfc_iocbq *) arg;
  657. rspiocb = cmdiocb->context_un.rsp_iocb;
  658. if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
  659. /* Recovery from PLOGI collision logic */
  660. return ndlp->nlp_state;
  661. }
  662. irsp = &rspiocb->iocb;
  663. if (irsp->ulpStatus)
  664. goto out;
  665. pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
  666. prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
  667. lp = (uint32_t *) prsp->virt;
  668. sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
  669. if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
  670. goto out;
  671. /* PLOGI chkparm OK */
  672. lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
  673. "%d (%d):0121 PLOGI chkparm OK "
  674. "Data: x%x x%x x%x x%x\n",
  675. phba->brd_no, vport->vpi,
  676. ndlp->nlp_DID, ndlp->nlp_state,
  677. ndlp->nlp_flag, ndlp->nlp_rpi);
  678. if (phba->cfg_fcp_class == 2 && (sp->cls2.classValid))
  679. ndlp->nlp_fcp_info |= CLASS2;
  680. else
  681. ndlp->nlp_fcp_info |= CLASS3;
  682. ndlp->nlp_class_sup = 0;
  683. if (sp->cls1.classValid)
  684. ndlp->nlp_class_sup |= FC_COS_CLASS1;
  685. if (sp->cls2.classValid)
  686. ndlp->nlp_class_sup |= FC_COS_CLASS2;
  687. if (sp->cls3.classValid)
  688. ndlp->nlp_class_sup |= FC_COS_CLASS3;
  689. if (sp->cls4.classValid)
  690. ndlp->nlp_class_sup |= FC_COS_CLASS4;
  691. ndlp->nlp_maxframe =
  692. ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
  693. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  694. if (!mbox) {
  695. lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
  696. "%d (%d):0133 PLOGI: no memory for reg_login "
  697. "Data: x%x x%x x%x x%x\n",
  698. phba->brd_no, vport->vpi,
  699. ndlp->nlp_DID, ndlp->nlp_state,
  700. ndlp->nlp_flag, ndlp->nlp_rpi);
  701. goto out;
  702. }
  703. lpfc_unreg_rpi(vport, ndlp);
  704. if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
  705. (uint8_t *) sp, mbox, 0) == 0) {
  706. switch (ndlp->nlp_DID) {
  707. case NameServer_DID:
  708. mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
  709. break;
  710. case FDMI_DID:
  711. mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
  712. break;
  713. default:
  714. mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
  715. }
  716. mbox->context2 = lpfc_nlp_get(ndlp);
  717. mbox->vport = vport;
  718. if (lpfc_sli_issue_mbox(phba, mbox,
  719. (MBX_NOWAIT | MBX_STOP_IOCB))
  720. != MBX_NOT_FINISHED) {
  721. lpfc_nlp_set_state(vport, ndlp,
  722. NLP_STE_REG_LOGIN_ISSUE);
  723. return ndlp->nlp_state;
  724. }
  725. lpfc_nlp_put(ndlp);
  726. mp = (struct lpfc_dmabuf *) mbox->context1;
  727. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  728. kfree(mp);
  729. mempool_free(mbox, phba->mbox_mem_pool);
  730. lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
  731. "%d (%d):0134 PLOGI: cannot issue reg_login "
  732. "Data: x%x x%x x%x x%x\n",
  733. phba->brd_no, vport->vpi,
  734. ndlp->nlp_DID, ndlp->nlp_state,
  735. ndlp->nlp_flag, ndlp->nlp_rpi);
  736. } else {
  737. mempool_free(mbox, phba->mbox_mem_pool);
  738. lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
  739. "%d (%d):0135 PLOGI: cannot format reg_login "
  740. "Data: x%x x%x x%x x%x\n",
  741. phba->brd_no, vport->vpi,
  742. ndlp->nlp_DID, ndlp->nlp_state,
  743. ndlp->nlp_flag, ndlp->nlp_rpi);
  744. }
  745. out:
  746. if (ndlp->nlp_DID == NameServer_DID) {
  747. lpfc_vport_set_state(vport, FC_VPORT_FAILED);
  748. lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
  749. "%d (%d):0261 Cannot Register NameServer login\n",
  750. phba->brd_no, vport->vpi);
  751. }
  752. /* Free this node since the driver cannot login or has the wrong
  753. sparm */
  754. lpfc_drop_node(vport, ndlp);
  755. return NLP_STE_FREED_NODE;
  756. }
  757. static uint32_t
  758. lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  759. void *arg, uint32_t evt)
  760. {
  761. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  762. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  763. spin_lock_irq(shost->host_lock);
  764. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  765. spin_unlock_irq(shost->host_lock);
  766. return ndlp->nlp_state;
  767. } else {
  768. /* software abort outstanding PLOGI */
  769. lpfc_els_abort(vport->phba, ndlp);
  770. lpfc_drop_node(vport, ndlp);
  771. return NLP_STE_FREED_NODE;
  772. }
  773. }
  774. static uint32_t
  775. lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
  776. struct lpfc_nodelist *ndlp,
  777. void *arg,
  778. uint32_t evt)
  779. {
  780. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  781. struct lpfc_hba *phba = vport->phba;
  782. /* Don't do anything that will mess up processing of the
  783. * previous RSCN.
  784. */
  785. if (vport->fc_flag & FC_RSCN_DEFERRED)
  786. return ndlp->nlp_state;
  787. /* software abort outstanding PLOGI */
  788. lpfc_els_abort(phba, ndlp);
  789. ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
  790. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  791. spin_lock_irq(shost->host_lock);
  792. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  793. spin_unlock_irq(shost->host_lock);
  794. return ndlp->nlp_state;
  795. }
  796. static uint32_t
  797. lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  798. void *arg, uint32_t evt)
  799. {
  800. struct lpfc_hba *phba = vport->phba;
  801. struct lpfc_iocbq *cmdiocb;
  802. /* software abort outstanding ADISC */
  803. lpfc_els_abort(phba, ndlp);
  804. cmdiocb = (struct lpfc_iocbq *) arg;
  805. if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
  806. return ndlp->nlp_state;
  807. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  808. lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
  809. lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
  810. return ndlp->nlp_state;
  811. }
  812. static uint32_t
  813. lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  814. void *arg, uint32_t evt)
  815. {
  816. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  817. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  818. return ndlp->nlp_state;
  819. }
  820. static uint32_t
  821. lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  822. void *arg, uint32_t evt)
  823. {
  824. struct lpfc_hba *phba = vport->phba;
  825. struct lpfc_iocbq *cmdiocb;
  826. cmdiocb = (struct lpfc_iocbq *) arg;
  827. /* software abort outstanding ADISC */
  828. lpfc_els_abort(phba, ndlp);
  829. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  830. return ndlp->nlp_state;
  831. }
  832. static uint32_t
  833. lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
  834. struct lpfc_nodelist *ndlp,
  835. void *arg, uint32_t evt)
  836. {
  837. struct lpfc_iocbq *cmdiocb;
  838. cmdiocb = (struct lpfc_iocbq *) arg;
  839. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  840. return ndlp->nlp_state;
  841. }
  842. static uint32_t
  843. lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  844. void *arg, uint32_t evt)
  845. {
  846. struct lpfc_iocbq *cmdiocb;
  847. cmdiocb = (struct lpfc_iocbq *) arg;
  848. /* Treat like rcv logo */
  849. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
  850. return ndlp->nlp_state;
  851. }
  852. static uint32_t
  853. lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
  854. struct lpfc_nodelist *ndlp,
  855. void *arg, uint32_t evt)
  856. {
  857. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  858. struct lpfc_hba *phba = vport->phba;
  859. struct lpfc_iocbq *cmdiocb, *rspiocb;
  860. IOCB_t *irsp;
  861. ADISC *ap;
  862. cmdiocb = (struct lpfc_iocbq *) arg;
  863. rspiocb = cmdiocb->context_un.rsp_iocb;
  864. ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
  865. irsp = &rspiocb->iocb;
  866. if ((irsp->ulpStatus) ||
  867. (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
  868. /* 1 sec timeout */
  869. mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
  870. spin_lock_irq(shost->host_lock);
  871. ndlp->nlp_flag |= NLP_DELAY_TMO;
  872. spin_unlock_irq(shost->host_lock);
  873. ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
  874. memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
  875. memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
  876. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  877. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  878. lpfc_unreg_rpi(vport, ndlp);
  879. return ndlp->nlp_state;
  880. }
  881. if (ndlp->nlp_type & NLP_FCP_TARGET) {
  882. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  883. lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
  884. } else {
  885. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  886. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  887. }
  888. return ndlp->nlp_state;
  889. }
  890. static uint32_t
  891. lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  892. void *arg, uint32_t evt)
  893. {
  894. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  895. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  896. spin_lock_irq(shost->host_lock);
  897. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  898. spin_unlock_irq(shost->host_lock);
  899. return ndlp->nlp_state;
  900. } else {
  901. /* software abort outstanding ADISC */
  902. lpfc_els_abort(vport->phba, ndlp);
  903. lpfc_drop_node(vport, ndlp);
  904. return NLP_STE_FREED_NODE;
  905. }
  906. }
  907. static uint32_t
  908. lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
  909. struct lpfc_nodelist *ndlp,
  910. void *arg,
  911. uint32_t evt)
  912. {
  913. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  914. struct lpfc_hba *phba = vport->phba;
  915. /* Don't do anything that will mess up processing of the
  916. * previous RSCN.
  917. */
  918. if (vport->fc_flag & FC_RSCN_DEFERRED)
  919. return ndlp->nlp_state;
  920. /* software abort outstanding ADISC */
  921. lpfc_els_abort(phba, ndlp);
  922. ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
  923. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  924. spin_lock_irq(shost->host_lock);
  925. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  926. spin_unlock_irq(shost->host_lock);
  927. lpfc_disc_set_adisc(vport, ndlp);
  928. return ndlp->nlp_state;
  929. }
  930. static uint32_t
  931. lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
  932. struct lpfc_nodelist *ndlp,
  933. void *arg,
  934. uint32_t evt)
  935. {
  936. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  937. lpfc_rcv_plogi(vport, ndlp, cmdiocb);
  938. return ndlp->nlp_state;
  939. }
  940. static uint32_t
  941. lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
  942. struct lpfc_nodelist *ndlp,
  943. void *arg,
  944. uint32_t evt)
  945. {
  946. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  947. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  948. return ndlp->nlp_state;
  949. }
  950. static uint32_t
  951. lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
  952. struct lpfc_nodelist *ndlp,
  953. void *arg,
  954. uint32_t evt)
  955. {
  956. struct lpfc_hba *phba = vport->phba;
  957. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  958. LPFC_MBOXQ_t *mb;
  959. LPFC_MBOXQ_t *nextmb;
  960. struct lpfc_dmabuf *mp;
  961. cmdiocb = (struct lpfc_iocbq *) arg;
  962. /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
  963. if ((mb = phba->sli.mbox_active)) {
  964. if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
  965. (ndlp == (struct lpfc_nodelist *) mb->context2)) {
  966. lpfc_nlp_put(ndlp);
  967. mb->context2 = NULL;
  968. mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  969. }
  970. }
  971. spin_lock_irq(&phba->hbalock);
  972. list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
  973. if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
  974. (ndlp == (struct lpfc_nodelist *) mb->context2)) {
  975. mp = (struct lpfc_dmabuf *) (mb->context1);
  976. if (mp) {
  977. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  978. kfree(mp);
  979. }
  980. lpfc_nlp_put(ndlp);
  981. list_del(&mb->list);
  982. mempool_free(mb, phba->mbox_mem_pool);
  983. }
  984. }
  985. spin_unlock_irq(&phba->hbalock);
  986. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  987. return ndlp->nlp_state;
  988. }
  989. static uint32_t
  990. lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
  991. struct lpfc_nodelist *ndlp,
  992. void *arg,
  993. uint32_t evt)
  994. {
  995. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  996. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  997. return ndlp->nlp_state;
  998. }
  999. static uint32_t
  1000. lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
  1001. struct lpfc_nodelist *ndlp,
  1002. void *arg,
  1003. uint32_t evt)
  1004. {
  1005. struct lpfc_iocbq *cmdiocb;
  1006. cmdiocb = (struct lpfc_iocbq *) arg;
  1007. lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
  1008. return ndlp->nlp_state;
  1009. }
  1010. static uint32_t
  1011. lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
  1012. struct lpfc_nodelist *ndlp,
  1013. void *arg,
  1014. uint32_t evt)
  1015. {
  1016. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1017. struct lpfc_hba *phba = vport->phba;
  1018. LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
  1019. MAILBOX_t *mb = &pmb->mb;
  1020. uint32_t did = mb->un.varWords[1];
  1021. if (mb->mbxStatus) {
  1022. /* RegLogin failed */
  1023. lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
  1024. "%d (%d):0246 RegLogin failed Data: x%x x%x "
  1025. "x%x\n",
  1026. phba->brd_no, vport->vpi,
  1027. did, mb->mbxStatus, vport->port_state);
  1028. /*
  1029. * If RegLogin failed due to lack of HBA resources do not
  1030. * retry discovery.
  1031. */
  1032. if (mb->mbxStatus == MBXERR_RPI_FULL) {
  1033. ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
  1034. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
  1035. return ndlp->nlp_state;
  1036. }
  1037. /* Put ndlp in npr state set plogi timer for 1 sec */
  1038. mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
  1039. spin_lock_irq(shost->host_lock);
  1040. ndlp->nlp_flag |= NLP_DELAY_TMO;
  1041. spin_unlock_irq(shost->host_lock);
  1042. ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
  1043. lpfc_issue_els_logo(vport, ndlp, 0);
  1044. ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
  1045. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  1046. return ndlp->nlp_state;
  1047. }
  1048. ndlp->nlp_rpi = mb->un.varWords[0];
  1049. /* Only if we are not a fabric nport do we issue PRLI */
  1050. if (!(ndlp->nlp_type & NLP_FABRIC)) {
  1051. ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
  1052. lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
  1053. lpfc_issue_els_prli(vport, ndlp, 0);
  1054. } else {
  1055. ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
  1056. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  1057. }
  1058. return ndlp->nlp_state;
  1059. }
  1060. static uint32_t
  1061. lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
  1062. struct lpfc_nodelist *ndlp,
  1063. void *arg,
  1064. uint32_t evt)
  1065. {
  1066. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1067. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  1068. spin_lock_irq(shost->host_lock);
  1069. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  1070. spin_unlock_irq(shost->host_lock);
  1071. return ndlp->nlp_state;
  1072. } else {
  1073. lpfc_drop_node(vport, ndlp);
  1074. return NLP_STE_FREED_NODE;
  1075. }
  1076. }
  1077. static uint32_t
  1078. lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
  1079. struct lpfc_nodelist *ndlp,
  1080. void *arg,
  1081. uint32_t evt)
  1082. {
  1083. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1084. /* Don't do anything that will mess up processing of the
  1085. * previous RSCN.
  1086. */
  1087. if (vport->fc_flag & FC_RSCN_DEFERRED)
  1088. return ndlp->nlp_state;
  1089. ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
  1090. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  1091. spin_lock_irq(shost->host_lock);
  1092. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  1093. spin_unlock_irq(shost->host_lock);
  1094. lpfc_disc_set_adisc(vport, ndlp);
  1095. return ndlp->nlp_state;
  1096. }
  1097. static uint32_t
  1098. lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1099. void *arg, uint32_t evt)
  1100. {
  1101. struct lpfc_iocbq *cmdiocb;
  1102. cmdiocb = (struct lpfc_iocbq *) arg;
  1103. lpfc_rcv_plogi(vport, ndlp, cmdiocb);
  1104. return ndlp->nlp_state;
  1105. }
  1106. static uint32_t
  1107. lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1108. void *arg, uint32_t evt)
  1109. {
  1110. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1111. lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
  1112. return ndlp->nlp_state;
  1113. }
  1114. static uint32_t
  1115. lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1116. void *arg, uint32_t evt)
  1117. {
  1118. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1119. /* Software abort outstanding PRLI before sending acc */
  1120. lpfc_els_abort(vport->phba, ndlp);
  1121. lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
  1122. return ndlp->nlp_state;
  1123. }
  1124. static uint32_t
  1125. lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1126. void *arg, uint32_t evt)
  1127. {
  1128. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1129. lpfc_rcv_padisc(vport, ndlp, cmdiocb);
  1130. return ndlp->nlp_state;
  1131. }
  1132. /* This routine is envoked when we rcv a PRLO request from a nport
  1133. * we are logged into. We should send back a PRLO rsp setting the
  1134. * appropriate bits.
  1135. * NEXT STATE = PRLI_ISSUE
  1136. */
  1137. static uint32_t
  1138. lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1139. void *arg, uint32_t evt)
  1140. {
  1141. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
  1142. lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
  1143. return ndlp->nlp_state;
  1144. }
  1145. static uint32_t
  1146. lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1147. void *arg, uint32_t evt)
  1148. {
  1149. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1150. struct lpfc_iocbq *cmdiocb, *rspiocb;
  1151. struct lpfc_hba *phba = vport->phba;
  1152. IOCB_t *irsp;
  1153. PRLI *npr;
  1154. cmdiocb = (struct lpfc_iocbq *) arg;
  1155. rspiocb = cmdiocb->context_un.rsp_iocb;
  1156. npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
  1157. irsp = &rspiocb->iocb;
  1158. if (irsp->ulpStatus) {
  1159. ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
  1160. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  1161. return ndlp->nlp_state;
  1162. }
  1163. /* Check out PRLI rsp */
  1164. ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
  1165. ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
  1166. if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
  1167. (npr->prliType == PRLI_FCP_TYPE)) {
  1168. if (npr->initiatorFunc)
  1169. ndlp->nlp_type |= NLP_FCP_INITIATOR;
  1170. if (npr->targetFunc)
  1171. ndlp->nlp_type |= NLP_FCP_TARGET;
  1172. if (npr->Retry)
  1173. ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
  1174. }
  1175. if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
  1176. (vport->port_type == LPFC_NPIV_PORT) &&
  1177. phba->cfg_vport_restrict_login) {
  1178. spin_lock_irq(shost->host_lock);
  1179. ndlp->nlp_flag |= NLP_TARGET_REMOVE;
  1180. spin_unlock_irq(shost->host_lock);
  1181. lpfc_issue_els_logo(vport, ndlp, 0);
  1182. ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
  1183. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
  1184. return ndlp->nlp_state;
  1185. }
  1186. ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
  1187. if (ndlp->nlp_type & NLP_FCP_TARGET)
  1188. lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
  1189. else
  1190. lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  1191. return ndlp->nlp_state;
  1192. }
  1193. /*! lpfc_device_rm_prli_issue
  1194. *
  1195. * \pre
  1196. * \post
  1197. * \param phba
  1198. * \param ndlp
  1199. * \param arg
  1200. * \param evt
  1201. * \return uint32_t
  1202. *
  1203. * \b Description:
  1204. * This routine is envoked when we a request to remove a nport we are in the
  1205. * process of PRLIing. We should software abort outstanding prli, unreg
  1206. * login, send a logout. We will change node state to UNUSED_NODE, put it
  1207. * on plogi list so it can be freed when LOGO completes.
  1208. *
  1209. */
  1210. static uint32_t
  1211. lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  1212. void *arg, uint32_t evt)
  1213. {
  1214. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1215. if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
  1216. spin_lock_irq(shost->host_lock);
  1217. ndlp->nlp_flag |= NLP_NODEV_REMOVE;
  1218. spin_unlock_irq(shost->host_lock);
  1219. return ndlp->nlp_state;
  1220. } else {
  1221. /* software abort outstanding PLOGI */
  1222. lpfc_els_abort(vport->phba, ndlp);
  1223. lpfc_drop_node(vport, ndlp);
  1224. return NLP_STE_FREED_NODE;
  1225. }
  1226. }
  1227. /*! lpfc_device_recov_prli_issue
  1228. *
  1229. * \pre
  1230. * \post
  1231. * \param phba
  1232. * \param ndlp
  1233. * \param arg
  1234. * \param evt
  1235. * \return uint32_t
  1236. *
  1237. * \b Description:
  1238. * The routine is envoked when the state of a device is unknown, like
  1239. * during a link down. We should remove the nodelist entry from the
  1240. * unmapped list, issue a UNREG_LOGIN, do a software abort of the
  1241. * outstanding PRLI command, then free the node entry.
  1242. */
  1243. static uint32_t
  1244. lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
  1245. struct lpfc_nodelist *ndlp,
  1246. void *arg,
  1247. uint32_t evt)
  1248. {
  1249. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  1250. struct lpfc_hba *phba = vport->phba;
  1251. /* Don't do anything that will mess up processing of the
  1252. * previous RSCN.
  1253. */
  1254. if (vport->fc_flag & FC_RSCN_DEFERRED)
  1255. return ndlp->nlp_state;
  1256. /* software abort outstanding PRLI */
  1257. lpfc_els_abort(phba, ndlp);
  1258. ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
  1259. lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
  1260. spin_lock_irq(shost->host_lock);
  1261. ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
  1262. spin_unlock_irq(shost->host_lock);
  1263. lpfc_disc_set_adisc(vport, ndlp);
  1264. return ndlp->nlp_state;
  1265. }
static uint32_t
lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_prli(vport, ndlp, cmdiocb);
	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
	return ndlp->nlp_state;
}
static uint32_t
lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}

static uint32_t
lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	return ndlp->nlp_state;
}
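
/*
 * PRLO received while the node is a mapped FCP target: abort any I/O still
 * outstanding to the target (LPFC_CTX_TGT) on the FCP ring, then handle the
 * PRLO the same way a received LOGO is handled.
 */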
static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* flush the target */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}
static uint32_t
lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Ignore PLOGI if we have an outstanding LOGO */
	if (ndlp->nlp_flag & NLP_LOGO_SND)
		return ndlp->nlp_state;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	}

	/* send PLOGI immediately, move to PLOGI issue state */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	}
	return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt     stat;

	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			spin_unlock_irq(shost->host_lock);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
	return ndlp->nlp_state;
}
static uint32_t
lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	/*
	 * Do not start discovery if discovery is about to start
	 * or discovery in progress for this node. Starting discovery
	 * here will affect the counting of discovery threads.
	 */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
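
/*
 * PRLO received while the node is in NPR state: accept it, and if no delayed
 * retry is already pending, arm the 1 second delay timer so a PLOGI is
 * reissued to the node later (nlp_last_elscmd = ELS_CMD_PLOGI); any pending
 * ADISC preference is cleared either way.
 */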
static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);

	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);

	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	} else {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp->nlp_state;
}
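
/*
 * NPR_NODE completion handlers follow: a failed PLOGI completion drops the
 * node outright, while PRLI, ADISC and REG_LOGIN failures drop the node only
 * if it has already been marked for removal (NLP_NODEV_REMOVE).
 */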
static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}
static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}
static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	/* Just unregister the RPI and return the current state */
	lpfc_unreg_rpi(vport, ndlp);
	return ndlp->nlp_state;
}
static uint32_t
lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}
static uint32_t
lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t    *mb = &pmb->mb;

	if (!mb->mbxStatus)
		ndlp->nlp_rpi = mb->un.varWords[0];
	else {
		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
			lpfc_drop_node(vport, ndlp);
			return NLP_STE_FREED_NODE;
		}
	}
	return ndlp->nlp_state;
}
static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	}
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}
static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	if (ndlp->nlp_flag & NLP_DELAY_TMO)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	return ndlp->nlp_state;
}
/* This next section defines the NPort Discovery State Machine */

/* There are four different doubly linked lists that nodelist entries can
 * reside on. The plogi list and adisc list are used when Link Up discovery
 * or RSCN processing is needed. Each list holds the nodes that we will send
 * PLOGI or ADISC on. These lists keep track of which nodes will be affected
 * by an RSCN or a Link Up (typically, all nodes are affected on Link Up).
 * The unmapped_list contains all nodes that we have successfully logged
 * into at the Fibre Channel level. The mapped_list contains all nodes
 * that are mapped FCP targets.
 */
/*
 * The bind list is a list of undiscovered (potentially non-existent) nodes
 * that we have saved binding information on. This information is used when
 * nodes transition from the unmapped to the mapped list.
 */
/* For UNUSED_NODE state, the node has just been allocated.
 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
 * and put on the unmapped list. For ADISC processing, the node is taken off
 * the ADISC list and placed on either the mapped or unmapped list (depending
 * on its previous state). Once on the unmapped list, a PRLI is issued and the
 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
 * changed to UNMAPPED_NODE. If the completion indicates a mapped
 * node, the node is taken off the unmapped list. The binding list is checked
 * for a valid binding, or a binding is automatically assigned. If binding
 * assignment is unsuccessful, the node is left on the unmapped list. If
 * binding assignment is successful, the associated binding list entry (if
 * any) is removed, and the node is placed on the mapped list.
 */
/*
 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss
 * timers expire, all affected nodes will receive a DEVICE_RM event.
 */
/*
 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
 * check, additional nodes may be added to or removed from (via DEVICE_RM)
 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
 * we first process the ADISC list. 32 entries are processed initially and
 * ADISC is initiated for each one. Completions / events for each node are
 * funneled through the state machine. As each node finishes ADISC processing,
 * it starts ADISC for any nodes waiting for ADISC processing. If no nodes are
 * waiting, and the ADISC list count is identically 0, then we are done. For
 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
 * can issue a CLEAR_LA and re-enable Link Events. Next we process the PLOGI
 * list. 32 entries are processed initially and PLOGI is initiated for each
 * one. Completions / events for each node are funneled through the state
 * machine. As each node finishes PLOGI processing, it starts PLOGI for any
 * nodes waiting for PLOGI processing. If no nodes are waiting, and the PLOGI
 * list count is identically 0, then we are done. We have now completed
 * discovery / RSCN handling. Upon completion, ALL nodes should be on either
 * the mapped or unmapped lists.
 */
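
/*
 * The dispatch table below holds NLP_STE_MAX_STATE rows of NLP_EVT_MAX_EVENT
 * action routines each, one row per node state. lpfc_disc_state_machine()
 * indexes it as (cur_state * NLP_EVT_MAX_EVENT) + evt.
 */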
static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
	/* Action routine		   Event	Current State  */
	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
	lpfc_disc_illegal,		/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI   REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_npr_node,	/* RCV_PLOGI   NPR_NODE       */
	lpfc_rcv_prli_npr_node,		/* RCV_PRLI        */
	lpfc_rcv_logo_npr_node,		/* RCV_LOGO        */
	lpfc_rcv_padisc_npr_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_npr_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_npr_node,		/* RCV_PRLO        */
	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
	lpfc_cmpl_logo_npr_node,	/* CMPL_LOGO       */
	lpfc_cmpl_adisc_npr_node,	/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_npr_node,	/* CMPL_REG_LOGIN  */
	lpfc_device_rm_npr_node,	/* DEVICE_RM       */
	lpfc_device_recov_npr_node,	/* DEVICE_RECOVERY */
};
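
/*
 * Illustrative only: the ELS receive / completion paths and mailbox
 * completion handlers dispatch into the table above through
 * lpfc_disc_state_machine(), e.g.
 *
 *	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 *
 * (event names per the driver's discovery definitions). The return value is
 * the node's resulting state, or NLP_STE_FREED_NODE if the action routine
 * dropped the node.
 */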
int
lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct lpfc_hba  *phba = vport->phba;
	uint32_t cur_state, rc;
	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
			 uint32_t);

	lpfc_nlp_get(ndlp);
	cur_state = ndlp->nlp_state;

	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d (%d):0211 DSM in event x%x on NPort x%x in "
			"state %d Data: x%x\n",
			phba->brd_no, vport->vpi,
			evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);

	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
	rc = (func) (vport, ndlp, arg, evt);

	/* DSM out state <rc> on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d (%d):0212 DSM out state %d on NPort x%x "
			"Data: x%x\n",
			phba->brd_no, vport->vpi,
			rc, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_nlp_put(ndlp);
	return rc;
}