csio_lnode.c
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ms.h>

#include "csio_hw.h"
#include "csio_mb.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_fcoe_rnodes = 1024;
int csio_fdmi_enable = 1;

#define PORT_ID_PTR(_x)		((uint8_t *)(&_x) + 1)
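
/*
 * Explanatory note (not in the original): a 24-bit FC port id is placed
 * on the wire by storing it big-endian in a __be32 and skipping the
 * unused most-significant byte, e.g.:
 *
 *	__be32 port_id = htonl(0x00112233);
 *	memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);	// copies 11 22 33
 */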

/* Lnode SM declarations */
static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);

static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
		void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
		enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);

/* LN event mapping */
static enum csio_ln_ev fwevt_to_lnevt[] = {
	CSIO_LNE_NONE,		/* None */
	CSIO_LNE_NONE,		/* PLOGI_ACC_RCVD  */
	CSIO_LNE_NONE,		/* PLOGI_RJT_RCVD  */
	CSIO_LNE_NONE,		/* PLOGI_RCVD	   */
	CSIO_LNE_NONE,		/* PLOGO_RCVD	   */
	CSIO_LNE_NONE,		/* PRLI_ACC_RCVD   */
	CSIO_LNE_NONE,		/* PRLI_RJT_RCVD   */
	CSIO_LNE_NONE,		/* PRLI_RCVD	   */
	CSIO_LNE_NONE,		/* PRLO_RCVD	   */
	CSIO_LNE_NONE,		/* NPORT_ID_CHGD   */
	CSIO_LNE_LOGO,		/* FLOGO_RCVD	   */
	CSIO_LNE_LOGO,		/* CLR_VIRT_LNK_RCVD */
	CSIO_LNE_FAB_INIT_DONE,	/* FLOGI_ACC_RCVD   */
	CSIO_LNE_NONE,		/* FLOGI_RJT_RCVD   */
	CSIO_LNE_FAB_INIT_DONE,	/* FDISC_ACC_RCVD   */
	CSIO_LNE_NONE,		/* FDISC_RJT_RCVD   */
	CSIO_LNE_NONE,		/* FLOGI_TMO_MAX_RETRY */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_ACC */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_RJT */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_CNFLT */
	CSIO_LNE_NONE,		/* PRLI_TMO		*/
	CSIO_LNE_NONE,		/* ADISC_TMO		*/
	CSIO_LNE_NONE,		/* RSCN_DEV_LOST */
	CSIO_LNE_NONE,		/* SCR_ACC_RCVD */
	CSIO_LNE_NONE,		/* ADISC_RJT_RCVD */
	CSIO_LNE_NONE,		/* LOGO_SNT */
	CSIO_LNE_NONE,		/* PROTO_ERR_IMPL_LOGO */
};

#define CSIO_FWE_TO_LNE(_evt)	(((_evt) > PROTO_ERR_IMPL_LOGO) ?	\
						CSIO_LNE_NONE :		\
						fwevt_to_lnevt[(_evt)])
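
/*
 * Explanatory note (not in the original): the firmware event cause is
 * used directly as an index into fwevt_to_lnevt[], so e.g.
 * FLOGI_ACC_RCVD and FDISC_ACC_RCVD become CSIO_LNE_FAB_INIT_DONE,
 * FLOGO_RCVD and CLR_VIRT_LNK_RCVD become CSIO_LNE_LOGO, and every
 * other cause maps to CSIO_LNE_NONE and is not posted to the lnode SM.
 */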

#define csio_ct_rsp(cp)		(((struct fc_ct_hdr *)cp)->ct_cmd)
#define csio_ct_reason(cp)	(((struct fc_ct_hdr *)cp)->ct_reason)
#define csio_ct_expl(cp)	(((struct fc_ct_hdr *)cp)->ct_explan)
#define csio_ct_get_pld(cp)	((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))

/*
 * csio_ln_lookup_by_portid - Lookup lnode using given portid.
 * @hw: HW module
 * @portid: port-id.
 *
 * If found, returns lnode matching given portid otherwise returns NULL.
 */
static struct csio_lnode *
csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln = hw->rln;
	struct list_head *tmp;

	/* Match sibling lnodes with portid */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;
		if (ln->portid == portid)
			return ln;
	}

	return NULL;
}

/*
 * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
 * @hw: HW module
 * @vnp_id: vnp index.
 *
 * If found, returns lnode matching given vnp id, otherwise returns NULL.
 */
static struct csio_lnode *
csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
{
	struct list_head *tmp1, *tmp2;
	struct csio_lnode *sln = NULL, *cln = NULL;

	if (list_empty(&hw->sln_head)) {
		CSIO_INC_STATS(hw, n_lnlkup_miss);
		return NULL;
	}
	/* Traverse sibling lnodes */
	list_for_each(tmp1, &hw->sln_head) {
		sln = (struct csio_lnode *) tmp1;

		/* Match sibling lnode */
		if (sln->vnp_flowid == vnp_id)
			return sln;

		if (list_empty(&sln->cln_head))
			continue;

		/* Traverse children lnodes */
		list_for_each(tmp2, &sln->cln_head) {
			cln = (struct csio_lnode *) tmp2;

			if (cln->vnp_flowid == vnp_id)
				return cln;
		}
	}
	CSIO_INC_STATS(hw, n_lnlkup_miss);
	return NULL;
}

/**
 * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
 * @hw: HW module.
 * @wwpn: WWPN.
 *
 * If found, returns lnode matching given wwpn, returns NULL otherwise.
 */
struct csio_lnode *
csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
{
	struct list_head *tmp1, *tmp2;
	struct csio_lnode *sln = NULL, *cln = NULL;

	if (list_empty(&hw->sln_head)) {
		CSIO_INC_STATS(hw, n_lnlkup_miss);
		return NULL;
	}
	/* Traverse sibling lnodes */
	list_for_each(tmp1, &hw->sln_head) {
		sln = (struct csio_lnode *) tmp1;

		/* Match sibling lnode */
		if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
			return sln;

		if (list_empty(&sln->cln_head))
			continue;

		/* Traverse children lnodes */
		list_for_each(tmp2, &sln->cln_head) {
			cln = (struct csio_lnode *) tmp2;

			if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
				return cln;
		}
	}

	return NULL;
}
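
/*
 * Explanatory note (not in the original): lnodes form a two-level
 * hierarchy - physical lnodes are linked as siblings on hw->sln_head,
 * and each physical lnode may carry NPIV child lnodes on its cln_head
 * list, hence the nested walks in the lookup helpers above.
 */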

/* FDMI */
static void
csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
{
	struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
	cmd->ct_rev = FC_CT_REV;
	cmd->ct_fs_type = type;
	cmd->ct_fs_subtype = sub_type;
	cmd->ct_cmd = htons(op);
}

static int
csio_hostname(uint8_t *buf, size_t buf_len)
{
	if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
		return 0;
	return -1;
}

static int
csio_osname(uint8_t *buf, size_t buf_len)
{
	if (snprintf(buf, buf_len, "%s %s %s",
		     init_utsname()->sysname,
		     init_utsname()->release,
		     init_utsname()->version) > 0)
		return 0;

	return -1;
}

static inline void
csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
{
	uint16_t attr_len;
	struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;

	ae->type = htons(type);
	attr_len = len + 4;		/* includes attribute type and length */
	attr_len = (attr_len + 3) & ~3;	/* should be multiple of 4 bytes */
	ae->len = htons(attr_len);
	memset(ae->value, 0, attr_len - 4);
	/* Copy only the caller's 'len' bytes; the memset above already
	 * zeroed any alignment padding.
	 */
	memcpy(ae->value, val, len);
	*ptr += attr_len;
}
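
/*
 * Illustration (not in the original): each FDMI attribute is a TLV whose
 * total length is rounded up to a 4-byte multiple, e.g. appending the
 * 8-byte string "csiostor" emits:
 *
 *	htons(type) | htons(12) | 'c' 's' 'i' 'o' 's' 't' 'o' 'r'
 */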

/*
 * csio_ln_fdmi_done - FDMI registration completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	struct csio_lnode *ln = fdmi_req->lnode;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}
}

/*
 * csio_ln_fdmi_rhba_cbfn - RHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	uint8_t *pld;
	uint32_t len = 0;
	__be32 val;
	__be16 mfs;
	uint32_t numattrs = 0;
	struct csio_lnode *ln = fdmi_req->lnode;
	struct fs_fdmi_attrs *attrib_blk;
	struct fc_fdmi_port_name *port_name;
	uint8_t buf[64];
	uint8_t *fc4_type;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}

	/* Prepare CT hdr for RPA cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA);

	/* Prepare RPA payload */
	pld = (uint8_t *)csio_ct_get_pld(cmd);
	port_name = (struct fc_fdmi_port_name *)pld;
	memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
	pld += sizeof(*port_name);

	/* Start appending Port attributes */
	attrib_blk = (struct fs_fdmi_attrs *)pld;
	attrib_blk->numattrs = 0;
	len += sizeof(attrib_blk->numattrs);
	pld += sizeof(attrib_blk->numattrs);

	fc4_type = &buf[0];
	memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
	fc4_type[2] = 1;
	fc4_type[7] = 1;
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
			   fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
	numattrs++;

	val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
			   (uint8_t *)&val,
			   FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
	numattrs++;

	if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
		val = htonl(FC_PORTSPEED_1GBIT);
	else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
		val = htonl(FC_PORTSPEED_10GBIT);
	else
		val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
			   (uint8_t *)&val,
			   FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
	numattrs++;

	mfs = ln->ln_sparm.csp.sp_bb_data;
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
			   (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
	numattrs++;

	strcpy(buf, "csiostor");
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
			   (uint16_t)strlen(buf));
	numattrs++;

	if (!csio_hostname(buf, sizeof(buf))) {
		csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
				   buf, (uint16_t)strlen(buf));
		numattrs++;
	}
	attrib_blk->numattrs = htonl(numattrs);
	len = (uint32_t)(pld - (uint8_t *)cmd);

	/* Submit FDMI RPA request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
	}
	spin_unlock_irq(&hw->lock);
}

/*
 * csio_ln_fdmi_dprt_cbfn - DPRT completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	uint8_t *pld;
	uint32_t len = 0;
	uint32_t numattrs = 0;
	__be32 maxpayload = htonl(65536);
	struct fc_fdmi_hba_identifier *hbaid;
	struct csio_lnode *ln = fdmi_req->lnode;
	struct fc_fdmi_rpl *reg_pl;
	struct fs_fdmi_attrs *attrib_blk;
	uint8_t buf[64];

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/* Prepare CT hdr for RHBA cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
	len = FC_CT_HDR_LEN;

	/* Prepare RHBA payload */
	pld = (uint8_t *)csio_ct_get_pld(cmd);
	hbaid = (struct fc_fdmi_hba_identifier *)pld;
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
	pld += sizeof(*hbaid);

	/* Register one port per hba */
	reg_pl = (struct fc_fdmi_rpl *)pld;
	reg_pl->numport = htonl(1);
	memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
	pld += sizeof(*reg_pl);

	/* Start appending HBA attributes */
	attrib_blk = (struct fs_fdmi_attrs *)pld;
	attrib_blk->numattrs = 0;
	len += sizeof(attrib_blk->numattrs);
	pld += sizeof(attrib_blk->numattrs);

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
			   FC_FDMI_HBA_ATTR_NODENAME_LEN);
	numattrs++;

	memset(buf, 0, sizeof(buf));
	strcpy(buf, "Chelsio Communications");
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
			   (uint16_t)strlen(buf));
	numattrs++;

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
			   hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
	numattrs++;

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
			   (uint16_t)sizeof(hw->vpd.id));
	numattrs++;

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
			   hw->model_desc, (uint16_t)strlen(hw->model_desc));
	numattrs++;

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
			   hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
	numattrs++;

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
			   hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
	numattrs++;

	if (!csio_osname(buf, sizeof(buf))) {
		csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
				   buf, (uint16_t)strlen(buf));
		numattrs++;
	}

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
			   (uint8_t *)&maxpayload,
			   FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
	len = (uint32_t)(pld - (uint8_t *)cmd);
	numattrs++;
	attrib_blk->numattrs = htonl(numattrs);

	/* Submit FDMI RHBA request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
	}
	spin_unlock_irq(&hw->lock);
}

/*
 * csio_ln_fdmi_dhba_cbfn - DHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	struct csio_lnode *ln = fdmi_req->lnode;
	void *cmd;
	struct fc_fdmi_port_name *port_name;
	uint32_t len;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/*
	 * Send FDMI cmd to de-register any Port attributes if registered
	 * before.
	 */

	/* Prepare FDMI DPRT cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT);
	len = FC_CT_HDR_LEN;
	port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
	memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
	len += sizeof(*port_name);

	/* Submit FDMI request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
	}
	spin_unlock_irq(&hw->lock);
}

/**
 * csio_ln_fdmi_start - Start an FDMI request.
 * @ln: lnode
 * @context: session context
 *
 * Issued with lock held.
 */
int
csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
{
	struct csio_ioreq *fdmi_req;
	struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
	void *cmd;
	struct fc_fdmi_hba_identifier *hbaid;
	uint32_t len;

	if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
		return -EPROTONOSUPPORT;

	if (!csio_is_rnode_ready(fdmi_rn))
		CSIO_INC_STATS(ln, n_fdmi_err);

	/*
	 * Send FDMI cmd to de-register any HBA attributes if registered
	 * before.
	 */
	fdmi_req = ln->mgmt_req;
	fdmi_req->lnode = ln;
	fdmi_req->rnode = fdmi_rn;

	/* Prepare FDMI DHBA cmd */
	cmd = fdmi_req->dma_buf.vaddr;
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA);
	len = FC_CT_HDR_LEN;

	hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
	len += sizeof(*hbaid);

	/* Submit FDMI request */
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
	}

	return 0;
}
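
/*
 * Explanatory note (not in the original): FDMI registration runs as a
 * chain of CT requests, each issued from the previous one's completion
 * handler: DHBA -> DPRT -> RHBA -> RPA, ending in csio_ln_fdmi_done().
 */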

/*
 * csio_ln_vnp_read_cbfn - vnp read completion handler.
 * @hw: HW module
 * @mbp: Mailbox command/response.
 *
 * Reads vnp response and updates ln parameters.
 */
static void
csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
	struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
	struct fc_els_csp *csp;
	struct fc_els_cssp *clsp;
	enum fw_retval retval;
	__be32 nport_id;

	retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return;
	}

	spin_lock_irq(&hw->lock);

	memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
	memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3);
	ln->nport_id = ntohl(nport_id);
	ln->nport_id = ln->nport_id >> 8;
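	/*
	 * Explanatory note (not in the original): the 24-bit N_Port ID is
	 * carried in the last three bytes of the FPMA MAC address; those
	 * bytes land in the most-significant positions of the __be32, so
	 * the value is shifted right by 8 after ntohl() to right-align it.
	 */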

	/* Update WWNs */
	/*
	 * This may look like a duplication of what csio_fcoe_enable_link()
	 * does, but is absolutely necessary if the vnpi changes between
	 * a FCOE LINK UP and FCOE LINK DOWN.
	 */
	memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);

	/* Copy common sparam */
	csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
	ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
	ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
	ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred;
	ln->ln_sparm.csp.sp_features = csp->sp_features;
	ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data;
	ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov;
	ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov;

	/* Copy word 0 & word 1 of class sparam */
	clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
	ln->ln_sparm.clsp[2].cp_class = clsp->cp_class;
	ln->ln_sparm.clsp[2].cp_init = clsp->cp_init;
	ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip;
	ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs;

	spin_unlock_irq(&hw->lock);

	mempool_free(mbp, hw->mb_mempool);

	/* Send an event to update local attribs */
	csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
}

/*
 * csio_ln_vnp_read - Read vnp params.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.
 */
static int
csio_ln_vnp_read(struct csio_lnode *ln,
		 void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb  *mbp;

	/* Allocate Mbox request */
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Prepare VNP Command */
	csio_fcoe_vnp_read_init_mb(ln, mbp,
				   CSIO_MB_DEFAULT_TMO,
				   ln->fcf_flowid,
				   ln->vnp_flowid,
				   cbfn);

	/* Issue MBOX cmd */
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	return 0;
}

/*
 * csio_fcoe_enable_link - Enable fcoe link.
 * @ln: lnode
 * @enable: enable/disable
 * Issued with lock held.
 * Issues mbox cmd to bring up FCOE link on port associated with given ln.
 */
static int
csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb  *mbp;
	enum fw_retval retval;
	uint8_t portid;
	uint8_t sub_op;
	struct fw_fcoe_link_cmd *lcmd;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	portid = ln->portid;
	sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;

	csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
		 sub_op ? "UP" : "DOWN", portid);

	csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
					  portid, sub_op, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
			 portid);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw,
			 "FCOE LINK %s cmd on port[%d] failed with "
			 "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (!enable)
		goto out;

	lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;

	memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);

	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		if (hw->pport[i].portid == portid)
			memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);

out:
	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
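
/*
 * Explanatory note (not in the original): the link mailbox above is
 * initialized with a NULL completion callback, so csio_mb_issue()
 * appears to complete it synchronously, which is why the result can be
 * read immediately with csio_mb_fw_retval().
 */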

/*
 * csio_ln_read_fcf_cbfn - Read fcf parameters
 * @ln: lnode
 *
 * Reads fcf response and updates ln fcf information.
 */
static void
csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
	struct csio_fcf_info *fcf_info;
	struct fw_fcoe_fcf_cmd *rsp =
				(struct fw_fcoe_fcf_cmd *)(mbp->mb);
	enum fw_retval retval;

	retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
			    retval);
		mempool_free(mbp, hw->mb_mempool);
		return;
	}

	spin_lock_irq(&hw->lock);
	fcf_info = ln->fcfinfo;
	fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
					ntohs(rsp->priority_pkd));
	fcf_info->vf_id = ntohs(rsp->vf_id);
	fcf_info->vlan_id = rsp->vlan_id;
	fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
	fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
	fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
	fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
	fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
	fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
	fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
	memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
	memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
	memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
	memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
	memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));
	spin_unlock_irq(&hw->lock);

	mempool_free(mbp, hw->mb_mempool);
}

/*
 * csio_ln_read_fcf_entry - Read fcf entry.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.
 */
static int
csio_ln_read_fcf_entry(struct csio_lnode *ln,
		       void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb  *mbp;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FCF information */
	csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
				   ln->portid, ln->fcf_flowid, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FCOE FCF cmd\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	return 0;
}

/*
 * csio_handle_link_up - Logical Linkup event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none.
 *
 * This event is received from FW, when virtual link is established between
 * Physical port[ENode] and FCF. If it is a new vnpi, then a local node object
 * is created on this FCF and set to [ONLINE] state.
 * Lnode waits for FW_RDEV_CMD event to be received indicating that
 * Fabric login is completed and lnode moves to [READY] state.
 *
 * This is called with hw lock held.
 */
static void
csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
		    uint32_t vnpi)
{
	struct csio_lnode *ln = NULL;

	/* Lookup lnode based on vnpi */
	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
	if (!ln) {
		/* Pick lnode based on portid */
		ln = csio_ln_lookup_by_portid(hw, portid);
		if (!ln) {
			csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
				 portid);
			CSIO_DB_ASSERT(0);
			return;
		}

		/* Check if lnode has valid vnp flowid */
		if (ln->vnp_flowid != CSIO_INVALID_IDX) {
			/* New VN-Port */
			spin_unlock_irq(&hw->lock);
			ln = csio_lnode_alloc(hw);
			spin_lock_irq(&hw->lock);
			if (!ln) {
				csio_err(hw,
					 "failed to allocate fcoe lnode "
					 "for port:%d vnpi:x%x\n",
					 portid, vnpi);
				CSIO_DB_ASSERT(0);
				return;
			}
			ln->portid = portid;
		}
		ln->vnp_flowid = vnpi;
		ln->dev_num &= ~0xFFFF;
		ln->dev_num |= vnpi;
	}

	/* Initialize fcfi */
	ln->fcf_flowid = fcfi;

	csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);

	CSIO_INC_STATS(ln, n_link_up);

	/* Send LINKUP event to SM */
	csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
}
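
/*
 * Explanatory note (not in the original): the low 16 bits of ln->dev_num
 * carry the vnpi (masked and or-ed in above), giving each VN-Port a
 * distinct device number.
 */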

/*
 * csio_post_event_rns
 * @ln - FCOE lnode
 * @evt - Given rnode event
 * Returns - none
 *
 * Posts given rnode event to all FCOE rnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
 *
 * This is called with hw lock held.
 */
static void
csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_post_event(&rn->sm, evt);
	}
}

/*
 * csio_cleanup_rns
 * @ln - FCOE lnode
 * Returns - none
 *
 * Frees all FCOE rnodes connected with given Lnode.
 *
 * This is called with hw lock held.
 */
static void
csio_cleanup_rns(struct csio_lnode *ln)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next_rn;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_put_rnode(ln, rn);
	}
}

/*
 * csio_post_event_lns
 * @ln - FCOE lnode
 * @evt - Given lnode event
 * Returns - none
 *
 * Posts given lnode event to all FCOE lnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
 *
 * This is called with hw lock held.
 */
static void
csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct list_head *tmp;
	struct csio_lnode *cln, *sln;

	/* If NPIV lnode, send evt only to that and return */
	if (csio_is_npiv_ln(ln)) {
		csio_post_event(&ln->sm, evt);
		return;
	}

	sln = ln;
	/* Traverse children lnodes list and send evt */
	list_for_each(tmp, &sln->cln_head) {
		cln = (struct csio_lnode *) tmp;
		csio_post_event(&cln->sm, evt);
	}

	/* Send evt to parent lnode */
	csio_post_event(&ln->sm, evt);
}

/*
 * csio_ln_down - Local nport is down
 * @ln - FCOE Lnode
 * Returns - none
 *
 * Sends LINK_DOWN events to the Lnode and its associated NPIV lnodes.
 *
 * This is called with hw lock held.
 */
static void
csio_ln_down(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
}

/*
 * csio_handle_link_down - Logical Linkdown event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none
 *
 * This event is received from FW, when virtual link goes down between
 * Physical port[ENode] and FCF. The lnode and its associated NPIV lnodes
 * hosted on this vnpi[VN-Port] will be de-instantiated.
 *
 * This is called with hw lock held.
 */
static void
csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
		      uint32_t vnpi)
{
	struct csio_fcf_info *fp;
	struct csio_lnode *ln;

	/* Lookup lnode based on vnpi */
	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
	if (ln) {
		fp = ln->fcfinfo;
		CSIO_INC_STATS(ln, n_link_down);

		/* Warn if linkdown is received when lnode is not in ready state */
		if (!csio_is_lnode_ready(ln)) {
			csio_ln_warn(ln,
				     "warn: FCOE link is already offline. "
				     "Ignoring Fcoe linkdown event on portid %d\n",
				     portid);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		/* Verify portid */
		if (fp->portid != portid) {
			csio_ln_warn(ln,
				     "warn: FCOE linkdown recv with "
				     "invalid port %d\n", portid);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		/* Verify fcfi */
		if (ln->fcf_flowid != fcfi) {
			csio_ln_warn(ln,
				     "warn: FCOE linkdown recv with "
				     "invalid fcfi x%x\n", fcfi);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);

		/* Send LINK_DOWN event to lnode s/m */
		csio_ln_down(ln);

		return;
	} else {
		csio_warn(hw,
			  "warn: FCOE linkdown recv with invalid vnpi x%x\n",
			  vnpi);
		CSIO_INC_STATS(hw, n_evt_drop);
	}
}

/*
 * csio_is_lnode_ready - Checks if FCOE lnode is in ready state.
 * @ln: Lnode module
 *
 * Returns True if FCOE lnode is in ready state.
 */
int
csio_is_lnode_ready(struct csio_lnode *ln)
{
	return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
}
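
/*
 * Explanatory note (not in the original): the current SM state is the
 * state-handler function itself, so state checks simply compare the
 * stored handler pointer, e.g.:
 *
 *	if (csio_get_state(ln) == (csio_sm_state_t)csio_lns_ready)
 *		... lnode is READY ...
 */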

/*****************************************************************************/
/* START: Lnode SM                                                           */
/*****************************************************************************/
/*
 * csio_lns_uninit - The request in uninit state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "uninit" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_lnode *rln = hw->rln;
	int rv;

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_set_state(&ln->sm, csio_lns_online);
		/* Read FCF only for physical lnode */
		if (csio_is_phys_ln(ln)) {
			rv = csio_ln_read_fcf_entry(ln,
					csio_ln_read_fcf_cbfn);
			if (rv != 0) {
				/* TODO: Send HW RESET event */
				CSIO_INC_STATS(ln, n_err);
				break;
			}

			/* Add FCF record */
			list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
		}

		rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
		if (rv != 0) {
			/* TODO: Send HW RESET event */
			CSIO_INC_STATS(ln, n_err);
		}
		break;

	case CSIO_LNE_DOWN_LINK:
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[uninit].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		break;
	} /* switch event */
}

/*
 * csio_lns_online - The request in online state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "online" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_ln_warn(ln,
			     "warn: FCOE link is up already. "
			     "Ignoring linkup on port:%d\n", ln->portid);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_FAB_INIT_DONE:
		csio_set_state(&ln->sm, csio_lns_ready);

		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
		spin_lock_irq(&hw->lock);

		break;

	case CSIO_LNE_LINK_DOWN:
		/* Fall through */
	case CSIO_LNE_DOWN_LINK:
		csio_set_state(&ln->sm, csio_lns_uninit);
		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[online].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		break;
	} /* switch event */
}

/*
 * csio_lns_ready - The request in ready state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "ready" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_FAB_INIT_DONE:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_LINK_DOWN:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_DOWN_LINK:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		/* Host needs to issue aborts in case FW has not returned
		 * the WRs with status "ABORTED".
		 */
		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	case CSIO_LNE_LOGO:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}

/*
 * csio_lns_offline - The request in offline state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "offline" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_lnode *rln = hw->rln;
	int rv;

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_set_state(&ln->sm, csio_lns_online);
		/* Read FCF only for physical lnode */
		if (csio_is_phys_ln(ln)) {
			rv = csio_ln_read_fcf_entry(ln,
					csio_ln_read_fcf_cbfn);
			if (rv != 0) {
				/* TODO: Send HW RESET event */
				CSIO_INC_STATS(ln, n_err);
				break;
			}

			/* Add FCF record */
			list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
		}

		rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
		if (rv != 0) {
			/* TODO: Send HW RESET event */
			CSIO_INC_STATS(ln, n_err);
		}
		break;

	case CSIO_LNE_LINK_DOWN:
	case CSIO_LNE_DOWN_LINK:
	case CSIO_LNE_LOGO:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[offline].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[offline]\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}
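
/*
 * Illustration (not in the original): lnode SM transitions implemented
 * above -
 *
 *	UNINIT  --LINKUP-----------------> ONLINE
 *	ONLINE  --FAB_INIT_DONE----------> READY
 *	ONLINE  --LINK_DOWN/DOWN_LINK----> UNINIT
 *	READY   --LINK_DOWN/DOWN_LINK----> OFFLINE
 *	READY   --LOGO-------------------> OFFLINE
 *	READY/OFFLINE --CLOSE------------> UNINIT
 *	OFFLINE --LINKUP-----------------> ONLINE
 */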

/*****************************************************************************/
/* END: Lnode SM                                                             */
/*****************************************************************************/

static void
csio_free_fcfinfo(struct kref *kref)
{
	struct csio_fcf_info *fcfinfo = container_of(kref,
						struct csio_fcf_info, kref);
	kfree(fcfinfo);
}

/* Helper routines for attributes */
/*
 * csio_lnode_state_to_str - Get current state of FCOE lnode.
 * @ln - lnode
 * @str - state of lnode.
 */
void
csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
{
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
		strcpy(str, "UNINIT");
		return;
	}
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
		strcpy(str, "READY");
		return;
	}
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
		strcpy(str, "OFFLINE");
		return;
	}
	strcpy(str, "UNKNOWN");
} /* csio_lnode_state_to_str */

int
csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
			struct fw_fcoe_port_stats *port_stats)
{
	struct csio_mb  *mbp;
	struct fw_fcoe_port_cmd_params portparams;
	enum fw_retval retval;
	int idx;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "FCoE FCF PARAMS command out of memory!\n");
		return -EINVAL;
	}
	portparams.portid = portid;

	for (idx = 1; idx <= 3; idx++) {
		portparams.idx = (idx - 1) * 6 + 1;
		portparams.nstats = 6;
		if (idx == 3)
			portparams.nstats = 4;
		csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
						  &portparams, NULL);
		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FCoE port params failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}
		csio_mb_process_portparams_rsp(hw, mbp, &retval,
					       &portparams, port_stats);
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
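
/*
 * Explanatory note (not in the original): the loop above fetches the 16
 * per-port stats in three chunks of 6, 6 and 4 (portparams.idx is the
 * 1-based offset of each chunk), presumably because a single mailbox
 * response cannot carry all of them at once.
 */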

/*
 * csio_ln_mgmt_wr_handler - Mgmt Work Request handler.
 * @wr - WR.
 * @len - WR len.
 * This handler is invoked when an outstanding mgmt WR is completed.
 * It is invoked in the context of the FW event worker thread for every
 * mgmt event received.
 * Return - none.
 */
static void
csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
{
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_ioreq *io_req = NULL;
	struct fw_fcoe_els_ct_wr *wr_cmd;

	wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;

	if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
		csio_err(mgmtm->hw,
			 "Invalid ELS CT WR length recvd, len:%x\n", len);
		mgmtm->stats.n_err++;
		return;
	}

	io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
	io_req->wr_status = csio_wr_status(wr_cmd);

	/* Look up whether the ioreq exists in our active Q */
	spin_lock_irq(&hw->lock);
	if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
		csio_err(mgmtm->hw,
			 "Error- Invalid IO handle recv in WR. handle: %p\n",
			 io_req);
		mgmtm->stats.n_err++;
		spin_unlock_irq(&hw->lock);
		return;
	}

	mgmtm = csio_hw_to_mgmtm(hw);

	/* Dequeue from active queue */
	list_del_init(&io_req->sm.sm_list);
	mgmtm->stats.n_active--;
	spin_unlock_irq(&hw->lock);

	/* io_req will be freed by completion handler */
	if (io_req->io_cbfn)
		io_req->io_cbfn(hw, io_req);
}
  1271. /**
  1272. * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
  1273. * @hw: HW module
  1274. * @cpl_op: CPL opcode
  1275. * @cmd: FW cmd/WR.
  1276. *
  1277. * Process received FCoE cmd/WR event from FW.
  1278. */
  1279. void
  1280. csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
  1281. {
  1282. struct csio_lnode *ln;
  1283. struct csio_rnode *rn;
  1284. uint8_t portid, opcode = *(uint8_t *)cmd;
  1285. struct fw_fcoe_link_cmd *lcmd;
  1286. struct fw_wr_hdr *wr;
  1287. struct fw_rdev_wr *rdev_wr;
  1288. enum fw_fcoe_link_status lstatus;
  1289. uint32_t fcfi, rdev_flowid, vnpi;
  1290. enum csio_ln_ev evt;
	if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {
		lcmd = (struct fw_fcoe_link_cmd *)cmd;
		lstatus = lcmd->lstatus;
		portid = FW_FCOE_LINK_CMD_PORTID_GET(
					ntohl(lcmd->op_to_portid));
		fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
		vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));

		if (lstatus == FCOE_LINKUP) {
			/* HW lock here */
			spin_lock_irq(&hw->lock);
			csio_handle_link_up(hw, portid, fcfi, vnpi);
			spin_unlock_irq(&hw->lock);
			/* HW unlock here */
		} else if (lstatus == FCOE_LINKDOWN) {
			/* HW lock here */
			spin_lock_irq(&hw->lock);
			csio_handle_link_down(hw, portid, fcfi, vnpi);
			spin_unlock_irq(&hw->lock);
			/* HW unlock here */
		} else {
			csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
				  lcmd->lstatus);
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else if (cpl_op == CPL_FW6_PLD) {
		wr = (struct fw_wr_hdr *) (cmd + 4);
		if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_RDEV_WR) {
			rdev_wr = (struct fw_rdev_wr *) (cmd + 4);

			rdev_flowid = FW_RDEV_WR_FLOWID_GET(
					ntohl(rdev_wr->alloc_to_len16));
			vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
					ntohl(rdev_wr->flags_to_assoc_flowid));

			csio_dbg(hw,
				"FW_RDEV_WR: flowid:x%x ev_cause:x%x "
				"vnpi:0x%x\n", rdev_flowid,
				rdev_wr->event_cause, vnpi);

			if (rdev_wr->protocol != PROT_FCOE) {
				csio_err(hw,
					"FW_RDEV_WR: invalid proto:x%x "
					"received with flowid:x%x\n",
					rdev_wr->protocol,
					rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				return;
			}

			/* HW lock here */
			spin_lock_irq(&hw->lock);
			ln = csio_ln_lookup_by_vnpi(hw, vnpi);
			if (!ln) {
				csio_err(hw,
					"FW_RDEV_WR: invalid vnpi:x%x received "
					"with flowid:x%x\n", vnpi, rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				goto out_pld;
			}

			rn = csio_confirm_rnode(ln, rdev_flowid,
					&rdev_wr->u.fcoe_rdev);
			if (!rn) {
				csio_ln_dbg(ln,
					"Failed to confirm rnode "
					"for flowid:x%x\n", rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				goto out_pld;
			}

			/* save previous event for debugging */
			ln->prev_evt = ln->cur_evt;
			ln->cur_evt = rdev_wr->event_cause;
			CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);

			/* Translate all the fabric events to lnode SM events */
			evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
			if (evt) {
				csio_ln_dbg(ln,
					"Posting event to lnode event:%d "
					"cause:%d flowid:x%x\n", evt,
					rdev_wr->event_cause, rdev_flowid);
				csio_post_event(&ln->sm, evt);
			}

			/* Hand the event over to the rnode SM here. */
			csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
out_pld:
			spin_unlock_irq(&hw->lock);
			return;
		} else {
			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
				  FW_WR_OP_GET(be32_to_cpu((wr->hi))));
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else if (cpl_op == CPL_FW6_MSG) {
		wr = (struct fw_wr_hdr *) (cmd);
		if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
			csio_ln_mgmt_wr_handler(hw, wr,
					sizeof(struct fw_fcoe_els_ct_wr));
		} else {
			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
				  FW_WR_OP_GET(be32_to_cpu((wr->hi))));
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else {
		csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
		CSIO_INC_STATS(hw, n_cpl_unexp);
	}
}
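
/*
 * Illustrative sketch (not driver code): how the WR header is located in
 * the two CPL flavors handled above. For CPL_FW6_PLD the fw_wr_hdr sits
 * 4 bytes into the message payload, while for CPL_FW6_MSG it starts at
 * offset 0. The helper name example_wr_opcode() is hypothetical.
 */
#if 0
static unsigned int example_wr_opcode(int cpl_op, const uint8_t *cmd)
{
	const struct fw_wr_hdr *wr;

	wr = (const struct fw_wr_hdr *)
			(cpl_op == CPL_FW6_PLD ? cmd + 4 : cmd);
	return FW_WR_OP_GET(be32_to_cpu(wr->hi));
}
#endif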

/**
 * csio_lnode_start - Kickstart lnode discovery.
 * @ln: lnode
 *
 * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
 */
int
csio_lnode_start(struct csio_lnode *ln)
{
	int rv = 0;

	if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
		rv = csio_fcoe_enable_link(ln, 1);
		ln->flags |= CSIO_LNF_LINK_ENABLE;
	}

	return rv;
}

/**
 * csio_lnode_stop - Stop the lnode.
 * @ln: lnode
 *
 * This routine is invoked by the HW module to stop an lnode and its
 * associated NPIV lnodes.
 */
void
csio_lnode_stop(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
	if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
		csio_fcoe_enable_link(ln, 0);
		ln->flags &= ~CSIO_LNF_LINK_ENABLE;
	}
	csio_ln_dbg(ln, "stopping ln :%p\n", ln);
}

/**
 * csio_lnode_close - Close an lnode.
 * @ln: lnode
 *
 * This routine is invoked by the HW module to close an lnode and its
 * associated NPIV lnodes. The lnode and its associated NPIV lnodes are
 * set to the uninitialized state.
 */
void
csio_lnode_close(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_CLOSE);
	if (csio_is_phys_ln(ln))
		ln->vnp_flowid = CSIO_INVALID_IDX;
	csio_ln_dbg(ln, "closed ln :%p\n", ln);
}
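
/*
 * Hedged usage sketch (illustrative only): the lifecycle the HW module is
 * expected to drive. CSIO_LNF_LINK_ENABLE keeps csio_lnode_start()/stop()
 * from issuing duplicate FCOE_LINK commands; csio_notify_lnodes() below
 * shows the real fan-out.
 */
#if 0
csio_lnode_start(ln);	/* enable link, kick off discovery */
/* ... lnode carries traffic ... */
csio_lnode_stop(ln);	/* post CSIO_LNE_DOWN_LINK, disable link */
csio_lnode_close(ln);	/* post CSIO_LNE_CLOSE, back to uninit state */
#endif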

/*
 * csio_ln_prep_ecwr - Prepare an ELS/CT WR.
 * @io_req - IO request.
 * @wr_len - WR len
 * @immd_len - WR immediate data length
 * @sub_op - Sub opcode
 * @sid - source portid
 * @did - destination portid
 * @flow_id - flowid
 * @fw_wr - ELS/CT WR to be prepared.
 * Returns: 0 - on success
 */
static int
csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
		  uint32_t immd_len, uint8_t sub_op, uint32_t sid,
		  uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
{
	struct fw_fcoe_els_ct_wr *wr;
	__be32 port_id;

	wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) |
				     FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));

	wr_len = DIV_ROUND_UP(wr_len, 16);
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) |
				       FW_WR_LEN16(wr_len));
	wr->els_ct_type = sub_op;
	wr->ctl_pri = 0;
	wr->cp_en_class = 0;
	wr->cookie = io_req->fw_handle;
	wr->iqid = cpu_to_be16(csio_q_physiqid(
					io_req->lnode->hwp, io_req->iq_idx));
	wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
	wr->tmo_val = (uint8_t) io_req->tmo;

	/* 24-bit FC port ids: copy the low 3 bytes in wire (big-endian) order */
	port_id = htonl(sid);
	memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
	port_id = htonl(did);
	memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
	wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
	return 0;
}
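
/*
 * Worked example (illustrative; assumes PORT_ID_PTR() points past the
 * most-significant byte of the big-endian word): for sid = 0x010203,
 * htonl() lays the bytes out as 00 01 02 03 in memory, so the 3-byte copy
 * places 01 02 03 into wr->l_id, i.e. the on-wire form of the 24-bit FC_ID.
 */
#if 0
uint32_t sid = 0x010203;
__be32 port_id = htonl(sid);		/* memory: 00 01 02 03 */
uint8_t l_id[3];

memcpy(l_id, PORT_ID_PTR(port_id), 3);	/* l_id = { 0x01, 0x02, 0x03 } */
#endif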

/*
 * csio_ln_mgmt_submit_wr - Post an ELS/CT work request.
 * @mgmtm - mgmtm
 * @io_req - io request.
 * @sub_op - ELS or CT request type
 * @pld - DMA payload buffer
 * @pld_len - payload len
 * Prepares an ELS/CT work request and sends it to FW.
 * Returns: 0 - on success
 */
static int
csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
		       uint8_t sub_op, struct csio_dma_buf *pld,
		       uint32_t pld_len)
{
	struct csio_wr_pair wrp;
	struct csio_lnode *ln = io_req->lnode;
	struct csio_rnode *rn = io_req->rnode;
	struct csio_hw *hw = mgmtm->hw;
	uint8_t fw_wr[64];
	struct ulptx_sgl dsgl;
	uint32_t wr_size = 0;
	uint8_t im_len = 0;
	uint32_t wr_off = 0;
	int ret = 0;

	/* Calculate WR size for this ELS REQ */
	wr_size = sizeof(struct fw_fcoe_els_ct_wr);

	/* Send as immediate data if pld < 256 */
	if (pld_len < 256) {
		wr_size += ALIGN(pld_len, 8);
		im_len = (uint8_t)pld_len;
	} else
		wr_size += sizeof(struct ulptx_sgl);

	/* Roundup WR size in units of 16 bytes */
	wr_size = ALIGN(wr_size, 16);

	/* Get WR to send ELS REQ */
	ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
	if (ret != 0) {
		csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
			io_req, ret);
		return ret;
	}

	/* Prepare generic WR used by all ELS/CT cmds */
	csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
			  ln->nport_id, rn->nport_id,
			  csio_rn_flowid(rn),
			  &fw_wr[0]);

	/* Copy ELS/CT WR CMD */
	csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
			    sizeof(struct fw_fcoe_els_ct_wr));
	wr_off += sizeof(struct fw_fcoe_els_ct_wr);

	/* Copy payload to Immediate section of WR */
	if (im_len)
		csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
	else {
		/* Program DSGL to dma payload */
		dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
					ULPTX_MORE | ULPTX_NSGE(1));
		dsgl.len0 = cpu_to_be32(pld_len);
		dsgl.addr0 = cpu_to_be64(pld->paddr);
		csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
				    sizeof(struct ulptx_sgl));
	}

	/* Issue work request to xmit ELS/CT req to FW */
	csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
	return ret;
}
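
/*
 * Minimal sketch (not driver code) of the WR sizing above: payloads under
 * 256 bytes travel as immediate data padded to 8 bytes; larger payloads
 * are described by a single ULPTX DSGL entry. The result is rounded up to
 * the 16-byte units that FW_WR_LEN16 encodes in csio_ln_prep_ecwr().
 */
#if 0
static uint32_t example_ecwr_size(uint32_t pld_len)
{
	uint32_t wr_size = sizeof(struct fw_fcoe_els_ct_wr);

	if (pld_len < 256)
		wr_size += ALIGN(pld_len, 8);		/* immediate copy */
	else
		wr_size += sizeof(struct ulptx_sgl);	/* one-entry DSGL */

	return ALIGN(wr_size, 16);
}
#endif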

/*
 * csio_ln_mgmt_submit_req - Submit an FCOE mgmt request.
 * @io_req - IO request
 * @io_cbfn - Completion handler.
 * @req_type - ELS or CT request type
 * @pld - DMA payload buffer
 * @pld_len - payload len
 *
 * This API is used to submit a management ELS/CT request.
 * It is called with the hw lock held.
 * Returns: 0 - on success
 *	    -ENOMEM - on error.
 */
static int
csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
		void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
		enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
		uint32_t pld_len)
{
	struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;

	io_req->io_cbfn = io_cbfn;	/* Upper layer callback handler */
	io_req->fw_handle = (uintptr_t) (io_req);
	io_req->eq_idx = mgmtm->eq_idx;
	io_req->iq_idx = mgmtm->iq_idx;

	rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
	if (rv == 0) {
		list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
		mgmtm->stats.n_active++;
	}
	return rv;
}
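
/*
 * Hedged usage sketch: submitting a management request. example_mgmt_done()
 * and example_submit() are hypothetical; FCOE_CT is assumed to be a valid
 * enum fcoe_cmn_type value. Per the comment above, the hw lock must be
 * held around the call.
 */
#if 0
static void example_mgmt_done(struct csio_hw *hw, struct csio_ioreq *io_req)
{
	/* inspect completion status, reclaim io_req->dma_buf, etc. */
}

static int example_submit(struct csio_ioreq *io_req, uint32_t pld_len)
{
	/* caller already holds the hw lock here */
	return csio_ln_mgmt_submit_req(io_req, example_mgmt_done, FCOE_CT,
				       &io_req->dma_buf, pld_len);
}
#endif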

/*
 * csio_ln_fdmi_init - FDMI Init entry point.
 * @ln: lnode
 */
static int
csio_ln_fdmi_init(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_dma_buf *dma_buf;

	/* Allocate MGMT request required for FDMI */
	ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
	if (!ln->mgmt_req) {
		csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Allocate DMA buffer for FDMI response payload */
	dma_buf = &ln->mgmt_req->dma_buf;
	dma_buf->len = 2048;
	dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
					      &dma_buf->paddr);
	if (!dma_buf->vaddr) {
		csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
		kfree(ln->mgmt_req);
		ln->mgmt_req = NULL;
		return -ENOMEM;
	}

	ln->flags |= CSIO_LNF_FDMI_ENABLE;
	return 0;
}

/*
 * csio_ln_fdmi_exit - FDMI exit entry point.
 * @ln: lnode
 */
static int
csio_ln_fdmi_exit(struct csio_lnode *ln)
{
	struct csio_dma_buf *dma_buf;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (!ln->mgmt_req)
		return 0;

	dma_buf = &ln->mgmt_req->dma_buf;
	if (dma_buf->vaddr)
		pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
				    dma_buf->paddr);

	kfree(ln->mgmt_req);
	return 0;
}
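
/*
 * Aside (sketch, not driver code): pci_alloc_consistent()/
 * pci_free_consistent() are legacy wrappers over the generic DMA API; the
 * FDMI buffer handling above is roughly equivalent to the following (the
 * compat wrappers historically allocate with GFP_ATOMIC).
 */
#if 0
dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
				    &dma_buf->paddr, GFP_ATOMIC);
dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
		  dma_buf->paddr);
#endif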

/*
 * csio_scan_done - Decide whether target scanning can stop.
 * @ln: lnode
 * @ticks: current tick count
 * @time: time elapsed since the scan started
 * @max_scan_ticks: upper bound on scan duration
 * @delta_scan_ticks: re-check interval
 *
 * Returns 1 once @max_scan_ticks have elapsed, or once the discovered
 * target count has been stable across one @delta_scan_ticks window.
 */
int
csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
		unsigned long time, unsigned long max_scan_ticks,
		unsigned long delta_scan_ticks)
{
	int rv = 0;

	if (time >= max_scan_ticks)
		return 1;

	if (!ln->tgt_scan_tick)
		ln->tgt_scan_tick = ticks;

	if ((ticks - ln->tgt_scan_tick) >= delta_scan_ticks) {
		if (!ln->last_scan_ntgts)
			ln->last_scan_ntgts = ln->n_scsi_tgts;
		else {
			if (ln->last_scan_ntgts == ln->n_scsi_tgts)
				return 1;

			ln->last_scan_ntgts = ln->n_scsi_tgts;
		}
		ln->tgt_scan_tick = ticks;
	}
	return rv;
}
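
/*
 * Hedged usage sketch (hypothetical caller): polled from a periodic rescan
 * timer, with all tick arguments in the caller's time base (e.g. jiffies).
 * example_finish_scan(), scan_start, scan_timer and the *_jiffies bounds
 * are illustrative names.
 */
#if 0
if (csio_scan_done(ln, jiffies, jiffies - scan_start,
		   max_scan_jiffies, delta_scan_jiffies))
	example_finish_scan(ln);
else
	mod_timer(&scan_timer, jiffies + delta_scan_jiffies);
#endif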

/*
 * csio_notify_lnodes:
 * @hw: HW module
 * @note: Notification
 *
 * Called from the HW SM to fan out notifications to the
 * lnode SM. Since the HW SM is entered with lock held,
 * there is no need to hold locks here.
 */
void
csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
{
	struct list_head *tmp;
	struct csio_lnode *ln;

	csio_dbg(hw, "Notifying all nodes of event %d\n", note);

	/* Traverse sibling lnodes list and send evt */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;

		switch (note) {
		case CSIO_LN_NOTIFY_HWREADY:
			csio_lnode_start(ln);
			break;

		case CSIO_LN_NOTIFY_HWRESET:
		case CSIO_LN_NOTIFY_HWREMOVE:
			csio_lnode_close(ln);
			break;

		case CSIO_LN_NOTIFY_HWSTOP:
			csio_lnode_stop(ln);
			break;

		default:
			break;
		}
	}
}

/*
 * csio_disable_lnodes:
 * @hw: HW module
 * @portid: port id
 * @disable: disable/enable flag.
 * If disable=1, disables all lnodes hosted on the given physical port;
 * otherwise enables all the lnodes on the given physical port.
 * This routine needs to be called with the hw lock held.
 */
void
csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
{
	struct list_head *tmp;
	struct csio_lnode *ln;

	csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);

	/* Traverse sibling lnodes list and send evt */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;
		if (ln->portid != portid)
			continue;

		if (disable)
			csio_lnode_stop(ln);
		else
			csio_lnode_start(ln);
	}
}
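
/*
 * Hedged usage sketch: quiescing every lnode on a port around a port-level
 * event. The portid value and the reset step are illustrative; the caller
 * holds the hw lock, per the comment above.
 */
#if 0
csio_disable_lnodes(hw, portid, true);	/* stop all lnodes on the port */
/* ... handle the port event ... */
csio_disable_lnodes(hw, portid, false);	/* restart them */
#endif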

/*
 * csio_ln_init - Initialize an lnode.
 * @ln: lnode
 */
static int
csio_ln_init(struct csio_lnode *ln)
{
	int rv = -EINVAL;
	struct csio_lnode *rln, *pln;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	csio_init_state(&ln->sm, csio_lns_uninit);
	ln->vnp_flowid = CSIO_INVALID_IDX;
	ln->fcf_flowid = CSIO_INVALID_IDX;

	if (csio_is_root_ln(ln)) {
		/* This is the lnode used during initialization */
		ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
		if (!ln->fcfinfo) {
			csio_ln_err(ln, "Failed to alloc FCF record\n");
			CSIO_INC_STATS(hw, n_err_nomem);
			goto err;
		}

		INIT_LIST_HEAD(&ln->fcf_lsthead);
		kref_init(&ln->fcfinfo->kref);

		if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
			goto err;

	} else { /* Either a non-root physical or a virtual lnode */
		/*
		 * The rest is common for non-root physical and NPIV lnodes.
		 * Just get references to all other modules.
		 */
		rln = csio_root_lnode(ln);

		if (csio_is_npiv_ln(ln)) {
			/* NPIV */
			pln = csio_parent_lnode(ln);
			kref_get(&pln->fcfinfo->kref);
			ln->fcfinfo = pln->fcfinfo;
		} else {
			/* Another non-root physical lnode (FCF) */
			ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
					      GFP_KERNEL);
			if (!ln->fcfinfo) {
				csio_ln_err(ln, "Failed to alloc FCF info\n");
				CSIO_INC_STATS(hw, n_err_nomem);
				goto err;
			}

			kref_init(&ln->fcfinfo->kref);

			if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
				goto err;
		}

	} /* if (!csio_is_root_ln(ln)) */

	return 0;
err:
	return rv;
}

static void
csio_ln_exit(struct csio_lnode *ln)
{
	struct csio_lnode *pln;

	csio_cleanup_rns(ln);
	if (csio_is_npiv_ln(ln)) {
		pln = csio_parent_lnode(ln);
		kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
	} else {
		kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
		if (csio_fdmi_enable)
			csio_ln_fdmi_exit(ln);
	}
	ln->fcfinfo = NULL;
}

/**
 * csio_lnode_init - Initialize the members of an lnode.
 * @ln: lnode
 * @hw: HW module
 * @pln: Parent lnode, NULL for a physical (sibling) lnode
 */
int
csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
		struct csio_lnode *pln)
{
	int rv = -EINVAL;

	/* Link this lnode to hw */
	csio_lnode_to_hw(ln) = hw;

	/* Link child to parent if this is a child lnode */
	if (pln)
		ln->pln = pln;
	else
		ln->pln = NULL;

	/* Initialize scsi_tgt and timers to zero */
	ln->n_scsi_tgts = 0;
	ln->last_scan_ntgts = 0;
	ln->tgt_scan_tick = 0;

	/* Initialize rnode list */
	INIT_LIST_HEAD(&ln->rnhead);
	INIT_LIST_HEAD(&ln->cln_head);

	/* Initialize log level for debug */
	ln->params.log_level = hw->params.log_level;

	if (csio_ln_init(ln))
		goto err;

	/* Add lnode to list of sibling or children lnodes */
	spin_lock_irq(&hw->lock);
	list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
	if (pln)
		pln->num_vports++;
	spin_unlock_irq(&hw->lock);

	hw->num_lns++;

	return 0;
err:
	csio_lnode_to_hw(ln) = NULL;
	return rv;
}
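
/*
 * Hedged usage sketch: instantiating an NPIV child lnode under a physical
 * parent pln. example_alloc_lnode() is hypothetical; on success the child
 * is linked on pln->cln_head and pln->num_vports is bumped, as above.
 */
#if 0
struct csio_lnode *vln = example_alloc_lnode(hw);	/* hypothetical */

if (vln && csio_lnode_init(vln, hw, pln) != 0) {
	/* failed: csio_lnode_to_hw(vln) was reset to NULL; free vln */
}
#endif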

/**
 * csio_lnode_exit - De-instantiate an lnode.
 * @ln: lnode
 */
void
csio_lnode_exit(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	csio_ln_exit(ln);

	/* Remove this lnode from hw->sln_head */
	spin_lock_irq(&hw->lock);

	list_del_init(&ln->sm.sm_list);

	/* If it is a child lnode, decrement the
	 * counter in its parent lnode
	 */
	if (ln->pln)
		ln->pln->num_vports--;

	/* Update root lnode pointer */
	if (list_empty(&hw->sln_head))
		hw->rln = NULL;
	else
		hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);

	spin_unlock_irq(&hw->lock);

	csio_lnode_to_hw(ln) = NULL;
	hw->num_lns--;
}