bna_enet.c

/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
static inline int
ethport_can_be_up(struct bna_ethport *ethport)
{
	int ready = 0;

	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	else
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	return ready;
}
#define ethport_is_up ethport_can_be_up

enum bna_ethport_event {
	ETHPORT_E_START = 1,
	ETHPORT_E_STOP = 2,
	ETHPORT_E_FAIL = 3,
	ETHPORT_E_UP = 4,
	ETHPORT_E_DOWN = 5,
	ETHPORT_E_FWRESP_UP_OK = 6,
	ETHPORT_E_FWRESP_DOWN = 7,
	ETHPORT_E_FWRESP_UP_FAIL = 8,
};

enum bna_enet_event {
	ENET_E_START = 1,
	ENET_E_STOP = 2,
	ENET_E_FAIL = 3,
	ENET_E_PAUSE_CFG = 4,
	ENET_E_MTU_CFG = 5,
	ENET_E_FWRESP_PAUSE = 6,
	ENET_E_CHLD_STOPPED = 7,
};

enum bna_ioceth_event {
	IOCETH_E_ENABLE = 1,
	IOCETH_E_DISABLE = 2,
	IOCETH_E_IOC_RESET = 3,
	IOCETH_E_IOC_FAILED = 4,
	IOCETH_E_IOC_READY = 5,
	IOCETH_E_ENET_ATTR_RESP = 6,
	IOCETH_E_ENET_STOPPED = 7,
	IOCETH_E_IOC_DISABLED = 8,
};
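
/*
 * Copy one hardware stats block from the DMA-able area to its software
 * mirror, converting each 64-bit counter from the firmware's big-endian
 * layout to CPU byte order. Relies on count, i, stats_src and stats_dst
 * being declared in the calling scope.
 */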
#define bna_stats_copy(_name, _type) \
do { \
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \
	for (i = 0; i < count; i++) \
		stats_dst[i] = be64_to_cpu(stats_src[i]); \
} while (0)

/*
 * FW response handlers
 */
static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}

static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}

static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}

static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}

static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}

static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}

static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}

static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}

static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}

static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}

void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(bna, intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(bna, intr_status))
		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
}
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}

/* ETHPORT */
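
/*
 * The call_* macros below clear the saved callback pointer before
 * invoking it, so a callback that re-enters the state machine cannot
 * trigger a second, stale invocation.
 */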
#define call_ethport_stop_cbfn(_ethport) \
do { \
	if ((_ethport)->stop_cbfn) { \
		void (*cbfn)(struct bna_enet *); \
		cbfn = (_ethport)->stop_cbfn; \
		(_ethport)->stop_cbfn = NULL; \
		cbfn(&(_ethport)->bna->enet); \
	} \
} while (0)

#define call_ethport_adminup_cbfn(ethport, status) \
do { \
	if ((ethport)->adminup_cbfn) { \
		void (*cbfn)(struct bnad *, enum bna_cb_status); \
		cbfn = (ethport)->adminup_cbfn; \
		(ethport)->adminup_cbfn = NULL; \
		cbfn((ethport)->bna->bnad, status); \
	} \
} while (0)

static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	/* Up and down share one message id; "enable" selects the action */
	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_up(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_up(ethport);
	else
		bna_bfi_ethport_lpbk_up(ethport);
}

static void
bna_bfi_ethport_down(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_down(ethport);
	else
		bna_bfi_ethport_lpbk_down(ethport);
}
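
/*
 * Ethport state machine: stopped -> down on start; down -> up_resp_wait
 * once the port can come up. up_resp_wait and down_resp_wait each track
 * one outstanding firmware request, and last_resp_wait drains the final
 * response during a stop. ETHPORT_E_FAIL from any state falls back to
 * stopped.
 */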
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);

static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}

static void
bna_ethport_uninit(struct bna_ethport *ethport)
{
	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	ethport->bna = NULL;
}

static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}

static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}

/* Should be called only when ethport is disabled */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}

void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}

/* ENET */
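
/*
 * Child start/stop helpers. Stopping is asynchronous: the wait counter
 * chld_stop_wc is raised once per child (ethport, tx_mod, rx_mod) and
 * each child's stop callback lowers it; once all children have stopped,
 * bna_enet_cb_chld_stopped() posts ENET_E_CHLD_STOPPED to the enet FSM.
 */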
#define bna_enet_chld_start(enet) \
do { \
	enum bna_tx_type tx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
	enum bna_rx_type rx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
	bna_ethport_start(&(enet)->bna->ethport); \
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type); \
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
} while (0)

#define bna_enet_chld_stop(enet) \
do { \
	enum bna_tx_type tx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
	enum bna_rx_type rx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc); \
	bna_ethport_stop(&(enet)->bna->ethport); \
	bfa_wc_up(&(enet)->chld_stop_wc); \
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type); \
	bfa_wc_up(&(enet)->chld_stop_wc); \
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
	bfa_wc_wait(&(enet)->chld_stop_wc); \
} while (0)

#define bna_enet_chld_fail(enet) \
do { \
	bna_ethport_fail(&(enet)->bna->ethport); \
	bna_tx_mod_fail(&(enet)->bna->tx_mod); \
	bna_rx_mod_fail(&(enet)->bna->rx_mod); \
} while (0)

#define bna_enet_rx_start(enet) \
do { \
	enum bna_rx_type rx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
} while (0)

#define bna_enet_rx_stop(enet) \
do { \
	enum bna_rx_type rx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc); \
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
	bfa_wc_wait(&(enet)->chld_stop_wc); \
} while (0)

#define call_enet_stop_cbfn(enet) \
do { \
	if ((enet)->stop_cbfn) { \
		void (*cbfn)(void *); \
		void *cbarg; \
		cbfn = (enet)->stop_cbfn; \
		cbarg = (enet)->stop_cbarg; \
		(enet)->stop_cbfn = NULL; \
		(enet)->stop_cbarg = NULL; \
		cbfn(cbarg); \
	} \
} while (0)

#define call_enet_pause_cbfn(enet) \
do { \
	if ((enet)->pause_cbfn) { \
		void (*cbfn)(struct bnad *); \
		cbfn = (enet)->pause_cbfn; \
		(enet)->pause_cbfn = NULL; \
		cbfn((enet)->bna->bnad); \
	} \
} while (0)

#define call_enet_mtu_cbfn(enet) \
do { \
	if ((enet)->mtu_cbfn) { \
		void (*cbfn)(struct bnad *); \
		cbfn = (enet)->mtu_cbfn; \
		(enet)->mtu_cbfn = NULL; \
		cbfn((enet)->bna->bnad); \
	} \
} while (0)

static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);
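
/*
 * Enet state machine: a start first programs pause settings
 * (pause_init_wait) before starting the children; started handles
 * runtime pause/MTU reconfiguration through cfg_wait, which coalesces
 * changes that arrive while a previous request is still outstanding via
 * the PAUSE_CHANGED/MTU_CHANGED flags.
 */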
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);

static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}

static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}

static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}

static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;

	enet->bna = NULL;
}

static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;

	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}

static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}

static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}

static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}

void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}

void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_fsm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}

void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}

void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}

void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}

void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}

/* IOCETH */
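
/*
 * Mailbox interrupt helpers: interrupts are enabled when the IOC comes
 * out of reset (IOCETH_E_IOC_RESET) and disabled again on IOC failure
 * or disable, as driven by the ioceth state machine below.
 */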
#define enable_mbox_intr(_ioceth) \
do { \
	u32 intr_status; \
	bna_intr_status_get((_ioceth)->bna, intr_status); \
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad); \
	bna_mbox_intr_enable((_ioceth)->bna); \
} while (0)

#define disable_mbox_intr(_ioceth) \
do { \
	bna_mbox_intr_disable((_ioceth)->bna); \
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad); \
} while (0)

#define call_ioceth_stop_cbfn(_ioceth) \
do { \
	if ((_ioceth)->stop_cbfn) { \
		void (*cbfn)(struct bnad *); \
		struct bnad *cbarg; \
		cbfn = (_ioceth)->stop_cbfn; \
		cbarg = (_ioceth)->stop_cbarg; \
		(_ioceth)->stop_cbfn = NULL; \
		(_ioceth)->stop_cbarg = NULL; \
		cbfn(cbarg); \
	} \
} while (0)

#define bna_stats_mod_uninit(_stats_mod) \
do { \
} while (0)

#define bna_stats_mod_start(_stats_mod) \
do { \
	(_stats_mod)->ioc_ready = true; \
} while (0)

#define bna_stats_mod_stop(_stats_mod) \
do { \
	(_stats_mod)->ioc_ready = false; \
} while (0)

#define bna_stats_mod_fail(_stats_mod) \
do { \
	(_stats_mod)->ioc_ready = false; \
	(_stats_mod)->stats_get_busy = false; \
	(_stats_mod)->stats_clr_busy = false; \
} while (0)

static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
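
/*
 * Ioceth state machine: enable walks stopped -> ioc_ready_wait ->
 * enet_attr_wait -> ready, querying firmware attributes along the way.
 * Disable drains any outstanding attribute response (last_resp_wait),
 * stops the enet (enet_stop_wait) and then the IOC (ioc_disable_wait).
 * IOC failure from any active state lands in failed, from which an IOC
 * reset restarts the bring-up.
 */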
bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);

static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}

static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}

static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}

static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}

/* IOC callback functions */
static void
bna_cb_ioceth_enable(void *arg, enum bfa_status error)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	if (error)
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
	else
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
}

static void
bna_cb_ioceth_disable(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
}

static void
bna_cb_ioceth_hbfail(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
}

static void
bna_cb_ioceth_reset(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
}

static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	bna_cb_ioceth_enable,
	bna_cb_ioceth_disable,
	bna_cb_ioceth_hbfail,
	bna_cb_ioceth_reset
};
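
/*
 * Seed default resource limits from compile-time values; the firmware
 * attribute response (bna_bfi_attr_get_rsp()) replaces them on the
 * first successful query, gated by fw_query_complete.
 */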
  1424. static void bna_attr_init(struct bna_ioceth *ioceth)
  1425. {
  1426. ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
  1427. ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
  1428. ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
  1429. ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
  1430. ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
  1431. ioceth->attr.fw_query_complete = false;
  1432. }
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/*
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/*
	 * Attach common modules (CEE, flash, msgq) and claim respective
	 * slices of the shared DMA block; kva/dma advance past each
	 * module's slice.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;

	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}

void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
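
/*
 * Caller-side usage sketch (assumptions: a bnad-style caller with a
 * bna_lock spinlock and a completion that bnad_cb_ioceth_ready() fires;
 * none of this is code from this file):
 */
#if 0	/* illustrative only */
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_ioceth_enable(&bnad->bna.ioceth);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Wait for the FSM to reach ready; bnad_cb_ioceth_ready() completes it. */
	wait_for_completion(&bnad->bnad_completions.ioc_comp);
#endif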
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}

static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i = 0;

	/* Walk the free list to count remaining entries; a sanity walk
	 * only, the count itself is unused.
	 */
	list_for_each(qe, &ucam_mod->free_q)
		i++;

	ucam_mod->bna = NULL;
}
static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->mchandle = (struct bna_mcam_handle *)
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
		list_add_tail(&mcam_mod->mchandle[i].qe,
			      &mcam_mod->free_handle_q);
	}

	mcam_mod->bna = bna;
}

static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i;

	/* Sanity walks of both free lists; the counts are unused. */
	i = 0;
	list_for_each(qe, &mcam_mod->free_q)
		i++;

	i = 0;
	list_for_each(qe, &mcam_mod->free_handle_q)
		i++;

	mcam_mod->bna = NULL;
}
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats), PAGE_SIZE);
}
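
/*
 * bna_res_req() only describes what is needed; the caller performs the
 * allocations. A minimal sketch of the consuming side (names such as
 * bnad_res_alloc are assumptions, modelled on what a bnad-style caller
 * does): back each entry with coherent DMA memory or plain kernel memory
 * according to mem_type.
 */
#if 0	/* illustrative only */
static int
bnad_res_alloc(struct pci_dev *pdev, struct bna_res_info *res_info, int count)
{
	dma_addr_t dma_pa;
	int i;

	for (i = 0; i < count; i++) {
		struct bna_mem_info *mi = &res_info[i].res_u.mem_info;

		if (mi->mem_type == BNA_MEM_T_DMA)
			mi->mdl[0].kva = dma_alloc_coherent(&pdev->dev,
					mi->len, &dma_pa, GFP_KERNEL);
		else
			mi->mdl[0].kva = kzalloc(mi->len, GFP_KERNEL);

		if (!mi->mdl[0].kva)
			return -ENOMEM;	/* caller unwinds earlier entries */

		if (mi->mem_type == BNA_MEM_T_DMA)
			BNA_SET_DMA_ADDR(dma_pa, &mi->mdl[0].dma);
		mi->mdl[0].len = mi->len;
	}
	return 0;
}
#endif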
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module (two RxQs per RxPath) */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		attr->num_ucmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
void
bna_init(struct bna *bna, struct bnad *bnad,
	 struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also attaches and initializes the cee, flash and msgq modules */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		/* Tear the modules down in reverse order of bna_mod_init() */
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
int
bna_num_txq_set(struct bna *bna, int num_txq)
{
	if (bna->ioceth.attr.fw_query_complete &&
		(num_txq <= bna->ioceth.attr.num_txq)) {
		bna->ioceth.attr.num_txq = num_txq;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}

int
bna_num_rxp_set(struct bna *bna, int num_rxp)
{
	if (bna->ioceth.attr.fw_query_complete &&
		(num_rxp <= bna->ioceth.attr.num_rxp)) {
		bna->ioceth.attr.num_rxp = num_rxp;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}
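
/*
 * Usage sketch: once the GET_ATTR response has set fw_query_complete, a
 * caller may trim the firmware-reported maxima down to what it will really
 * use (the setters can only shrink, never grow, the counts). Variable
 * names below are assumptions; a failure means the request exceeded the
 * firmware maximum.
 */
#if 0	/* illustrative only */
	/* e.g. one TxQ, and no more RxPaths than online CPUs */
	if (bna_num_txq_set(&bnad->bna, 1) != BNA_CB_SUCCESS)
		goto err_out;
	if (bna_num_rxp_set(&bnad->bna, num_online_cpus()) != BNA_CB_SUCCESS)
		goto err_out;
#endif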
struct bna_mac *
bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;

	if (list_empty(&ucam_mod->free_q))
		return NULL;

	bfa_q_deq(&ucam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}
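
/*
 * Usage sketch for the CAM free-pool accessors (this pair and the mcam
 * variants below): entries are pre-allocated in *_mod_init(), so "get" is
 * just a dequeue and can only fail when the pool is exhausted. The caller
 * names and error path are assumptions.
 */
#if 0	/* illustrative only */
	struct bna_mac *mac;

	mac = bna_ucam_mod_mac_get(&bna->ucam_mod);
	if (!mac)
		return BNA_CB_FAIL;	/* unicast CAM pool exhausted */

	ether_addr_copy(mac->addr, new_addr);
	/* ... program the address into hardware, keep mac while in use ... */

	bna_ucam_mod_mac_put(&bna->ucam_mod, mac);	/* return when done */
#endif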
struct bna_mac *
bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}

struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_handle_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_handle_q, &qe);

	return (struct bna_mcam_handle *)qe;
}

void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}
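
/*
 * Usage sketch: bna_hw_stats_get() is asynchronous. On success the firmware
 * DMAs its counters into bna->stats.hw_stats_kva and completion is reported
 * through bnad_cb_stats_get(); the BNA_CB_FAIL / BNA_CB_BUSY callbacks above
 * are delivered synchronously. Caller-side names are assumptions.
 */
#if 0	/* illustrative only */
	/* e.g. from a periodic stats timer */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);	/* result via bnad_cb_stats_get() */
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
#endif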