/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"

static inline int
ethport_can_be_up(struct bna_ethport *ethport)
{
	int ready = 0;

	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	else
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	return ready;
}

#define ethport_is_up ethport_can_be_up

enum bna_ethport_event {
	ETHPORT_E_START = 1,
	ETHPORT_E_STOP = 2,
	ETHPORT_E_FAIL = 3,
	ETHPORT_E_UP = 4,
	ETHPORT_E_DOWN = 5,
	ETHPORT_E_FWRESP_UP_OK = 6,
	ETHPORT_E_FWRESP_DOWN = 7,
	ETHPORT_E_FWRESP_UP_FAIL = 8,
};

enum bna_enet_event {
	ENET_E_START = 1,
	ENET_E_STOP = 2,
	ENET_E_FAIL = 3,
	ENET_E_PAUSE_CFG = 4,
	ENET_E_MTU_CFG = 5,
	ENET_E_FWRESP_PAUSE = 6,
	ENET_E_CHLD_STOPPED = 7,
};

enum bna_ioceth_event {
	IOCETH_E_ENABLE = 1,
	IOCETH_E_DISABLE = 2,
	IOCETH_E_IOC_RESET = 3,
	IOCETH_E_IOC_FAILED = 4,
	IOCETH_E_IOC_READY = 5,
	IOCETH_E_ENET_ATTR_RESP = 6,
	IOCETH_E_ENET_STOPPED = 7,
	IOCETH_E_IOC_DISABLED = 8,
};
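
/*
 * Note on structure: this file drives three cooperating finite state
 * machines built on the bfa_fsm_* helpers: ethport (admin link up/down),
 * enet (pause/MTU configuration plus start/stop of the ethport, Tx and Rx
 * children) and ioceth (IOC bring-up, attribute query and teardown). The
 * enums above are the complete event vocabularies of those machines.
 */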
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
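
/*
 * For illustration only (not part of the original driver):
 * bna_stats_copy(mac, mac) in bna_bfi_stats_get_rsp() below expands
 * roughly to
 *
 *	count = sizeof(struct bfi_enet_stats_mac) / sizeof(u64);
 *	stats_src = (u64 *)&bna->stats.hw_stats_kva->mac_stats;
 *	stats_dst = (u64 *)&bna->stats.hw_stats.mac_stats;
 *	for (i = 0; i < count; i++)
 *		stats_dst[i] = be64_to_cpu(stats_src[i]);
 *
 * i.e. a big-endian u64 block copy from the DMA-able area into the
 * driver's software copy.
 */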

/*
 * FW response handlers
 */

static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}

static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}

static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}

static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}

static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}

static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}

static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;

	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
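
/*
 * Layout note on the response parsed above: the firmware reports the
 * fixed mac/bpc/rad/rlb/fc blocks at known offsets, but packs the rxf/txf
 * blocks back to back for enabled functions only. The two loops therefore
 * index the destination array directly while advancing the source pointer
 * only when the corresponding mask bit is set, zero-filling the rest.
 */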

static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}

static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}

static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}

void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(bna, intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(bna, intr_status))
		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
}

static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}

/* ETHPORT */

#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
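
/*
 * Both call_* macros above use the same one-shot pattern: copy the saved
 * callback into a local and clear the stored pointer *before* invoking it,
 * so a callback that re-enters the FSM cannot be dispatched twice for the
 * same completion. The same pattern recurs for the enet and ioceth
 * callback macros further down.
 */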

static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_up(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_up(ethport);
	else
		bna_bfi_ethport_lpbk_up(ethport);
}

static void
bna_bfi_ethport_down(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_down(ethport);
	else
		bna_bfi_ethport_lpbk_down(ethport);
}

bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
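
/*
 * A minimal sketch of what each bfa_fsm_state_decl() above provides,
 * assuming the usual bfa_cs.h helper definitions: it forward-declares a
 * state handler and its entry hook, e.g. for the "stopped" state:
 *
 *	static void bna_ethport_sm_stopped_entry(struct bna_ethport *);
 *	static void bna_ethport_sm_stopped(struct bna_ethport *,
 *				enum bna_ethport_event);
 *
 * bfa_fsm_set_state() stores the handler and runs the _entry hook;
 * bfa_fsm_send_event() dispatches the event to the current handler.
 */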

static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}

static void
bna_ethport_uninit(struct bna_ethport *ethport)
{
	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	ethport->bna = NULL;
}

static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}

static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}

/* Should be called only when ethport is disabled */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}

void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}

/* ENET */

#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet)); \
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
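
/*
 * bna_enet_chld_stop() relies on the bfa_wc wait-counter pattern:
 * bfa_wc_init() registers bna_enet_cb_chld_stopped() and takes an initial
 * reference, one bfa_wc_up() is taken per child before its stop is issued,
 * and each child's stop-done callback (bna_enet_cb_ethport_stopped() above,
 * bna_enet_cb_tx_stopped() and bna_enet_cb_rx_stopped() below) drops one
 * via bfa_wc_down(). bfa_wc_wait() drops the initial reference, so the
 * ENET_E_CHLD_STOPPED event fires only once all three children report in.
 */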

#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)

#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet)); \
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);

static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}

static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}

static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}

static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;

	enet->bna = NULL;
}

static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;
	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}

static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}

static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}

static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}

void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}

void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}

void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}

void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}

void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}

void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}

/* IOCETH */

#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)

static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);

static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}

static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}

static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}

static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}

/* IOC callback functions */

static void
bna_cb_ioceth_enable(void *arg, enum bfa_status error)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	if (error)
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
	else
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
}

static void
bna_cb_ioceth_disable(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
}

static void
bna_cb_ioceth_hbfail(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
}

static void
bna_cb_ioceth_reset(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
}

static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	bna_cb_ioceth_enable,
	bna_cb_ioceth_disable,
	bna_cb_ioceth_hbfail,
	bna_cb_ioceth_reset
};
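
/*
 * The positional initializers above depend on the member order of
 * struct bfa_ioc_cbfn (enable, disable, heartbeat-failure and reset
 * callbacks, in that order). Designated initializers such as
 *
 *	static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
 *		.enable_cbfn = bna_cb_ioceth_enable,
 *		...
 *	};
 *
 * would be the more robust idiom, assuming the bfa_ioc.h member names;
 * the behavior is identical either way.
 */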
  1420. static void bna_attr_init(struct bna_ioceth *ioceth)
  1421. {
  1422. ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
  1423. ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
  1424. ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
  1425. ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
  1426. ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
  1427. ioceth->attr.fw_query_complete = false;
  1428. }
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (CEE, Flash, MSGQ) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;

	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}

static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
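
/*
 * Enable/disable entry points for the host driver (bnad). A minimal
 * caller sketch, assuming the bnad convention of holding bna_lock
 * around BNA state changes (error handling omitted):
 *
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	bna_ioceth_enable(&bnad->bna.ioceth);
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *
 * Completion is reported asynchronously through bnad_cb_ioceth_ready().
 */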
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}

void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}

static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}

static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i = 0;

	/* Count the entries still on the free list; the count itself is
	 * unused, so this walk only serves as a hook for leak checking
	 * under a debugger.
	 */
	list_for_each(qe, &ucam_mod->free_q)
		i++;

	ucam_mod->bna = NULL;
}

static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->mchandle = (struct bna_mcam_handle *)
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
		list_add_tail(&mcam_mod->mchandle[i].qe,
			&mcam_mod->free_handle_q);
	}

	mcam_mod->bna = bna;
}

static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i;

	/* As in bna_ucam_mod_uninit(), the counts are unused; the walks
	 * only verify that the free lists are still traversable.
	 */
	i = 0;
	list_for_each(qe, &mcam_mod->free_q)
		i++;

	i = 0;
	list_for_each(qe, &mcam_mod->free_handle_q)
		i++;

	mcam_mod->bna = NULL;
}
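
/*
 * Post a STATS_GET request to the FW. The FW DMAs the statistics block
 * straight into the host buffer set up in bna_init() (hw_stats_dma);
 * stats_get_busy guards against overlapping requests until the I2H
 * response is processed.
 */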
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;
	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
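
/*
 * Describe the memory resources the BNA needs before it can be
 * initialized. The caller (bnad) is expected to walk the table and
 * allocate each entry by type; a minimal consumer sketch (hypothetical
 * helper names, DMA allocation via dma_alloc_coherent() assumed):
 *
 *	for (i = 0; i < BNA_RES_T_MAX; i++)
 *		if (res_info[i].res_u.mem_info.mem_type == BNA_MEM_T_DMA)
 *			alloc_dma_mdls(&res_info[i].res_u.mem_info);
 *		else
 *			alloc_kva_mdls(&res_info[i].res_u.mem_info);
 */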
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
		(bfa_nw_cee_meminfo() +
		 bfa_nw_flash_meminfo() +
		 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
		ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(sizeof(struct bfi_enet_stats), PAGE_SIZE);
}
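
/*
 * Like bna_res_req(), but for the per-module object arrays. The sizes
 * scale with ioceth->attr, so this is only meaningful once the FW
 * attribute query has completed (fw_query_complete). The RxQ array is
 * sized at two queues per Rx path, covering the split-queue (e.g.
 * header/data) configurations.
 */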
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		attr->num_ucmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
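
/*
 * First-stage init: wire up the stats DMA buffer, map registers and
 * bring up ioceth, enet and ethport. The per-module object arrays are
 * initialized later, in bna_mod_init(), once the FW attribute query has
 * fixed their sizes.
 */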
void
bna_init(struct bna *bna, struct bnad *bnad,
	 struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also attaches and initializes the CEE, flash and msgq modules */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}

void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}

void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		/* Tear down in the reverse of bna_mod_init() order */
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
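
/*
 * Allow the host driver to trim the TxQ/RxP counts below what the FW
 * advertised, e.g. to match the number of MSI-X vectors actually
 * granted. Raising the counts, or calling before the FW attribute query
 * has completed, fails with BNA_CB_FAIL.
 */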
int
bna_num_txq_set(struct bna *bna, int num_txq)
{
	if (bna->ioceth.attr.fw_query_complete &&
	    (num_txq <= bna->ioceth.attr.num_txq)) {
		bna->ioceth.attr.num_txq = num_txq;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}

int
bna_num_rxp_set(struct bna *bna, int num_rxp)
{
	if (bna->ioceth.attr.fw_query_complete &&
	    (num_rxp <= bna->ioceth.attr.num_rxp)) {
		bna->ioceth.attr.num_rxp = num_rxp;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}
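
/*
 * Free-list accessors for the unicast/multicast CAM entries carved out
 * in bna_ucam_mod_init()/bna_mcam_mod_init(). get() returns NULL when
 * the pool is exhausted; put() returns an entry to the pool. A minimal
 * usage sketch (hypothetical caller, assuming bnad->bna_lock is held
 * and error handling is trimmed):
 *
 *	struct bna_mac *mac = bna_ucam_mod_mac_get(&bna->ucam_mod);
 *	if (!mac)
 *		return -ENOMEM;
 *	memcpy(mac->addr, new_addr, ETH_ALEN);
 *	...
 *	bna_ucam_mod_mac_put(&bna->ucam_mod, mac);
 */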
struct bna_mac *
bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;

	if (list_empty(&ucam_mod->free_q))
		return NULL;

	bfa_q_deq(&ucam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}

struct bna_mac *
bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}

struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_handle_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_handle_q, &qe);

	return (struct bna_mcam_handle *)qe;
}

void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
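
/*
 * Kick off a HW stats collection. The call either fails fast through
 * bnad_cb_stats_get() (IOC down, or a collection already in flight) or
 * posts a STATS_GET request; the same callback fires again when the FW
 * response is processed.
 */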
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}