bfa_fcport.c 43 KB

  1. /*
  2. * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include <bfa.h>
  18. #include <bfa_svc.h>
  19. #include <bfi/bfi_pport.h>
  20. #include <cs/bfa_debug.h>
  21. #include <aen/bfa_aen.h>
  22. #include <cs/bfa_plog.h>
  23. #include <aen/bfa_aen_port.h>
  24. BFA_TRC_FILE(HAL, PPORT);
  25. BFA_MODULE(pport);
  26. /*
  27. * The port is considered disabled if the corresponding physical port or IOC
  28. * is explicitly disabled
  29. */
  30. #define BFA_PORT_IS_DISABLED(bfa) \
  31. ((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \
  32. (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
  33. /*
  34. * forward declarations
  35. */
  36. static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port);
  37. static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port);
  38. static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport);
  39. static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport);
  40. static void bfa_pport_set_wwns(struct bfa_pport_s *port);
  41. static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete);
  42. static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete);
  43. static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete);
  44. static void bfa_port_stats_timeout(void *cbarg);
  45. static void bfa_port_stats_clr_timeout(void *cbarg);
  46. static void bfa_pport_callback(struct bfa_pport_s *pport,
  47. enum bfa_pport_linkstate event);
  48. static void bfa_pport_queue_cb(struct bfa_pport_ln_s *ln,
  49. enum bfa_pport_linkstate event);
  50. /**
  51. * bfa_pport_private
  52. */
  53. /**
  54. * BFA port state machine events
  55. */
  56. enum bfa_pport_sm_event {
  57. BFA_PPORT_SM_START = 1, /* start port state machine */
  58. BFA_PPORT_SM_STOP = 2, /* stop port state machine */
  59. BFA_PPORT_SM_ENABLE = 3, /* enable port */
  60. BFA_PPORT_SM_DISABLE = 4, /* disable port state machine */
  61. BFA_PPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
  62. BFA_PPORT_SM_LINKUP = 6, /* firmware linkup event */
  63. BFA_PPORT_SM_LINKDOWN = 7, /* firmware linkdown event */
  64. BFA_PPORT_SM_QRESUME = 8, /* CQ space available */
  65. BFA_PPORT_SM_HWFAIL = 9, /* IOC h/w failure */
  66. };
  67. /**
  68. * BFA port link notification state machine events
  69. */
  70. enum bfa_pport_ln_sm_event {
  71. BFA_PPORT_LN_SM_LINKUP = 1, /* linkup event */
  72. BFA_PPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
  73. BFA_PPORT_LN_SM_NOTIFICATION = 3 /* done notification */
  74. };
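/*
 * Editor's note: the ln_* handlers further below implement a small link
 * notification state machine.  Each state encodes the last reported link
 * state plus any pending notification ("_nf") and any up/down transitions
 * that arrived before that notification was retired;
 * BFA_PPORT_LN_SM_NOTIFICATION is issued from __bfa_cb_port_event() when a
 * queued callback is retired without being delivered.
 */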
  75. static void bfa_pport_sm_uninit(struct bfa_pport_s *pport,
  76. enum bfa_pport_sm_event event);
  77. static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
  78. enum bfa_pport_sm_event event);
  79. static void bfa_pport_sm_enabling(struct bfa_pport_s *pport,
  80. enum bfa_pport_sm_event event);
  81. static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport,
  82. enum bfa_pport_sm_event event);
  83. static void bfa_pport_sm_linkup(struct bfa_pport_s *pport,
  84. enum bfa_pport_sm_event event);
  85. static void bfa_pport_sm_disabling(struct bfa_pport_s *pport,
  86. enum bfa_pport_sm_event event);
  87. static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
  88. enum bfa_pport_sm_event event);
  89. static void bfa_pport_sm_disabled(struct bfa_pport_s *pport,
  90. enum bfa_pport_sm_event event);
  91. static void bfa_pport_sm_stopped(struct bfa_pport_s *pport,
  92. enum bfa_pport_sm_event event);
  93. static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport,
  94. enum bfa_pport_sm_event event);
  95. static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport,
  96. enum bfa_pport_sm_event event);
  97. static void bfa_pport_ln_sm_dn(struct bfa_pport_ln_s *ln,
  98. enum bfa_pport_ln_sm_event event);
  99. static void bfa_pport_ln_sm_dn_nf(struct bfa_pport_ln_s *ln,
  100. enum bfa_pport_ln_sm_event event);
  101. static void bfa_pport_ln_sm_dn_up_nf(struct bfa_pport_ln_s *ln,
  102. enum bfa_pport_ln_sm_event event);
  103. static void bfa_pport_ln_sm_up(struct bfa_pport_ln_s *ln,
  104. enum bfa_pport_ln_sm_event event);
  105. static void bfa_pport_ln_sm_up_nf(struct bfa_pport_ln_s *ln,
  106. enum bfa_pport_ln_sm_event event);
  107. static void bfa_pport_ln_sm_up_dn_nf(struct bfa_pport_ln_s *ln,
  108. enum bfa_pport_ln_sm_event event);
  109. static void bfa_pport_ln_sm_up_dn_up_nf(struct bfa_pport_ln_s *ln,
  110. enum bfa_pport_ln_sm_event event);
  111. static struct bfa_sm_table_s hal_pport_sm_table[] = {
  112. {BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT},
  113. {BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
  114. {BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING},
  115. {BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
  116. {BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP},
  117. {BFA_SM(bfa_pport_sm_disabling_qwait),
  118. BFA_PPORT_ST_DISABLING_QWAIT},
  119. {BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING},
  120. {BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED},
  121. {BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED},
  122. {BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
  123. {BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
  124. };
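/*
 * Editor's note: hal_pport_sm_table maps each state handler function to the
 * externally visible BFA_PPORT_ST_* value.  bfa_sm_to_state() walks this
 * table to translate pport->sm into a state code, as done later in
 * bfa_pport_get_attr() and bfa_pport_is_disabled().
 */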
  125. static void
  126. bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event)
  127. {
  128. union bfa_aen_data_u aen_data;
  129. struct bfa_log_mod_s *logmod = pport->bfa->logm;
  130. wwn_t pwwn = pport->pwwn;
  131. char pwwn_ptr[BFA_STRING_32];
  132. struct bfa_ioc_attr_s ioc_attr;
  133. memset(&aen_data, 0, sizeof(aen_data));
  134. wwn2str(pwwn_ptr, pwwn);
  135. switch (event) {
  136. case BFA_PORT_AEN_ONLINE:
  137. bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr);
  138. break;
  139. case BFA_PORT_AEN_OFFLINE:
  140. bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr);
  141. break;
  142. case BFA_PORT_AEN_ENABLE:
  143. bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr);
  144. break;
  145. case BFA_PORT_AEN_DISABLE:
  146. bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr);
  147. break;
  148. case BFA_PORT_AEN_DISCONNECT:
  149. bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr);
  150. break;
  151. case BFA_PORT_AEN_QOS_NEG:
  152. bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr);
  153. break;
  154. default:
  155. break;
  156. }
  157. bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr);
  158. aen_data.port.ioc_type = ioc_attr.ioc_type;
  159. aen_data.port.pwwn = pwwn;
  160. }
  161. static void
  162. bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
  163. {
  164. bfa_trc(pport->bfa, event);
  165. switch (event) {
  166. case BFA_PPORT_SM_START:
  167. /**
  168. * Start event after IOC is configured and BFA is started.
  169. */
  170. if (bfa_pport_send_enable(pport))
  171. bfa_sm_set_state(pport, bfa_pport_sm_enabling);
  172. else
  173. bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
  174. break;
  175. case BFA_PPORT_SM_ENABLE:
  176. /**
  177. * Port is persistently configured to be in enabled state. Do
  178. * not change state. Port enabling is done when START event is
  179. * received.
  180. */
  181. break;
  182. case BFA_PPORT_SM_DISABLE:
  183. /**
  184. * If a port is persistently configured to be disabled, the
  185. * first event will be a port disable request.
  186. */
  187. bfa_sm_set_state(pport, bfa_pport_sm_disabled);
  188. break;
  189. case BFA_PPORT_SM_HWFAIL:
  190. bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
  191. break;
  192. default:
  193. bfa_sm_fault(pport->bfa, event);
  194. }
  195. }
  196. static void
  197. bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
  198. enum bfa_pport_sm_event event)
  199. {
  200. bfa_trc(pport->bfa, event);
  201. switch (event) {
  202. case BFA_PPORT_SM_QRESUME:
  203. bfa_sm_set_state(pport, bfa_pport_sm_enabling);
  204. bfa_pport_send_enable(pport);
  205. break;
  206. case BFA_PPORT_SM_STOP:
  207. bfa_reqq_wcancel(&pport->reqq_wait);
  208. bfa_sm_set_state(pport, bfa_pport_sm_stopped);
  209. break;
  210. case BFA_PPORT_SM_ENABLE:
  211. /**
  212. * Enable is already in progress.
  213. */
  214. break;
  215. case BFA_PPORT_SM_DISABLE:
  216. /**
  217. * Just send disable request to firmware when room becomes
  218. * available in request queue.
  219. */
  220. bfa_sm_set_state(pport, bfa_pport_sm_disabled);
  221. bfa_reqq_wcancel(&pport->reqq_wait);
  222. bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
  223. BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
  224. bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
  225. break;
  226. case BFA_PPORT_SM_LINKUP:
  227. case BFA_PPORT_SM_LINKDOWN:
  228. /**
  229. * Possible to get link events when doing back-to-back
  230. * enable/disables.
  231. */
  232. break;
  233. case BFA_PPORT_SM_HWFAIL:
  234. bfa_reqq_wcancel(&pport->reqq_wait);
  235. bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
  236. break;
  237. default:
  238. bfa_sm_fault(pport->bfa, event);
  239. }
  240. }
  241. static void
  242. bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
  243. {
  244. bfa_trc(pport->bfa, event);
  245. switch (event) {
  246. case BFA_PPORT_SM_FWRSP:
  247. case BFA_PPORT_SM_LINKDOWN:
  248. bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
  249. break;
  250. case BFA_PPORT_SM_LINKUP:
  251. bfa_pport_update_linkinfo(pport);
  252. bfa_sm_set_state(pport, bfa_pport_sm_linkup);
  253. bfa_assert(pport->event_cbfn);
  254. bfa_pport_callback(pport, BFA_PPORT_LINKUP);
  255. break;
  256. case BFA_PPORT_SM_ENABLE:
  257. /**
  258. * Already being enabled.
  259. */
  260. break;
  261. case BFA_PPORT_SM_DISABLE:
  262. if (bfa_pport_send_disable(pport))
  263. bfa_sm_set_state(pport, bfa_pport_sm_disabling);
  264. else
  265. bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
  266. bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
  267. BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
  268. bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
  269. break;
  270. case BFA_PPORT_SM_STOP:
  271. bfa_sm_set_state(pport, bfa_pport_sm_stopped);
  272. break;
  273. case BFA_PPORT_SM_HWFAIL:
  274. bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
  275. break;
  276. default:
  277. bfa_sm_fault(pport->bfa, event);
  278. }
  279. }
  280. static void
  281. bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
  282. {
  283. bfa_trc(pport->bfa, event);
  284. switch (event) {
  285. case BFA_PPORT_SM_LINKUP:
  286. bfa_pport_update_linkinfo(pport);
  287. bfa_sm_set_state(pport, bfa_pport_sm_linkup);
  288. bfa_assert(pport->event_cbfn);
  289. bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
  290. BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
  291. bfa_pport_callback(pport, BFA_PPORT_LINKUP);
  292. bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE);
  293. /**
  294. * If QoS is enabled and it is not online,
  295. * send a separate event.
  296. */
  297. if ((pport->cfg.qos_enabled)
  298. && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE))
  299. bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG);
  300. break;
  301. case BFA_PPORT_SM_LINKDOWN:
  302. /**
  303. * Possible to get link down event.
  304. */
  305. break;
  306. case BFA_PPORT_SM_ENABLE:
  307. /**
  308. * Already enabled.
  309. */
  310. break;
  311. case BFA_PPORT_SM_DISABLE:
  312. if (bfa_pport_send_disable(pport))
  313. bfa_sm_set_state(pport, bfa_pport_sm_disabling);
  314. else
  315. bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
  316. bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
  317. BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
  318. bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
  319. break;
  320. case BFA_PPORT_SM_STOP:
  321. bfa_sm_set_state(pport, bfa_pport_sm_stopped);
  322. break;
  323. case BFA_PPORT_SM_HWFAIL:
  324. bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
  325. break;
  326. default:
  327. bfa_sm_fault(pport->bfa, event);
  328. }
  329. }
  330. static void
  331. bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
  332. {
  333. bfa_trc(pport->bfa, event);
  334. switch (event) {
  335. case BFA_PPORT_SM_ENABLE:
  336. /**
  337. * Already enabled.
  338. */
  339. break;
  340. case BFA_PPORT_SM_DISABLE:
  341. if (bfa_pport_send_disable(pport))
  342. bfa_sm_set_state(pport, bfa_pport_sm_disabling);
  343. else
  344. bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
  345. bfa_pport_reset_linkinfo(pport);
  346. bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
  347. bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
  348. BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
  349. bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
  350. bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
  351. break;
  352. case BFA_PPORT_SM_LINKDOWN:
  353. bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
  354. bfa_pport_reset_linkinfo(pport);
  355. bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
  356. bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
  357. BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
  358. if (BFA_PORT_IS_DISABLED(pport->bfa))
  359. bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
  360. else
  361. bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
  362. break;
  363. case BFA_PPORT_SM_STOP:
  364. bfa_sm_set_state(pport, bfa_pport_sm_stopped);
  365. bfa_pport_reset_linkinfo(pport);
  366. if (BFA_PORT_IS_DISABLED(pport->bfa))
  367. bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
  368. else
  369. bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
  370. break;
  371. case BFA_PPORT_SM_HWFAIL:
  372. bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
  373. bfa_pport_reset_linkinfo(pport);
  374. bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
  375. if (BFA_PORT_IS_DISABLED(pport->bfa))
  376. bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
  377. else
  378. bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
  379. break;
  380. default:
  381. bfa_sm_fault(pport->bfa, event);
  382. }
  383. }
  384. static void
  385. bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
  386. enum bfa_pport_sm_event event)
  387. {
  388. bfa_trc(pport->bfa, event);
  389. switch (event) {
  390. case BFA_PPORT_SM_QRESUME:
  391. bfa_sm_set_state(pport, bfa_pport_sm_disabling);
  392. bfa_pport_send_disable(pport);
  393. break;
  394. case BFA_PPORT_SM_STOP:
  395. bfa_sm_set_state(pport, bfa_pport_sm_stopped);
  396. bfa_reqq_wcancel(&pport->reqq_wait);
  397. break;
  398. case BFA_PPORT_SM_DISABLE:
  399. /**
  400. * Already being disabled.
  401. */
  402. break;
  403. case BFA_PPORT_SM_LINKUP:
  404. case BFA_PPORT_SM_LINKDOWN:
  405. /**
  406. * Possible to get link events when doing back-to-back
  407. * enable/disables.
  408. */
  409. break;
  410. case BFA_PPORT_SM_HWFAIL:
  411. bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
  412. bfa_reqq_wcancel(&pport->reqq_wait);
  413. break;
  414. default:
  415. bfa_sm_fault(pport->bfa, event);
  416. }
  417. }
  418. static void
  419. bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
  420. {
  421. bfa_trc(pport->bfa, event);
  422. switch (event) {
  423. case BFA_PPORT_SM_FWRSP:
  424. bfa_sm_set_state(pport, bfa_pport_sm_disabled);
  425. break;
  426. case BFA_PPORT_SM_DISABLE:
  427. /**
  428. * Already being disabled.
  429. */
  430. break;
  431. case BFA_PPORT_SM_ENABLE:
  432. if (bfa_pport_send_enable(pport))
  433. bfa_sm_set_state(pport, bfa_pport_sm_enabling);
  434. else
  435. bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
  436. bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
  437. BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
  438. bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
  439. break;
  440. case BFA_PPORT_SM_STOP:
  441. bfa_sm_set_state(pport, bfa_pport_sm_stopped);
  442. break;
  443. case BFA_PPORT_SM_LINKUP:
  444. case BFA_PPORT_SM_LINKDOWN:
  445. /**
  446. * Possible to get link events when doing back-to-back
  447. * enable/disables.
  448. */
  449. break;
  450. case BFA_PPORT_SM_HWFAIL:
  451. bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
  452. break;
  453. default:
  454. bfa_sm_fault(pport->bfa, event);
  455. }
  456. }
  457. static void
  458. bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
  459. {
  460. bfa_trc(pport->bfa, event);
  461. switch (event) {
  462. case BFA_PPORT_SM_START:
  463. /**
  464. * Ignore start event for a port that is disabled.
  465. */
  466. break;
  467. case BFA_PPORT_SM_STOP:
  468. bfa_sm_set_state(pport, bfa_pport_sm_stopped);
  469. break;
  470. case BFA_PPORT_SM_ENABLE:
  471. if (bfa_pport_send_enable(pport))
  472. bfa_sm_set_state(pport, bfa_pport_sm_enabling);
  473. else
  474. bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
  475. bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
  476. BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
  477. bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
  478. break;
  479. case BFA_PPORT_SM_DISABLE:
  480. /**
  481. * Already disabled.
  482. */
  483. break;
  484. case BFA_PPORT_SM_HWFAIL:
  485. bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
  486. break;
  487. default:
  488. bfa_sm_fault(pport->bfa, event);
  489. }
  490. }
  491. static void
  492. bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
  493. {
  494. bfa_trc(pport->bfa, event);
  495. switch (event) {
  496. case BFA_PPORT_SM_START:
  497. if (bfa_pport_send_enable(pport))
  498. bfa_sm_set_state(pport, bfa_pport_sm_enabling);
  499. else
  500. bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
  501. break;
  502. default:
  503. /**
  504. * Ignore all other events.
  505. */
  506. ;
  507. }
  508. }
  509. /**
  510. * Port is enabled. IOC is down/failed.
  511. */
  512. static void
  513. bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
  514. {
  515. bfa_trc(pport->bfa, event);
  516. switch (event) {
  517. case BFA_PPORT_SM_START:
  518. if (bfa_pport_send_enable(pport))
  519. bfa_sm_set_state(pport, bfa_pport_sm_enabling);
  520. else
  521. bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
  522. break;
  523. default:
  524. /**
  525. * Ignore all events.
  526. */
  527. ;
  528. }
  529. }
  530. /**
  531. * Port is disabled. IOC is down/failed.
  532. */
  533. static void
  534. bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
  535. {
  536. bfa_trc(pport->bfa, event);
  537. switch (event) {
  538. case BFA_PPORT_SM_START:
  539. bfa_sm_set_state(pport, bfa_pport_sm_disabled);
  540. break;
  541. case BFA_PPORT_SM_ENABLE:
  542. bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
  543. break;
  544. default:
  545. /**
  546. * Ignore all events.
  547. */
  548. ;
  549. }
  550. }
  551. /**
  552. * Link state is down
  553. */
  554. static void
  555. bfa_pport_ln_sm_dn(struct bfa_pport_ln_s *ln,
  556. enum bfa_pport_ln_sm_event event)
  557. {
  558. bfa_trc(ln->pport->bfa, event);
  559. switch (event) {
  560. case BFA_PPORT_LN_SM_LINKUP:
  561. bfa_sm_set_state(ln, bfa_pport_ln_sm_up_nf);
  562. bfa_pport_queue_cb(ln, BFA_PPORT_LINKUP);
  563. break;
  564. default:
  565. bfa_sm_fault(ln->pport->bfa, event);
  566. }
  567. }
  568. /**
  569. * Link state is waiting for down notification
  570. */
  571. static void
  572. bfa_pport_ln_sm_dn_nf(struct bfa_pport_ln_s *ln,
  573. enum bfa_pport_ln_sm_event event)
  574. {
  575. bfa_trc(ln->pport->bfa, event);
  576. switch (event) {
  577. case BFA_PPORT_LN_SM_LINKUP:
  578. bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_up_nf);
  579. break;
  580. case BFA_PPORT_LN_SM_NOTIFICATION:
  581. bfa_sm_set_state(ln, bfa_pport_ln_sm_dn);
  582. break;
  583. default:
  584. bfa_sm_fault(ln->pport->bfa, event);
  585. }
  586. }
  587. /**
  588. * Link state is waiting for down notification and there is a pending up
  589. */
  590. static void
  591. bfa_pport_ln_sm_dn_up_nf(struct bfa_pport_ln_s *ln,
  592. enum bfa_pport_ln_sm_event event)
  593. {
  594. bfa_trc(ln->pport->bfa, event);
  595. switch (event) {
  596. case BFA_PPORT_LN_SM_LINKDOWN:
  597. bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_nf);
  598. break;
  599. case BFA_PPORT_LN_SM_NOTIFICATION:
  600. bfa_sm_set_state(ln, bfa_pport_ln_sm_up_nf);
  601. bfa_pport_queue_cb(ln, BFA_PPORT_LINKUP);
  602. break;
  603. default:
  604. bfa_sm_fault(ln->pport->bfa, event);
  605. }
  606. }
  607. /**
  608. * Link state is up
  609. */
  610. static void
  611. bfa_pport_ln_sm_up(struct bfa_pport_ln_s *ln,
  612. enum bfa_pport_ln_sm_event event)
  613. {
  614. bfa_trc(ln->pport->bfa, event);
  615. switch (event) {
  616. case BFA_PPORT_LN_SM_LINKDOWN:
  617. bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_nf);
  618. bfa_pport_queue_cb(ln, BFA_PPORT_LINKDOWN);
  619. break;
  620. default:
  621. bfa_sm_fault(ln->pport->bfa, event);
  622. }
  623. }
  624. /**
  625. * Link state is waiting for up notification
  626. */
  627. static void
  628. bfa_pport_ln_sm_up_nf(struct bfa_pport_ln_s *ln,
  629. enum bfa_pport_ln_sm_event event)
  630. {
  631. bfa_trc(ln->pport->bfa, event);
  632. switch (event) {
  633. case BFA_PPORT_LN_SM_LINKDOWN:
  634. bfa_sm_set_state(ln, bfa_pport_ln_sm_up_dn_nf);
  635. break;
  636. case BFA_PPORT_LN_SM_NOTIFICATION:
  637. bfa_sm_set_state(ln, bfa_pport_ln_sm_up);
  638. break;
  639. default:
  640. bfa_sm_fault(ln->pport->bfa, event);
  641. }
  642. }
  643. /**
  644. * Link state is waiting for up notification and there is a pending down
  645. */
  646. static void
  647. bfa_pport_ln_sm_up_dn_nf(struct bfa_pport_ln_s *ln,
  648. enum bfa_pport_ln_sm_event event)
  649. {
  650. bfa_trc(ln->pport->bfa, event);
  651. switch (event) {
  652. case BFA_PPORT_LN_SM_LINKUP:
  653. bfa_sm_set_state(ln, bfa_pport_ln_sm_up_dn_up_nf);
  654. break;
  655. case BFA_PPORT_LN_SM_NOTIFICATION:
  656. bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_nf);
  657. bfa_pport_queue_cb(ln, BFA_PPORT_LINKDOWN);
  658. break;
  659. default:
  660. bfa_sm_fault(ln->pport->bfa, event);
  661. }
  662. }
  663. /**
  664. * Link state is waiting for up notification and there are pending down and up
  665. */
  666. static void
  667. bfa_pport_ln_sm_up_dn_up_nf(struct bfa_pport_ln_s *ln,
  668. enum bfa_pport_ln_sm_event event)
  669. {
  670. bfa_trc(ln->pport->bfa, event);
  671. switch (event) {
  672. case BFA_PPORT_LN_SM_LINKDOWN:
  673. bfa_sm_set_state(ln, bfa_pport_ln_sm_up_dn_nf);
  674. break;
  675. case BFA_PPORT_LN_SM_NOTIFICATION:
  676. bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_up_nf);
  677. bfa_pport_queue_cb(ln, BFA_PPORT_LINKDOWN);
  678. break;
  679. default:
  680. bfa_sm_fault(ln->pport->bfa, event);
  681. }
  682. }
  683. /**
  684. * bfa_pport_private
  685. */
  686. static void
  687. __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete)
  688. {
  689. struct bfa_pport_ln_s *ln = cbarg;
  690. if (complete)
  691. ln->pport->event_cbfn(ln->pport->event_cbarg, ln->ln_event);
  692. else
  693. bfa_sm_send_event(ln, BFA_PPORT_LN_SM_NOTIFICATION);
  694. }
  695. #define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \
  696. BFA_CACHELINE_SZ))
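/*
 * Editor's note: PPORT_STATS_DMA_SZ is the per-port DMA area reserved for
 * the firmware statistics block; BFA_ROUNDUP presumably rounds the size of
 * union bfa_pport_stats_u up to a multiple of BFA_CACHELINE_SZ (assumption
 * about that helper, which is defined elsewhere).  bfa_pport_meminfo() adds
 * this amount to the DMA length, and bfa_pport_mem_claim() later carves it
 * out of the meminfo region, advancing both the virtual and physical
 * cursors.
 */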
  697. static void
  698. bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
  699. u32 *dm_len)
  700. {
  701. *dm_len += PPORT_STATS_DMA_SZ;
  702. }
  703. static void
  704. bfa_pport_qresume(void *cbarg)
  705. {
  706. struct bfa_pport_s *port = cbarg;
  707. bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME);
  708. }
  709. static void
  710. bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
  711. {
  712. u8 *dm_kva;
  713. u64 dm_pa;
  714. dm_kva = bfa_meminfo_dma_virt(meminfo);
  715. dm_pa = bfa_meminfo_dma_phys(meminfo);
  716. pport->stats_kva = dm_kva;
  717. pport->stats_pa = dm_pa;
  718. pport->stats = (union bfa_pport_stats_u *)dm_kva;
  719. dm_kva += PPORT_STATS_DMA_SZ;
  720. dm_pa += PPORT_STATS_DMA_SZ;
  721. bfa_meminfo_dma_virt(meminfo) = dm_kva;
  722. bfa_meminfo_dma_phys(meminfo) = dm_pa;
  723. }
  724. /**
  725. * Memory initialization.
  726. */
  727. static void
  728. bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
  729. struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
  730. {
  731. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  732. struct bfa_pport_cfg_s *port_cfg = &pport->cfg;
  733. struct bfa_pport_ln_s *ln = &pport->ln;
  734. bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s));
  735. pport->bfa = bfa;
  736. ln->pport = pport;
  737. bfa_pport_mem_claim(pport, meminfo);
  738. bfa_sm_set_state(pport, bfa_pport_sm_uninit);
  739. bfa_sm_set_state(ln, bfa_pport_ln_sm_dn);
  740. /**
  741. * initialize and set default configuration
  742. */
  743. port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
  744. port_cfg->speed = BFA_PPORT_SPEED_AUTO;
  745. port_cfg->trunked = BFA_FALSE;
  746. port_cfg->maxfrsize = 0;
  747. port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;
  748. bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport);
  749. }
  750. static void
  751. bfa_pport_initdone(struct bfa_s *bfa)
  752. {
  753. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  754. /**
  755. * Initialize port attributes from IOC hardware data.
  756. */
  757. bfa_pport_set_wwns(pport);
  758. if (pport->cfg.maxfrsize == 0)
  759. pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
  760. pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
  761. pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
  762. bfa_assert(pport->cfg.maxfrsize);
  763. bfa_assert(pport->cfg.rx_bbcredit);
  764. bfa_assert(pport->speed_sup);
  765. }
  766. static void
  767. bfa_pport_detach(struct bfa_s *bfa)
  768. {
  769. }
  770. /**
  771. * Called when IOC is ready.
  772. */
  773. static void
  774. bfa_pport_start(struct bfa_s *bfa)
  775. {
  776. bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START);
  777. }
  778. /**
  779. * Called before IOC is stopped.
  780. */
  781. static void
  782. bfa_pport_stop(struct bfa_s *bfa)
  783. {
  784. bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP);
  785. }
  786. /**
  787. * Called when IOC failure is detected.
  788. */
  789. static void
  790. bfa_pport_iocdisable(struct bfa_s *bfa)
  791. {
  792. bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL);
  793. }
  794. static void
  795. bfa_pport_update_linkinfo(struct bfa_pport_s *pport)
  796. {
  797. struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event;
  798. pport->speed = pevent->link_state.speed;
  799. pport->topology = pevent->link_state.topology;
  800. if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP)
  801. pport->myalpa = pevent->link_state.tl.loop_info.myalpa;
  802. /*
  803. * QoS Details
  804. */
  805. bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr);
  806. bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr);
  807. bfa_trc(pport->bfa, pport->speed);
  808. bfa_trc(pport->bfa, pport->topology);
  809. }
  810. static void
  811. bfa_pport_reset_linkinfo(struct bfa_pport_s *pport)
  812. {
  813. pport->speed = BFA_PPORT_SPEED_UNKNOWN;
  814. pport->topology = BFA_PPORT_TOPOLOGY_NONE;
  815. }
  816. /**
  817. * Send port enable message to firmware.
  818. */
  819. static bfa_boolean_t
  820. bfa_pport_send_enable(struct bfa_pport_s *port)
  821. {
  822. struct bfi_pport_enable_req_s *m;
  823. /**
  824. * Increment message tag before queue check, so that responses to old
  825. * requests are discarded.
  826. */
  827. port->msgtag++;
  828. /**
  829. * check for room in queue to send request now
  830. */
  831. m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
  832. if (!m) {
  833. bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
  834. return BFA_FALSE;
  835. }
  836. bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ,
  837. bfa_lpuid(port->bfa));
  838. m->nwwn = port->nwwn;
  839. m->pwwn = port->pwwn;
  840. m->port_cfg = port->cfg;
  841. m->msgtag = port->msgtag;
  842. m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize);
  843. bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa);
  844. bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo);
  845. bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi);
  846. /**
  847. * queue I/O message to firmware
  848. */
  849. bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
  850. return BFA_TRUE;
  851. }
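/*
 * Editor's note on the request-queue pattern used above: if no request
 * element is available, the port registers reqq_wait and returns BFA_FALSE,
 * which sends the state machine to the corresponding *_qwait state; when
 * space frees up, bfa_pport_qresume() raises BFA_PPORT_SM_QRESUME and the
 * request is retried.  Incrementing msgtag before the queue check lets
 * bfa_pport_isr() discard responses whose msgtag no longer matches, i.e.
 * responses to superseded requests.
 */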
  852. /**
  853. * Send port disable message to firmware.
  854. */
  855. static bfa_boolean_t
  856. bfa_pport_send_disable(struct bfa_pport_s *port)
  857. {
  858. bfi_pport_disable_req_t *m;
  859. /**
  860. * Increment message tag before queue check, so that responses to old
  861. * requests are discarded.
  862. */
  863. port->msgtag++;
  864. /**
  865. * check for room in queue to send request now
  866. */
  867. m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
  868. if (!m) {
  869. bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
  870. return BFA_FALSE;
  871. }
  872. bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ,
  873. bfa_lpuid(port->bfa));
  874. m->msgtag = port->msgtag;
  875. /**
  876. * queue I/O message to firmware
  877. */
  878. bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
  879. return BFA_TRUE;
  880. }
  881. static void
  882. bfa_pport_set_wwns(struct bfa_pport_s *port)
  883. {
  884. port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc);
  885. port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc);
  886. bfa_trc(port->bfa, port->pwwn);
  887. bfa_trc(port->bfa, port->nwwn);
  888. }
  889. static void
  890. bfa_port_send_txcredit(void *port_cbarg)
  891. {
  892. struct bfa_pport_s *port = port_cbarg;
  893. struct bfi_pport_set_svc_params_req_s *m;
  894. /**
  895. * check for room in queue to send request now
  896. */
  897. m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
  898. if (!m) {
  899. bfa_trc(port->bfa, port->cfg.tx_bbcredit);
  900. return;
  901. }
  902. bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ,
  903. bfa_lpuid(port->bfa));
  904. m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit);
  905. /**
  906. * queue I/O message to firmware
  907. */
  908. bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
  909. }
  910. /**
  911. * bfa_pport_public
  912. */
  913. /**
  914. * Firmware message handler.
  915. */
  916. void
  917. bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
  918. {
  919. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  920. union bfi_pport_i2h_msg_u i2hmsg;
  921. i2hmsg.msg = msg;
  922. pport->event_arg.i2hmsg = i2hmsg;
  923. switch (msg->mhdr.msg_id) {
  924. case BFI_PPORT_I2H_ENABLE_RSP:
  925. if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
  926. bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
  927. break;
  928. case BFI_PPORT_I2H_DISABLE_RSP:
  929. if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
  930. bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
  931. break;
  932. case BFI_PPORT_I2H_EVENT:
  933. switch (i2hmsg.event->link_state.linkstate) {
  934. case BFA_PPORT_LINKUP:
  935. bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP);
  936. break;
  937. case BFA_PPORT_LINKDOWN:
  938. bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN);
  939. break;
  940. case BFA_PPORT_TRUNK_LINKDOWN:
  941. /** todo: event notification */
  942. break;
  943. }
  944. break;
  945. case BFI_PPORT_I2H_GET_STATS_RSP:
  946. case BFI_PPORT_I2H_GET_QOS_STATS_RSP:
  947. /*
  948. * check for timer pop before processing the rsp
  949. */
  950. if (pport->stats_busy == BFA_FALSE
  951. || pport->stats_status == BFA_STATUS_ETIMER)
  952. break;
  953. bfa_timer_stop(&pport->timer);
  954. pport->stats_status = i2hmsg.getstats_rsp->status;
  955. bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats,
  956. pport);
  957. break;
  958. case BFI_PPORT_I2H_CLEAR_STATS_RSP:
  959. case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP:
  960. /*
  961. * check for timer pop before processing the rsp
  962. */
  963. if (pport->stats_busy == BFA_FALSE
  964. || pport->stats_status == BFA_STATUS_ETIMER)
  965. break;
  966. bfa_timer_stop(&pport->timer);
  967. pport->stats_status = BFA_STATUS_OK;
  968. bfa_cb_queue(pport->bfa, &pport->hcb_qe,
  969. __bfa_cb_port_stats_clr, pport);
  970. break;
  971. default:
  972. bfa_assert(0);
  973. }
  974. }
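/*
 * Editor's note: for the get-stats and clear-stats responses above, the
 * stats_busy / BFA_STATUS_ETIMER check guards against a response arriving
 * after bfa_port_stats_timeout() or bfa_port_stats_clr_timeout() has
 * already fired and reported BFA_STATUS_ETIMER to the caller; such a late
 * response is dropped rather than completing the request twice.
 */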
  975. /**
  976. * bfa_pport_api
  977. */
  978. /**
  979. * Registered callback for port events.
  980. */
  981. void
  982. bfa_pport_event_register(struct bfa_s *bfa,
  983. void (*cbfn) (void *cbarg, bfa_pport_event_t event),
  984. void *cbarg)
  985. {
  986. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  987. pport->event_cbfn = cbfn;
  988. pport->event_cbarg = cbarg;
  989. }
  990. bfa_status_t
  991. bfa_pport_enable(struct bfa_s *bfa)
  992. {
  993. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  994. if (pport->diag_busy)
  995. return BFA_STATUS_DIAG_BUSY;
  996. else if (bfa_sm_cmp_state
  997. (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait))
  998. return BFA_STATUS_DEVBUSY;
  999. bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE);
  1000. return BFA_STATUS_OK;
  1001. }
  1002. bfa_status_t
  1003. bfa_pport_disable(struct bfa_s *bfa)
  1004. {
  1005. bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE);
  1006. return BFA_STATUS_OK;
  1007. }
  1008. /**
  1009. * Configure port speed.
  1010. */
  1011. bfa_status_t
  1012. bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
  1013. {
  1014. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1015. bfa_trc(bfa, speed);
  1016. if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) {
  1017. bfa_trc(bfa, pport->speed_sup);
  1018. return BFA_STATUS_UNSUPP_SPEED;
  1019. }
  1020. pport->cfg.speed = speed;
  1021. return BFA_STATUS_OK;
  1022. }
  1023. /**
  1024. * Get current speed.
  1025. */
  1026. enum bfa_pport_speed
  1027. bfa_pport_get_speed(struct bfa_s *bfa)
  1028. {
  1029. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1030. return port->speed;
  1031. }
  1032. /**
  1033. * Configure port topology.
  1034. */
  1035. bfa_status_t
  1036. bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
  1037. {
  1038. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1039. bfa_trc(bfa, topology);
  1040. bfa_trc(bfa, pport->cfg.topology);
  1041. switch (topology) {
  1042. case BFA_PPORT_TOPOLOGY_P2P:
  1043. case BFA_PPORT_TOPOLOGY_LOOP:
  1044. case BFA_PPORT_TOPOLOGY_AUTO:
  1045. break;
  1046. default:
  1047. return BFA_STATUS_EINVAL;
  1048. }
  1049. pport->cfg.topology = topology;
  1050. return BFA_STATUS_OK;
  1051. }
  1052. /**
  1053. * Get current topology.
  1054. */
  1055. enum bfa_pport_topology
  1056. bfa_pport_get_topology(struct bfa_s *bfa)
  1057. {
  1058. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1059. return port->topology;
  1060. }
  1061. bfa_status_t
  1062. bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
  1063. {
  1064. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1065. bfa_trc(bfa, alpa);
  1066. bfa_trc(bfa, pport->cfg.cfg_hardalpa);
  1067. bfa_trc(bfa, pport->cfg.hardalpa);
  1068. pport->cfg.cfg_hardalpa = BFA_TRUE;
  1069. pport->cfg.hardalpa = alpa;
  1070. return BFA_STATUS_OK;
  1071. }
  1072. bfa_status_t
  1073. bfa_pport_clr_hardalpa(struct bfa_s *bfa)
  1074. {
  1075. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1076. bfa_trc(bfa, pport->cfg.cfg_hardalpa);
  1077. bfa_trc(bfa, pport->cfg.hardalpa);
  1078. pport->cfg.cfg_hardalpa = BFA_FALSE;
  1079. return BFA_STATUS_OK;
  1080. }
  1081. bfa_boolean_t
  1082. bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
  1083. {
  1084. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1085. *alpa = port->cfg.hardalpa;
  1086. return port->cfg.cfg_hardalpa;
  1087. }
  1088. u8
  1089. bfa_pport_get_myalpa(struct bfa_s *bfa)
  1090. {
  1091. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1092. return port->myalpa;
  1093. }
  1094. bfa_status_t
  1095. bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
  1096. {
  1097. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1098. bfa_trc(bfa, maxfrsize);
  1099. bfa_trc(bfa, pport->cfg.maxfrsize);
  1100. /*
  1101. * within range
  1102. */
  1103. if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
  1104. return BFA_STATUS_INVLD_DFSZ;
  1105. /*
  1106. * must be a power of 2, unless it is the max frame size of 2112
  1107. */
  1108. if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
  1109. return BFA_STATUS_INVLD_DFSZ;
  1110. pport->cfg.maxfrsize = maxfrsize;
  1111. return BFA_STATUS_OK;
  1112. }
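/*
 * Editor's note: a configured maxfrsize must fall within
 * [FC_MIN_PDUSZ, FC_MAX_PDUSZ] and must be a power of two unless it is
 * exactly FC_MAX_PDUSZ (2112, which is not a power of two).  For example,
 * 2112 is accepted and 2048 passes the (maxfrsize & (maxfrsize - 1))
 * power-of-two test, while 1500 fails it.
 */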
  1113. u16
  1114. bfa_pport_get_maxfrsize(struct bfa_s *bfa)
  1115. {
  1116. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1117. return port->cfg.maxfrsize;
  1118. }
  1119. u32
  1120. bfa_pport_mypid(struct bfa_s *bfa)
  1121. {
  1122. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1123. return port->mypid;
  1124. }
  1125. u8
  1126. bfa_pport_get_rx_bbcredit(struct bfa_s *bfa)
  1127. {
  1128. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1129. return port->cfg.rx_bbcredit;
  1130. }
  1131. void
  1132. bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
  1133. {
  1134. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1135. port->cfg.tx_bbcredit = (u8) tx_bbcredit;
  1136. bfa_port_send_txcredit(port);
  1137. }
  1138. /**
  1139. * Get port attributes.
  1140. */
  1141. wwn_t
  1142. bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
  1143. {
  1144. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1145. if (node)
  1146. return pport->nwwn;
  1147. else
  1148. return pport->pwwn;
  1149. }
  1150. void
  1151. bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
  1152. {
  1153. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1154. bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
  1155. attr->nwwn = pport->nwwn;
  1156. attr->pwwn = pport->pwwn;
  1157. bfa_os_memcpy(&attr->pport_cfg, &pport->cfg,
  1158. sizeof(struct bfa_pport_cfg_s));
  1159. /*
  1160. * speed attributes
  1161. */
  1162. attr->pport_cfg.speed = pport->cfg.speed;
  1163. attr->speed_supported = pport->speed_sup;
  1164. attr->speed = pport->speed;
  1165. attr->cos_supported = FC_CLASS_3;
  1166. /*
  1167. * topology attributes
  1168. */
  1169. attr->pport_cfg.topology = pport->cfg.topology;
  1170. attr->topology = pport->topology;
  1171. /*
  1172. * beacon attributes
  1173. */
  1174. attr->beacon = pport->beacon;
  1175. attr->link_e2e_beacon = pport->link_e2e_beacon;
  1176. attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog);
  1177. attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
  1178. attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
  1179. attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm);
  1180. if (bfa_ioc_is_disabled(&pport->bfa->ioc))
  1181. attr->port_state = BFA_PPORT_ST_IOCDIS;
  1182. else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc))
  1183. attr->port_state = BFA_PPORT_ST_FWMISMATCH;
  1184. }
  1185. static void
  1186. bfa_port_stats_query(void *cbarg)
  1187. {
  1188. struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
  1189. bfi_pport_get_stats_req_t *msg;
  1190. msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
  1191. if (!msg) {
  1192. port->stats_qfull = BFA_TRUE;
  1193. bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query,
  1194. port);
  1195. bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
  1196. return;
  1197. }
  1198. port->stats_qfull = BFA_FALSE;
  1199. bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t));
  1200. bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ,
  1201. bfa_lpuid(port->bfa));
  1202. bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
  1203. return;
  1204. }
  1205. static void
  1206. bfa_port_stats_clear(void *cbarg)
  1207. {
  1208. struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
  1209. bfi_pport_clear_stats_req_t *msg;
  1210. msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
  1211. if (!msg) {
  1212. port->stats_qfull = BFA_TRUE;
  1213. bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear,
  1214. port);
  1215. bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
  1216. return;
  1217. }
  1218. port->stats_qfull = BFA_FALSE;
  1219. bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t));
  1220. bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ,
  1221. bfa_lpuid(port->bfa));
  1222. bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
  1223. return;
  1224. }
  1225. static void
  1226. bfa_port_qos_stats_clear(void *cbarg)
  1227. {
  1228. struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
  1229. bfi_pport_clear_qos_stats_req_t *msg;
  1230. msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
  1231. if (!msg) {
  1232. port->stats_qfull = BFA_TRUE;
  1233. bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
  1234. port);
  1235. bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
  1236. return;
  1237. }
  1238. port->stats_qfull = BFA_FALSE;
  1239. bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t));
  1240. bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ,
  1241. bfa_lpuid(port->bfa));
  1242. bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
  1243. return;
  1244. }
  1245. static void
  1246. bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s)
  1247. {
  1248. u32 *dip = (u32 *) d;
  1249. u32 *sip = (u32 *) s;
  1250. int i;
  1251. /*
  1252. * Do 64 bit fields swap first
  1253. */
  1254. for (i = 0;
  1255. i <
  1256. ((sizeof(union bfa_pport_stats_u) -
  1257. sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) {
  1258. #ifdef __BIGENDIAN
  1259. dip[i] = bfa_os_ntohl(sip[i]);
  1260. dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
  1261. #else
  1262. dip[i] = bfa_os_ntohl(sip[i + 1]);
  1263. dip[i + 1] = bfa_os_ntohl(sip[i]);
  1264. #endif
  1265. }
  1266. /*
  1267. * Now swap the 32 bit fields
  1268. */
  1269. for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i)
  1270. dip[i] = bfa_os_ntohl(sip[i]);
  1271. }
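/*
 * Editor's note: the firmware returns counters in big-endian (network)
 * order.  The first loop above walks the 64-bit counters two 32-bit words
 * at a time: on a big-endian host each word is converted in place, while on
 * a little-endian host the two words are also exchanged so the 64-bit value
 * is reassembled correctly.  The trailing bfa_qos_stats_s portion consists
 * of 32-bit fields and is converted word by word in the second loop.
 */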
  1272. static void
  1273. __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete)
  1274. {
  1275. struct bfa_pport_s *port = cbarg;
  1276. if (complete) {
  1277. port->stats_cbfn(port->stats_cbarg, port->stats_status);
  1278. } else {
  1279. port->stats_busy = BFA_FALSE;
  1280. port->stats_status = BFA_STATUS_OK;
  1281. }
  1282. }
  1283. static void
  1284. bfa_port_stats_clr_timeout(void *cbarg)
  1285. {
  1286. struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
  1287. bfa_trc(port->bfa, port->stats_qfull);
  1288. if (port->stats_qfull) {
  1289. bfa_reqq_wcancel(&port->stats_reqq_wait);
  1290. port->stats_qfull = BFA_FALSE;
  1291. }
  1292. port->stats_status = BFA_STATUS_ETIMER;
  1293. bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port);
  1294. }
  1295. static void
  1296. bfa_pport_callback(struct bfa_pport_s *pport, enum bfa_pport_linkstate event)
  1297. {
  1298. if (pport->bfa->fcs) {
  1299. pport->event_cbfn(pport->event_cbarg, event);
  1300. return;
  1301. }
  1302. switch (event) {
  1303. case BFA_PPORT_LINKUP:
  1304. bfa_sm_send_event(&pport->ln, BFA_PPORT_LN_SM_LINKUP);
  1305. break;
  1306. case BFA_PPORT_LINKDOWN:
  1307. bfa_sm_send_event(&pport->ln, BFA_PPORT_LN_SM_LINKDOWN);
  1308. break;
  1309. default:
  1310. bfa_assert(0);
  1311. }
  1312. }
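/*
 * Editor's note: when an FCS instance is attached (pport->bfa->fcs), link
 * events are delivered to the registered callback synchronously.  Without
 * FCS they are routed through the ln state machine, which defers the
 * callback via bfa_pport_queue_cb()/bfa_cb_queue() and delivers it from
 * __bfa_cb_port_event().
 */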
  1313. static void
  1314. bfa_pport_queue_cb(struct bfa_pport_ln_s *ln, enum bfa_pport_linkstate event)
  1315. {
  1316. ln->ln_event = event;
  1317. bfa_cb_queue(ln->pport->bfa, &ln->ln_qe, __bfa_cb_port_event, ln);
  1318. }
  1319. static void
  1320. __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete)
  1321. {
  1322. struct bfa_pport_s *port = cbarg;
  1323. if (complete) {
  1324. if (port->stats_status == BFA_STATUS_OK)
  1325. bfa_pport_stats_swap(port->stats_ret, port->stats);
  1326. port->stats_cbfn(port->stats_cbarg, port->stats_status);
  1327. } else {
  1328. port->stats_busy = BFA_FALSE;
  1329. port->stats_status = BFA_STATUS_OK;
  1330. }
  1331. }
  1332. static void
  1333. bfa_port_stats_timeout(void *cbarg)
  1334. {
  1335. struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
  1336. bfa_trc(port->bfa, port->stats_qfull);
  1337. if (port->stats_qfull) {
  1338. bfa_reqq_wcancel(&port->stats_reqq_wait);
  1339. port->stats_qfull = BFA_FALSE;
  1340. }
  1341. port->stats_status = BFA_STATUS_ETIMER;
  1342. bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port);
  1343. }
  1344. #define BFA_PORT_STATS_TOV 1000
  1345. /**
  1346. * Fetch port statistics.
  1347. */
  1348. bfa_status_t
  1349. bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
  1350. bfa_cb_pport_t cbfn, void *cbarg)
  1351. {
  1352. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1353. if (port->stats_busy) {
  1354. bfa_trc(bfa, port->stats_busy);
  1355. return BFA_STATUS_DEVBUSY;
  1356. }
  1357. port->stats_busy = BFA_TRUE;
  1358. port->stats_ret = stats;
  1359. port->stats_cbfn = cbfn;
  1360. port->stats_cbarg = cbarg;
  1361. bfa_port_stats_query(port);
  1362. bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port,
  1363. BFA_PORT_STATS_TOV);
  1364. return BFA_STATUS_OK;
  1365. }
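#if 0	/* Editor's illustration only, not part of the driver: a minimal
	 * usage sketch for the asynchronous stats fetch above.  The
	 * completion signature is assumed from how stats_cbfn is invoked in
	 * __bfa_cb_port_stats() (cbarg first, then a bfa_status_t), and
	 * struct my_drv is a hypothetical caller context. */
struct my_drv {
	struct bfa_s		bfa;	/* BFA instance owned by the caller */
	union bfa_pport_stats_u	pstats;	/* filled in on completion */
};

static void
my_stats_done(void *cbarg, bfa_status_t status)
{
	struct my_drv *drv = cbarg;

	if (status == BFA_STATUS_OK) {
		/* drv->pstats now holds byte-swapped counters */
	}
	/* BFA_STATUS_ETIMER: firmware did not respond within
	 * BFA_PORT_STATS_TOV */
}

static bfa_status_t
my_fetch_stats(struct my_drv *drv)
{
	/* Returns BFA_STATUS_DEVBUSY if a fetch or clear is already
	 * in progress for this port. */
	return bfa_pport_get_stats(&drv->bfa, &drv->pstats,
				   my_stats_done, drv);
}
#endif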
  1366. bfa_status_t
  1367. bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
  1368. {
  1369. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1370. if (port->stats_busy) {
  1371. bfa_trc(bfa, port->stats_busy);
  1372. return BFA_STATUS_DEVBUSY;
  1373. }
  1374. port->stats_busy = BFA_TRUE;
  1375. port->stats_cbfn = cbfn;
  1376. port->stats_cbarg = cbarg;
  1377. bfa_port_stats_clear(port);
  1378. bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
  1379. BFA_PORT_STATS_TOV);
  1380. return BFA_STATUS_OK;
  1381. }
  1382. bfa_status_t
  1383. bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
  1384. {
  1385. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1386. bfa_trc(bfa, bitmap);
  1387. bfa_trc(bfa, pport->cfg.trunked);
  1388. bfa_trc(bfa, pport->cfg.trunk_ports);
  1389. if (!bitmap || (bitmap & (bitmap - 1)))
  1390. return BFA_STATUS_EINVAL;
  1391. pport->cfg.trunked = BFA_TRUE;
  1392. pport->cfg.trunk_ports = bitmap;
  1393. return BFA_STATUS_OK;
  1394. }
  1395. void
  1396. bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
  1397. {
  1398. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1399. qos_attr->state = bfa_os_ntohl(pport->qos_attr.state);
  1400. qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr);
  1401. }
  1402. void
  1403. bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
  1404. struct bfa_qos_vc_attr_s *qos_vc_attr)
  1405. {
  1406. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1407. struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr;
  1408. u32 i = 0;
  1409. qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
  1410. qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
  1411. qos_vc_attr->elp_opmode_flags =
  1412. bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
  1413. /*
  1414. * Individual VC info
  1415. */
  1416. while (i < qos_vc_attr->total_vc_count) {
  1417. qos_vc_attr->vc_info[i].vc_credit =
  1418. bfa_vc_attr->vc_info[i].vc_credit;
  1419. qos_vc_attr->vc_info[i].borrow_credit =
  1420. bfa_vc_attr->vc_info[i].borrow_credit;
  1421. qos_vc_attr->vc_info[i].priority =
  1422. bfa_vc_attr->vc_info[i].priority;
  1423. ++i;
  1424. }
  1425. }
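/*
 * Editor's note: total_vc_count, shared_credit and elp_opmode_flags are
 * converted from the firmware's network byte order above, while the per-VC
 * credit/priority fields are copied without conversion, which suggests they
 * are byte-sized in bfa_qos_vc_attr_s (assumption; that structure is not
 * shown in this file).
 */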
  1426. /**
  1427. * Fetch QoS Stats.
  1428. */
  1429. bfa_status_t
  1430. bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
  1431. bfa_cb_pport_t cbfn, void *cbarg)
  1432. {
  1433. /*
  1434. * QoS stats are embedded in the port stats
  1435. */
  1436. return bfa_pport_get_stats(bfa, stats, cbfn, cbarg);
  1437. }
  1438. bfa_status_t
  1439. bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
  1440. {
  1441. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1442. if (port->stats_busy) {
  1443. bfa_trc(bfa, port->stats_busy);
  1444. return BFA_STATUS_DEVBUSY;
  1445. }
  1446. port->stats_busy = BFA_TRUE;
  1447. port->stats_cbfn = cbfn;
  1448. port->stats_cbarg = cbarg;
  1449. bfa_port_qos_stats_clear(port);
  1450. bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
  1451. BFA_PORT_STATS_TOV);
  1452. return BFA_STATUS_OK;
  1453. }
  1454. /**
  1455. * Disable trunking.
  1456. */
  1457. bfa_status_t
  1458. bfa_pport_trunk_disable(struct bfa_s *bfa)
  1459. {
  1460. return BFA_STATUS_OK;
  1461. }
  1462. bfa_boolean_t
  1463. bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
  1464. {
  1465. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1466. *bitmap = port->cfg.trunk_ports;
  1467. return port->cfg.trunked;
  1468. }
  1469. bfa_boolean_t
  1470. bfa_pport_is_disabled(struct bfa_s *bfa)
  1471. {
  1472. struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
  1473. return bfa_sm_to_state(hal_pport_sm_table, port->sm) ==
  1474. BFA_PPORT_ST_DISABLED;
  1475. }
  1476. bfa_boolean_t
  1477. bfa_pport_is_ratelim(struct bfa_s *bfa)
  1478. {
  1479. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1480. return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
  1481. }
  1482. void
  1483. bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
  1484. {
  1485. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1486. bfa_trc(bfa, on_off);
  1487. bfa_trc(bfa, pport->cfg.qos_enabled);
  1488. pport->cfg.qos_enabled = on_off;
  1489. }
  1490. void
  1491. bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
  1492. {
  1493. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1494. bfa_trc(bfa, on_off);
  1495. bfa_trc(bfa, pport->cfg.ratelimit);
  1496. pport->cfg.ratelimit = on_off;
  1497. if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
  1498. pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
  1499. }
  1500. /**
  1501. * Configure default minimum ratelim speed
  1502. */
  1503. bfa_status_t
  1504. bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
  1505. {
  1506. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1507. bfa_trc(bfa, speed);
  1508. /*
  1509. * Auto and speeds greater than the supported speed are invalid
  1510. */
  1511. if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) {
  1512. bfa_trc(bfa, pport->speed_sup);
  1513. return BFA_STATUS_UNSUPP_SPEED;
  1514. }
  1515. pport->cfg.trl_def_speed = speed;
  1516. return BFA_STATUS_OK;
  1517. }
  1518. /**
  1519. * Get default minimum ratelim speed
  1520. */
  1521. enum bfa_pport_speed
  1522. bfa_pport_get_ratelim_speed(struct bfa_s *bfa)
  1523. {
  1524. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1525. bfa_trc(bfa, pport->cfg.trl_def_speed);
  1526. return pport->cfg.trl_def_speed;
  1527. }
  1528. void
  1529. bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status)
  1530. {
  1531. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1532. bfa_trc(bfa, status);
  1533. bfa_trc(bfa, pport->diag_busy);
  1534. pport->diag_busy = status;
  1535. }
  1536. void
  1537. bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
  1538. bfa_boolean_t link_e2e_beacon)
  1539. {
  1540. struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
  1541. bfa_trc(bfa, beacon);
  1542. bfa_trc(bfa, link_e2e_beacon);
  1543. bfa_trc(bfa, pport->beacon);
  1544. bfa_trc(bfa, pport->link_e2e_beacon);
  1545. pport->beacon = beacon;
  1546. pport->link_e2e_beacon = link_e2e_beacon;
  1547. }
  1548. bfa_boolean_t
  1549. bfa_pport_is_linkup(struct bfa_s *bfa)
  1550. {
  1551. return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup);
  1552. }