/* bnx2x_stats.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"

/* Statistics */

/*
 * General service functions
 */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
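
/* Illustrative sketch (not part of the original driver sources): firmware
 * exports each 64-bit counter as a {hi, lo} pair of u32s stored hi-first,
 * which is the layout bnx2x_hilo() expects:
 *
 *	u32 cnt[2] = { 0x1, 0x80000000 };	// hi = 1, lo = 0x80000000
 *	long total = bnx2x_hilo(cnt);		// 0x180000000 on a 64-bit kernel
 *
 * On a 32-bit kernel only the low 32 bits are returned.
 */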

static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
{
	u16 res = 0;

	/* 'newest' convention - shmem2 contains the size of the port stats */
	if (SHMEM2_HAS(bp, sizeof_port_stats)) {
		u32 size = SHMEM2_RD(bp, sizeof_port_stats);
		if (size)
			res = size;

		/* prevent newer BC from causing buffer overflow */
		if (res > sizeof(struct host_port_stats))
			res = sizeof(struct host_port_stats);
	}

	/* Older convention - all BCs support the port stats' fields up until
	 * the 'not_used' field
	 */
	if (!res) {
		res = offsetof(struct host_port_stats, not_used) + 4;

		/* if PFC stats are supported by the MFW, DMA them as well */
		if (bp->flags & BC_SUPPORTS_PFC_STATS) {
			res += offsetof(struct host_port_stats,
					pfc_frames_rx_lo) -
			       offsetof(struct host_port_stats,
					pfc_frames_tx_hi) + 4;
		}
	}

	res >>= 2;

	WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
	return res;
}
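
/* Illustrative note: DMAE transfer lengths are counted in 32-bit dwords,
 * not bytes, hence the "res >>= 2" above. For example, a 200-byte stats
 * block becomes a dmae->len of 50, and the WARN_ON() guards against a
 * block that would not fit in the two-command read used by
 * bnx2x_stats_pmf_update() below.
 */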

/*
 * Init service functions
 */

/* Post the next statistics ramrod. Protect it with the spin lock in
 * order to ensure the strict order between statistics ramrods
 * (each ramrod has a sequence number passed in
 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
 * sent in order).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		int rc;

		spin_lock_bh(&bp->stats_lock);

		if (bp->stats_pending) {
			spin_unlock_bh(&bp->stats_lock);
			return;
		}

		bp->fw_stats_req->hdr.drv_stats_counter =
			cpu_to_le16(bp->stats_counter++);

		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
		   bp->fw_stats_req->hdr.drv_stats_counter);

		/* send FW stats ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
				   U64_HI(bp->fw_stats_req_mapping),
				   U64_LO(bp->fw_stats_req_mapping),
				   NONE_CONNECTION_TYPE);
		if (rc == 0)
			bp->stats_pending = 1;

		spin_unlock_bh(&bp->stats_lock);
	}
}
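
/* Illustrative note: the function above is a double-checked pattern.
 * stats_pending is tested once without bp->stats_lock to keep the common
 * "ramrod already in flight" path cheap, then re-tested under the lock
 * before the sequence counter is consumed, so two contexts can never post
 * ramrods carrying the same drv_stats_counter. Schematically:
 *
 *	if (!flag) {			// cheap unlocked check
 *		spin_lock_bh(&lock);
 *		if (flag) {		// lost the race, bail out
 *			spin_unlock_bh(&lock);
 *			return;
 *		}
 *		// ...consume counter, post ramrod, set flag...
 *		spin_unlock_bh(&lock);
 *	}
 */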

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
		       sizeof(bp->func_stats));
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 1000);
	}
	return 1;
}
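
/* Illustrative note: bnx2x_stats_comp() gives the DMA engine roughly
 * 10 ms (ten ~1 ms sleeps) to write DMAE_COMP_VAL into *stats_comp before
 * logging a timeout, and it returns 1 either way so callers always
 * proceed. A wider range such as usleep_range(1000, 2000) would let the
 * scheduler batch wakeups, at the cost of a slightly longer worst-case
 * poll.
 */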

/*
 * Statistics service functions
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
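
/* Illustrative note: a single DMAE read command is capped at
 * DMAE_LEN32_RD_MAX dwords, so the port stats block is fetched in two
 * chunks above. The first command moves exactly DMAE_LEN32_RD_MAX dwords;
 * the second starts DMAE_LEN32_RD_MAX dwords (DMAE_LEN32_RD_MAX * 4 bytes)
 * further into both the GRC source and the host buffer and carries the
 * remainder. This pairs with the WARN_ON(res > 2 * DMAE_LEN32_RD_MAX)
 * check in bnx2x_get_port_stats_dma_len().
 */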

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
				   true, DMAE_COMP_GRC);

	/* EMAC is special */
	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		u32 tx_src_addr_lo, rx_src_addr_lo;
		u16 rx_len, tx_len;

		/* configure the params according to MAC type */
		switch (bp->link_vars.mac_type) {
		case MAC_TYPE_BMAC:
			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
					   NIG_REG_INGRESS_BMAC0_MEM);

			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
			   BIGMAC_REGISTER_TX_STAT_GTBYT */
			if (CHIP_IS_E1x(bp)) {
				tx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
			} else {
				tx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
				rx_src_addr_lo = (mac_addr +
					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
			}
			break;

		case MAC_TYPE_UMAC: /* handled by MSTAT */
		case MAC_TYPE_XMAC: /* handled by MSTAT */
		default:
			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
			tx_src_addr_lo = (mac_addr +
					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
			rx_src_addr_lo = (mac_addr +
					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
			tx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_tx) >> 2;
			rx_len = sizeof(bp->slowpath->
					mac_stats.mstat_stats.stats_rx) >> 2;
			break;
		}

		/* TX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = tx_src_addr_lo;
		dmae->src_addr_hi = 0;
		dmae->len = tx_len;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* RX stats */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_hi = 0;
		dmae->src_addr_lo = rx_src_addr_lo;
		dmae->dst_addr_lo =
			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->dst_addr_hi =
			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
		dmae->len = rx_len;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	if (!CHIP_IS_E3(bp)) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
		dmae->len = (2*sizeof(u32)) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;

	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (CHIP_IS_E1x(bp)) {
		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);

		/* the macros below will use "bmac1_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

	} else {
		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);

		/* the macros below will use "bmac2_stats" type */
		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);

		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
		UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
		UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
		UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
		UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
		UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
		UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);

		/* collect PFC stats */
		pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
		pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;

		pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
		pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
	}

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}
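
/* Illustrative sketch: UPDATE_STAT64() is defined in bnx2x_stats.h and is
 * not reproduced here. Conceptually, for a hardware counter s mirrored
 * into a host counter t, it behaves like (64-bit math done as hi/lo pairs
 * through the local "diff"):
 *
 *	diff = new->s - mac_stx[0].t;	// delta since the previous DMA
 *	mac_stx[0].t = new->s;		// remember the raw HW value
 *	mac_stx[1].t += diff;		// accumulate the running total
 *
 * which is why each branch above must bind "new" to the matching
 * bmac1_stats/bmac2_stats layout before expanding the macros.
 */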

static void bnx2x_mstat_stats_update(struct bnx2x *bp)
{
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);

	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);

	/* collect pfc stats */
	ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
	       pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
	ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
	       pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);

	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
	ADD_STAT64(stats_tx.tx_gt127,
			tx_stat_etherstatspkts65octetsto127octets);
	ADD_STAT64(stats_tx.tx_gt255,
			tx_stat_etherstatspkts128octetsto255octets);
	ADD_STAT64(stats_tx.tx_gt511,
			tx_stat_etherstatspkts256octetsto511octets);
	ADD_STAT64(stats_tx.tx_gt1023,
			tx_stat_etherstatspkts512octetsto1023octets);
	ADD_STAT64(stats_tx.tx_gt1518,
			tx_stat_etherstatspkts1024octetsto1522octets);
	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
	ADD_STAT64(stats_tx.tx_gterr,
			tx_stat_dot3statsinternalmactransmiterrors);
	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);

	estats->etherstatspkts1024octetsto1522octets_hi =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
	estats->etherstatspkts1024octetsto1522octets_lo =
	    pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;

	estats->etherstatspktsover1522octets_hi =
	    pstats->mac_stx[1].tx_stat_mac_2047_hi;
	estats->etherstatspktsover1522octets_lo =
	    pstats->mac_stx[1].tx_stat_mac_2047_lo;

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_4095_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_4095_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_9216_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_9216_lo);

	ADD_64(estats->etherstatspktsover1522octets_hi,
	       pstats->mac_stx[1].tx_stat_mac_16383_hi,
	       estats->etherstatspktsover1522octets_lo,
	       pstats->mac_stx[1].tx_stat_mac_16383_lo);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_mac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;

	estats->pfc_frames_received_hi =
				pstats->pfc_frames_rx_hi;
	estats->pfc_frames_received_lo =
				pstats->pfc_frames_rx_lo;
	estats->pfc_frames_sent_hi =
				pstats->pfc_frames_tx_hi;
	estats->pfc_frames_sent_lo =
				pstats->pfc_frames_tx_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	switch (bp->link_vars.mac_type) {
	case MAC_TYPE_BMAC:
		bnx2x_bmac_stats_update(bp);
		break;

	case MAC_TYPE_EMAC:
		bnx2x_emac_stats_update(bp);
		break;

	case MAC_TYPE_UMAC:
	case MAC_TYPE_XMAC:
		bnx2x_mstat_stats_update(bp);
		break;

	case MAC_TYPE_NONE: /* unreached */
		DP(BNX2X_MSG_STATS,
		   "stats updated by DMAE but no MAC active\n");
		return -1;

	default: /* unreached */
		BNX2X_ERR("Unknown MAC type\n");
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	if (!CHIP_IS_E3(bp)) {
		UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
		UPDATE_STAT64_NIG(egress_mac_pkt1,
					etherstatspktsover1522octets);
	}

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_counter++;

	if (CHIP_IS_E3(bp)) {
		u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
					  : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
		estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
	}

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_port_stats *tport =
			&bp->fw_stats_data->port.tstorm_port_statistics;
	struct tstorm_per_pf_stats *tfunc =
			&bp->fw_stats_data->pf.tstorm_pf_statistics;
	struct host_func_stats *fstats = &bp->func_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
	int i;
	u16 cur_stats_counter;

	/* Make sure we use the value of the counter
	 * used for sending the last stats ramrod.
	 */
	spin_lock_bh(&bp->stats_lock);
	cur_stats_counter = bp->stats_counter - 1;
	spin_unlock_bh(&bp->stats_lock);

	/* are storm stats valid? */
	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by xstorm - xstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by ustorm - ustorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by cstorm - cstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
		DP(BNX2X_MSG_STATS,
		   "stats not updated by tstorm - tstorm counter (0x%x) != stats_counter (0x%x)\n",
		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
		return -EAGAIN;
	}

	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct tstorm_per_queue_stats *tclient =
			&bp->fw_stats_data->queue_stats[i].
			tstorm_queue_statistics;
		struct tstorm_per_queue_stats *old_tclient =
			&bnx2x_fp_stats(bp, fp)->old_tclient;
		struct ustorm_per_queue_stats *uclient =
			&bp->fw_stats_data->queue_stats[i].
			ustorm_queue_statistics;
		struct ustorm_per_queue_stats *old_uclient =
			&bnx2x_fp_stats(bp, fp)->old_uclient;
		struct xstorm_per_queue_stats *xclient =
			&bp->fw_stats_data->queue_stats[i].
			xstorm_queue_statistics;
		struct xstorm_per_queue_stats *old_xclient =
			&bnx2x_fp_stats(bp, fp)->old_xclient;
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		u32 diff;

		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
		   i, xclient->ucast_pkts_sent,
		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);

		DP(BNX2X_MSG_STATS, "---------------\n");

		UPDATE_QSTAT(tclient->rcv_bcast_bytes,
			     total_broadcast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_mcast_bytes,
			     total_multicast_bytes_received);
		UPDATE_QSTAT(tclient->rcv_ucast_bytes,
			     total_unicast_bytes_received);

		/*
		 * sum to total_bytes_received all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_received_hi =
			qstats->total_broadcast_bytes_received_hi;
		qstats->total_bytes_received_lo =
			qstats->total_broadcast_bytes_received_lo;

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_multicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->total_unicast_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->total_unicast_bytes_received_lo);

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
				      etherstatsoverrsizepkts);
		UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);

		UPDATE_QSTAT(xclient->bcast_bytes_sent,
			     total_broadcast_bytes_transmitted);
		UPDATE_QSTAT(xclient->mcast_bytes_sent,
			     total_multicast_bytes_transmitted);
		UPDATE_QSTAT(xclient->ucast_bytes_sent,
			     total_unicast_bytes_transmitted);

		/*
		 * sum to total_bytes_transmitted all
		 * unicast/multicast/broadcast
		 */
		qstats->total_bytes_transmitted_hi =
				qstats->total_unicast_bytes_transmitted_hi;
		qstats->total_bytes_transmitted_lo =
				qstats->total_unicast_bytes_transmitted_lo;

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
					total_broadcast_packets_transmitted);

		UPDATE_EXTEND_TSTAT(checksum_discard,
				total_packets_received_checksum_discarded);
		UPDATE_EXTEND_TSTAT(ttl0_discard,
				total_packets_received_ttl0_discarded);

		UPDATE_EXTEND_XSTAT(error_drop_pkts,
				total_transmitted_dropped_packets_error);

		/* TPA aggregations completed */
		UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
		/* Number of network frames aggregated by TPA */
		UPDATE_EXTEND_E_USTAT(coalesced_pkts,
				      total_tpa_aggregated_frames);
		/* Total number of bytes in completed TPA aggregations */
		UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);

		UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);

		UPDATE_FSTAT_QSTAT(total_bytes_received);
		UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
		UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
		UPDATE_FSTAT_QSTAT(valid_bytes_received);
	}

	ADD_64(estats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	ADD_64(estats->error_bytes_received_hi,
	       le32_to_cpu(tfunc->rcv_error_bytes.hi),
	       estats->error_bytes_received_lo,
	       le32_to_cpu(tfunc->rcv_error_bytes.lo));

	UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);

	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
		UPDATE_FW_STAT(mac_filter_discard);
		UPDATE_FW_STAT(mf_tag_discard);
		UPDATE_FW_STAT(brb_truncate_discard);
		UPDATE_FW_STAT(mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	unsigned long tmp;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	tmp = estats->mac_discard;
	for_each_rx_queue(bp, i) {
		struct tstorm_per_queue_stats *old_tclient =
			&bp->fp_stats[i].old_tclient;
		tmp += le32_to_cpu(old_tclient->checksum_discard);
	}
	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
	}
}

static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (bnx2x_edebug_stats_stopped(bp))
		return;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);

		if (bp->func_stx)
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		else
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
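
/* Illustrative note: the table above is indexed as
 * bnx2x_stats_stm[current_state][event]. For example, in
 * STATS_STATE_DISABLED a LINK_UP event runs bnx2x_stats_start() and moves
 * the machine to STATS_STATE_ENABLED; a later STOP event runs
 * bnx2x_stats_stop() and returns it to STATS_STATE_DISABLED, while UPDATE
 * events in the DISABLED state deliberately do nothing.
 */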
  1098. void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
  1099. {
  1100. enum bnx2x_stats_state state;
  1101. if (unlikely(bp->panic))
  1102. return;
  1103. spin_lock_bh(&bp->stats_lock);
  1104. state = bp->stats_state;
  1105. bp->stats_state = bnx2x_stats_stm[state][event].next_state;
  1106. spin_unlock_bh(&bp->stats_lock);
  1107. bnx2x_stats_stm[state][event].action(bp);
  1108. if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
  1109. DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
  1110. state, event, bp->stats_state);
  1111. }
  1112. static void bnx2x_port_stats_base_init(struct bnx2x *bp)
  1113. {
  1114. struct dmae_command *dmae;
  1115. u32 *stats_comp = bnx2x_sp(bp, stats_comp);
  1116. /* sanity */
  1117. if (!bp->port.pmf || !bp->port.port_stx) {
  1118. BNX2X_ERR("BUG!\n");
  1119. return;
  1120. }
  1121. bp->executer_idx = 0;
  1122. dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
  1123. dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
  1124. true, DMAE_COMP_PCI);
  1125. dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
  1126. dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
  1127. dmae->dst_addr_lo = bp->port.port_stx >> 2;
  1128. dmae->dst_addr_hi = 0;
  1129. dmae->len = bnx2x_get_port_stats_dma_len(bp);
  1130. dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
  1131. dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
  1132. dmae->comp_val = DMAE_COMP_VAL;
  1133. *stats_comp = 0;
  1134. bnx2x_hw_stats_post(bp);
  1135. bnx2x_stats_comp(bp);
  1136. }
  1137. /* This function will prepare the statistics ramrod data the way
  1138. * we will only have to increment the statistics counter and
  1139. * send the ramrod each time we have to.
  1140. */
  1141. static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
  1142. {
  1143. int i;
  1144. int first_queue_query_index;
  1145. struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
  1146. dma_addr_t cur_data_offset;
  1147. struct stats_query_entry *cur_query_entry;
  1148. stats_hdr->cmd_num = bp->fw_stats_num;
  1149. stats_hdr->drv_stats_counter = 0;
  1150. /* storm_counters struct contains the counters of completed
  1151. * statistics requests per storm which are incremented by FW
  1152. * each time it completes hadning a statistics ramrod. We will
  1153. * check these counters in the timer handler and discard a
  1154. * (statistics) ramrod completion.
  1155. */
  1156. cur_data_offset = bp->fw_stats_data_mapping +
  1157. offsetof(struct bnx2x_fw_stats_data, storm_counters);
  1158. stats_hdr->stats_counters_addrs.hi =
  1159. cpu_to_le32(U64_HI(cur_data_offset));
  1160. stats_hdr->stats_counters_addrs.lo =
  1161. cpu_to_le32(U64_LO(cur_data_offset));
  1162. /* prepare to the first stats ramrod (will be completed with
  1163. * the counters equal to zero) - init counters to somethig different.
  1164. */
  1165. memset(&bp->fw_stats_data->storm_counters, 0xff,
  1166. sizeof(struct stats_counter));
  1167. /**** Port FW statistics data ****/
  1168. cur_data_offset = bp->fw_stats_data_mapping +
  1169. offsetof(struct bnx2x_fw_stats_data, port);
  1170. cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
  1171. cur_query_entry->kind = STATS_TYPE_PORT;
  1172. /* For port query index is a DONT CARE */
  1173. cur_query_entry->index = BP_PORT(bp);
  1174. /* For port query funcID is a DONT CARE */
  1175. cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
  1176. cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
  1177. cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For a PF query, index is a DON'T CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
	/**** FCoE FW statistics data ****/
	if (!NO_FCOE(bp)) {
		cur_data_offset = bp->fw_stats_data_mapping +
			offsetof(struct bnx2x_fw_stats_data, fcoe);

		cur_query_entry =
			&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

		cur_query_entry->kind = STATS_TYPE_FCOE;
		/* For an FCoE query, index is a DON'T CARE */
		cur_query_entry->index = BP_PORT(bp);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* The first queue query index depends on whether the FCoE offload
	 * request will be included in the ramrod.
	 */
	if (!NO_FCOE(bp))
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
	else
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
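
	/* one query entry per eth queue, each pointing at its own
	 * per_queue_stats slot in the FW statistics data buffer
	 */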
	for_each_eth_queue(bp, i) {
		cur_query_entry =
			&bp->fw_stats_req->query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		cur_data_offset += sizeof(struct per_queue_stats);
	}
	/* add the FCoE queue query if needed; i still holds the eth queue
	 * count here, so this entry lands right after the per-queue ones
	 */
	if (!NO_FCOE(bp)) {
		cur_query_entry =
			&bp->fw_stats_req->query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));
	}
}
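
/* Reset all statistics bookkeeping: read the stats mailbox addresses
 * from shared memory, snapshot the NIG discard counters as the "old"
 * baseline, clear the per-queue and driver-global shadows on a first
 * init, prepare the FW statistics ramrod and, on the PMF, post the
 * port stats baseline to shared memory.
 */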
void bnx2x_stats_init(struct bnx2x *bp)
{
	int /*abs*/ port = BP_PORT(bp);
	int mb_idx = BP_FW_MB_IDX(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* the pmf should retrieve port statistics from the SP on a non-init */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
		REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
		REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
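
	/* on non-E3 chips, also snapshot the two-dword (64-bit) NIG
	 * egress MAC packet counters via DMAE
	 */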
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
	}
	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));
		}
	}
	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));

		/* Clean SP from previous statistics */
		if (bp->func_stx) {
			memset(bnx2x_sp(bp, func_stats), 0,
			       sizeof(struct host_func_stats));
			bnx2x_func_stats_init(bp);
			bnx2x_hw_stats_post(bp);
			bnx2x_stats_comp(bp);
		}
	}

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
}
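
/* Snapshot the counters that must survive a device re-initialization:
 * the per-queue 64-bit byte counters and, for the PMF in multi-function
 * mode, a few port-level FW discard counters.
 */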
void bnx2x_save_statistics(struct bnx2x *bp)
{
	int i;
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
	}
	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;

		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
	}
}
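
/* Aggregate AFEX statistics for the management firmware: sum the L2
 * per-queue counters, fold in the FCoE counters that the FW and the
 * storms collect separately and, on the PMF, add the port-level
 * discard counters as well.
 */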
void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
			      u32 stats_type)
{
	int i;
	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct per_queue_stats *fcoe_q_stats =
		&bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
		&fcoe_q_stats->tstorm_queue_statistics;
	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
		&fcoe_q_stats->ustorm_queue_statistics;
	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
		&fcoe_q_stats->xstorm_queue_statistics;
	struct fcoe_statistics_params *fw_fcoe_stat =
		&bp->fw_stats_data->fcoe;

	memset(afex_stats, 0, sizeof(struct afex_stats));
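
	/* accumulate the per-queue L2 counters into the AFEX totals */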
	for_each_eth_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;

		ADD_64(afex_stats->rx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_received_hi,
		       afex_stats->rx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_received_lo);

		ADD_64(afex_stats->rx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_received_hi,
		       afex_stats->rx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_received_lo);

		ADD_64(afex_stats->rx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_received_hi,
		       afex_stats->rx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(afex_stats->rx_unicast_frames_hi,
		       qstats->total_unicast_packets_received_hi,
		       afex_stats->rx_unicast_frames_lo,
		       qstats->total_unicast_packets_received_lo);

		ADD_64(afex_stats->rx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_received_hi,
		       afex_stats->rx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_received_lo);

		ADD_64(afex_stats->rx_multicast_frames_hi,
		       qstats->total_multicast_packets_received_hi,
		       afex_stats->rx_multicast_frames_lo,
		       qstats->total_multicast_packets_received_lo);
		/* sum into rx_frames_discarded all packets discarded
		 * due to size, ttl0 and checksum
		 */
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_checksum_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_checksum_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_ttl0_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_ttl0_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->etherstatsoverrsizepkts_lo);

		ADD_64(afex_stats->rx_frames_dropped_hi,
		       qstats->no_buff_discard_hi,
		       afex_stats->rx_frames_dropped_lo,
		       qstats->no_buff_discard_lo);

		ADD_64(afex_stats->tx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_transmitted_hi,
		       afex_stats->tx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       afex_stats->tx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       afex_stats->tx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_unicast_frames_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       afex_stats->tx_unicast_frames_lo,
		       qstats->total_unicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       afex_stats->tx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_frames_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       afex_stats->tx_multicast_frames_lo,
		       qstats->total_multicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_frames_dropped_hi,
		       qstats->total_transmitted_dropped_packets_error_hi,
		       afex_stats->tx_frames_dropped_lo,
		       qstats->total_transmitted_dropped_packets_error_lo);
	}
	/* now add the FCoE statistics, which are collected separately
	 * (both offloaded and non-offloaded); the _LE variants of ADD_64
	 * convert the FW's little-endian addends before accumulating
	 */
	if (!NO_FCOE(bp)) {
		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->rx_unicast_bytes_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
			  afex_stats->rx_unicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
			  afex_stats->rx_broadcast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
			  afex_stats->rx_multicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->rx_broadcast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_pkts);
		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_multicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_pkts);
		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->checksum_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->pkts_too_big_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->ttl0_discard);

		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
			    LE16_0,
			    afex_stats->rx_frames_dropped_lo,
			    fcoe_q_tstorm_stats->no_buff_discard);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->tx_unicast_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  afex_stats->tx_unicast_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  afex_stats->tx_broadcast_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  afex_stats->tx_multicast_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->tx_broadcast_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_multicast_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->tx_frames_dropped_lo,
			  fcoe_q_xstorm_stats->error_drop_pkts);
	}
	/* If port stats are requested, add them to the PMF stats, since
	 * they will be accumulated by the MCP anyway before being sent
	 * to the switch.
	 */
	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->mac_filter_discard);
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->brb_truncate_discard);
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       0,
		       afex_stats->rx_frames_discarded_lo,
		       estats->mac_discard);
	}
}