/* bnx2x_sp.c */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * @bp:          driver handle
 * @set:         set or clear an entry (1 or 0)
 * @mac:         pointer to a buffer containing a MAC
 * @cl_bit_vec:  bit vector of clients to register a MAC for
 * @cam_offset:  offset in a CAM to use
 * @is_bcast:    is the set MAC a broadcast address (for E1 only)
 */
void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
                            u32 cl_bit_vec, u8 cam_offset,
                            u8 is_bcast)
{
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
        int ramrod_flags = WAIT_RAMROD_COMMON;

        bp->set_mac_pending = 1;

        config->hdr.length = 1;
        config->hdr.offset = cam_offset;
        config->hdr.client_id = 0xff;

        /* Mark the single MAC configuration ramrod as opposed to a
         * UC/MC list configuration.
         */
        config->hdr.echo = 1;

        /* primary MAC */
        config->config_table[0].msb_mac_addr =
                swab16(*(u16 *)&mac[0]);
        config->config_table[0].middle_mac_addr =
                swab16(*(u16 *)&mac[2]);
        config->config_table[0].lsb_mac_addr =
                swab16(*(u16 *)&mac[4]);
        config->config_table[0].clients_bit_vector =
                cpu_to_le32(cl_bit_vec);
        config->config_table[0].vlan_id = 0;
        config->config_table[0].pf_id = BP_FUNC(bp);
        if (set)
                SET_FLAG(config->config_table[0].flags,
                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_SET);
        else
                SET_FLAG(config->config_table[0].flags,
                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_INVALIDATE);

        if (is_bcast)
                SET_FLAG(config->config_table[0].flags,
                         MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

        DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
           (set ? "setting" : "clearing"),
           config->config_table[0].msb_mac_addr,
           config->config_table[0].middle_mac_addr,
           config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

        mb();

        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

        /* Wait for a completion */
        bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
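
/* Offset in the E1 CAM at which this port's multicast entries start */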
static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
{
        return CHIP_REV_IS_SLOW(bp) ?
                (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
                (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
}

/* set mc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of a very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 */
int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
        int i = 0, old;
        struct net_device *dev = bp->dev;
        u8 offset = bnx2x_e1_cam_mc_offset(bp);
        struct netdev_hw_addr *ha;
        struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
        dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

        if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
                return -EINVAL;

        netdev_for_each_mc_addr(ha, dev) {
                /* copy mac */
                config_cmd->config_table[i].msb_mac_addr =
                        swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
                config_cmd->config_table[i].middle_mac_addr =
                        swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
                config_cmd->config_table[i].lsb_mac_addr =
                        swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

                config_cmd->config_table[i].vlan_id = 0;
                config_cmd->config_table[i].pf_id = BP_FUNC(bp);
                config_cmd->config_table[i].clients_bit_vector =
                        cpu_to_le32(1 << BP_L_ID(bp));

                SET_FLAG(config_cmd->config_table[i].flags,
                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_SET);

                DP(NETIF_MSG_IFUP,
                   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
                   config_cmd->config_table[i].msb_mac_addr,
                   config_cmd->config_table[i].middle_mac_addr,
                   config_cmd->config_table[i].lsb_mac_addr);
                i++;
        }
        old = config_cmd->hdr.length;
        if (old > i) {
                for (; i < old; i++) {
                        if (CAM_IS_INVALID(config_cmd->config_table[i])) {
                                /* already invalidated */
                                break;
                        }
                        /* invalidate */
                        SET_FLAG(config_cmd->config_table[i].flags,
                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                                 T_ETH_MAC_COMMAND_INVALIDATE);
                }
        }

        wmb();

        config_cmd->hdr.length = i;
        config_cmd->hdr.offset = offset;
        config_cmd->hdr.client_id = 0xff;

        /* Mark that this ramrod doesn't use bp->set_mac_pending for
         * synchronization.
         */
        config_cmd->hdr.echo = 0;

        mb();

        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                             U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
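
/* Invalidate every multicast CAM entry previously written by
 * bnx2x_set_e1_mc_list() and, unlike the set path, wait for the ramrod
 * completion.
 */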
void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
        int i;
        struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
        dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
        int ramrod_flags = WAIT_RAMROD_COMMON;
        u8 offset = bnx2x_e1_cam_mc_offset(bp);

        for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
                SET_FLAG(config_cmd->config_table[i].flags,
                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_INVALIDATE);

        wmb();

        config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
        config_cmd->hdr.offset = offset;
        config_cmd->hdr.client_id = 0xff;
        /* We'll wait for a completion this time... */
        config_cmd->hdr.echo = 1;

        bp->set_mac_pending = 1;

        mb();

        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

        /* Wait for a completion */
        bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
                          ramrod_flags);
}

/* Accept one or more multicasts */
int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
{
        struct net_device *dev = bp->dev;
        struct netdev_hw_addr *ha;
        u32 mc_filter[MC_HASH_SIZE];
        u32 crc, bit, regidx;
        int i;

        memset(mc_filter, 0, 4 * MC_HASH_SIZE);
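
        /* Hash each address into the 256-bit MC_HASH filter: the top byte
         * of its crc32c picks which register and bit to set.
         */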
        netdev_for_each_mc_addr(ha, dev) {
                DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
                   bnx2x_mc_addr(ha));

                crc = crc32c_le(0, bnx2x_mc_addr(ha),
                                ETH_ALEN);
                bit = (crc >> 24) & 0xff;
                regidx = bit >> 5;
                bit &= 0x1f;
                mc_filter[regidx] |= (1 << bit);
        }

        for (i = 0; i < MC_HASH_SIZE; i++)
                REG_WR(bp, MC_HASH_OFFSET(bp, i),
                       mc_filter[i]);

        return 0;
}
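
/* Clear all E1H multicast hash filter registers */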
void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
{
        int i;

        for (i = 0; i < MC_HASH_SIZE; i++)
                REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
}
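
/* Translate the BNX2X_ACCEPT_* bits in @filters into per-client accept/drop
 * masks in bp->mac_filters for client @cl_id. Nothing is written to the chip
 * here; bnx2x_set_storm_rx_mode() pushes the updated structure to the STORMs
 * via storm_memset_mac_filters().
 */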
/* must be called under rtnl_lock */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
        u32 mask = (1 << cl_id);

        /* initial setting is BNX2X_ACCEPT_NONE */
        u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
        u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
        u8 unmatched_unicast = 0;

        if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
                unmatched_unicast = 1;

        if (filters & BNX2X_PROMISCUOUS_MODE) {
                /* promiscuous - accept all, drop none */
                drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
                accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
                if (IS_MF_SI(bp)) {
                        /*
                         * SI mode defines to accept in promiscuous mode
                         * only unmatched packets
                         */
                        unmatched_unicast = 1;
                        accp_all_ucast = 0;
                }
        }
        if (filters & BNX2X_ACCEPT_UNICAST) {
                /* accept matched ucast */
                drop_all_ucast = 0;
        }
        if (filters & BNX2X_ACCEPT_MULTICAST)
                /* accept matched mcast */
                drop_all_mcast = 0;

        if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
                /* accept all ucast */
                drop_all_ucast = 0;
                accp_all_ucast = 1;
        }
        if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
                /* accept all mcast */
                drop_all_mcast = 0;
                accp_all_mcast = 1;
        }
        if (filters & BNX2X_ACCEPT_BROADCAST) {
                /* accept (all) bcast */
                drop_all_bcast = 0;
                accp_all_bcast = 1;
        }

        bp->mac_filters.ucast_drop_all = drop_all_ucast ?
                bp->mac_filters.ucast_drop_all | mask :
                bp->mac_filters.ucast_drop_all & ~mask;

        bp->mac_filters.mcast_drop_all = drop_all_mcast ?
                bp->mac_filters.mcast_drop_all | mask :
                bp->mac_filters.mcast_drop_all & ~mask;

        bp->mac_filters.bcast_drop_all = drop_all_bcast ?
                bp->mac_filters.bcast_drop_all | mask :
                bp->mac_filters.bcast_drop_all & ~mask;

        bp->mac_filters.ucast_accept_all = accp_all_ucast ?
                bp->mac_filters.ucast_accept_all | mask :
                bp->mac_filters.ucast_accept_all & ~mask;

        bp->mac_filters.mcast_accept_all = accp_all_mcast ?
                bp->mac_filters.mcast_accept_all | mask :
                bp->mac_filters.mcast_accept_all & ~mask;

        bp->mac_filters.bcast_accept_all = accp_all_bcast ?
                bp->mac_filters.bcast_accept_all | mask :
                bp->mac_filters.bcast_accept_all & ~mask;

        bp->mac_filters.unmatched_unicast = unmatched_unicast ?
                bp->mac_filters.unmatched_unicast | mask :
                bp->mac_filters.unmatched_unicast & ~mask;
}
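
/* Apply bp->rx_mode: derive the accept/drop filter set for the default
 * client (and for the FCoE client when CNIC is compiled in), program the
 * NIG LLH mask for this port and push the resulting mac_filters to the
 * STORM firmware memory.
 */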
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
        int mode = bp->rx_mode;
        int port = BP_PORT(bp);
        u16 cl_id;
        u32 def_q_filters = 0;

        /* All but management unicast packets should pass to the host as well */
        u32 llh_mask =
                NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
                NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
                NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
                NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

        switch (mode) {
        case BNX2X_RX_MODE_NONE: /* no Rx */
                def_q_filters = BNX2X_ACCEPT_NONE;
#ifdef BCM_CNIC
                if (!NO_FCOE(bp)) {
                        cl_id = bnx2x_fcoe(bp, cl_id);
                        bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
                }
#endif
                break;

        case BNX2X_RX_MODE_NORMAL:
                def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
                                 BNX2X_ACCEPT_MULTICAST;
#ifdef BCM_CNIC
                if (!NO_FCOE(bp)) {
                        cl_id = bnx2x_fcoe(bp, cl_id);
                        bnx2x_rxq_set_mac_filters(bp, cl_id,
                                                  BNX2X_ACCEPT_UNICAST |
                                                  BNX2X_ACCEPT_MULTICAST);
                }
#endif
                break;

        case BNX2X_RX_MODE_ALLMULTI:
                def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
                                 BNX2X_ACCEPT_ALL_MULTICAST;
#ifdef BCM_CNIC
                /*
                 * Prevent duplication of multicast packets by configuring FCoE
                 * L2 Client to receive only matched unicast frames.
                 */
                if (!NO_FCOE(bp)) {
                        cl_id = bnx2x_fcoe(bp, cl_id);
                        bnx2x_rxq_set_mac_filters(bp, cl_id,
                                                  BNX2X_ACCEPT_UNICAST);
                }
#endif
                break;

        case BNX2X_RX_MODE_PROMISC:
                def_q_filters |= BNX2X_PROMISCUOUS_MODE;
#ifdef BCM_CNIC
                /*
                 * Prevent packet duplication by configuring DROP_ALL for FCoE
                 * L2 Client.
                 */
                if (!NO_FCOE(bp)) {
                        cl_id = bnx2x_fcoe(bp, cl_id);
                        bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
                }
#endif
                /* pass management unicast packets as well */
                llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
                break;

        default:
                BNX2X_ERR("BAD rx mode (%d)\n", mode);
                break;
        }

        cl_id = BP_L_ID(bp);
        bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);

        REG_WR(bp,
               (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
                       NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);

        DP(NETIF_MSG_IFUP, "rx mode %d\n"
           "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
           "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
           "unmatched_ucast 0x%x\n", mode,
           bp->mac_filters.ucast_drop_all,
           bp->mac_filters.mcast_drop_all,
           bp->mac_filters.bcast_drop_all,
           bp->mac_filters.ucast_accept_all,
           bp->mac_filters.mcast_accept_all,
           bp->mac_filters.bcast_accept_all,
           bp->mac_filters.unmatched_unicast);

        storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}

/* RSS configuration */
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
                                              u32 addr, dma_addr_t mapping)
{
        REG_WR(bp, addr, U64_LO(mapping));
        REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_fill(struct bnx2x *bp,
                                u32 addr, size_t size, u32 val)
{
        int i;

        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct ustorm_per_client_stats);
        u32 addr = BAR_USTRORM_INTMEM +
                   USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct tstorm_per_client_stats);
        u32 addr = BAR_TSTRORM_INTMEM +
                   TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct xstorm_per_client_stats);
        u32 addr = BAR_XSTRORM_INTMEM +
                   XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_fill(bp, addr, size, 0);
}

static inline void storm_memset_spq_addr(struct bnx2x *bp,
                                         dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = XSEM_REG_FAST_MEMORY +
                   XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);
        u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);
        u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);
        u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
                                            dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_XSTRORM_INTMEM +
                   XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
                                            dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_TSTRORM_INTMEM +
                   TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
                                            dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_USTRORM_INTMEM +
                   USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
                                            dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                         u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                        u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
                                struct tstorm_eth_function_common_config *tcfg,
                                u16 abs_fid)
{
        size_t size = sizeof(struct tstorm_eth_function_common_config);
        u32 addr = BAR_TSTRORM_INTMEM +
                   TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}
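
/**
 * bnx2x_func_init - initialize per-function firmware state
 *
 * @bp: driver handle
 * @p:  function init parameters (RSS mode/capabilities, statistics and SPQ
 *      mappings, function/PF ids, flags)
 *
 * Writes the common TSTORM configuration, enables the function in the FW
 * and, depending on p->func_flgs, sets up the statistics collection
 * addresses and the slow path queue (SPQ) page base and producer.
 */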
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
        struct tstorm_eth_function_common_config tcfg = {0};
        u16 rss_flgs;

        /* tpa */
        if (p->func_flgs & FUNC_FLG_TPA)
                tcfg.config_flags |=
                        TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

        /* set rss flags */
        rss_flgs = (p->rss->mode <<
                    TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

        if (p->rss->cap & RSS_IPV4_CAP)
                rss_flgs |= RSS_IPV4_CAP_MASK;
        if (p->rss->cap & RSS_IPV4_TCP_CAP)
                rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
        if (p->rss->cap & RSS_IPV6_CAP)
                rss_flgs |= RSS_IPV6_CAP_MASK;
        if (p->rss->cap & RSS_IPV6_TCP_CAP)
                rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

        tcfg.config_flags |= rss_flgs;
        tcfg.rss_result_mask = p->rss->result_mask;

        storm_memset_func_cfg(bp, &tcfg, p->func_id);

        /* Enable the function in the FW */
        storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
        storm_memset_func_en(bp, p->func_id, 1);

        /* statistics */
        if (p->func_flgs & FUNC_FLG_STATS) {
                struct stats_indication_flags stats_flags = {0};
                stats_flags.collect_eth = 1;

                storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
                storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

                storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
                storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

                storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
                storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

                storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
                storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
        }

        /* spq */
        if (p->func_flgs & FUNC_FLG_SPQ) {
                storm_memset_spq_addr(bp, p->spq_map, p->func_id);
                REG_WR(bp, XSEM_REG_FAST_MEMORY +
                       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
        }
}
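
/* Build the client_init_ramrod_data buffer from the driver's queue setup
 * parameters: general client info, Rx/Tx BD/SGE/CQE page bases and status
 * block indices, and flow control thresholds. The buffer is consumed by the
 * CLIENT_SETUP ramrod posted from bnx2x_setup_fw_client().
 */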
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
                                    struct bnx2x_client_init_params *params,
                                    u8 activate,
                                    struct client_init_ramrod_data *data)
{
        /* Clear the buffer */
        memset(data, 0, sizeof(*data));

        /* general */
        data->general.client_id = params->rxq_params.cl_id;
        data->general.statistics_counter_id = params->rxq_params.stat_id;
        data->general.statistics_en_flg =
                (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
        data->general.is_fcoe_flg =
                (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
        data->general.activate_flg = activate;
        data->general.sp_client_id = params->rxq_params.spcl_id;

        /* Rx data */
        data->rx.tpa_en_flg =
                (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
        data->rx.vmqueue_mode_en_flg = 0;
        data->rx.cache_line_alignment_log_size =
                params->rxq_params.cache_line_log;
        data->rx.enable_dynamic_hc =
                (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
        data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
        data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
        data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

        /* We don't set drop flags */
        data->rx.drop_ip_cs_err_flg = 0;
        data->rx.drop_tcp_cs_err_flg = 0;
        data->rx.drop_ttl0_flg = 0;
        data->rx.drop_udp_cs_err_flg = 0;

        data->rx.inner_vlan_removal_enable_flg =
                (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
        data->rx.outer_vlan_removal_enable_flg =
                (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
        data->rx.status_block_id = params->rxq_params.fw_sb_id;
        data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
        data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
        data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
        data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
        data->rx.bd_page_base.lo =
                cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
        data->rx.bd_page_base.hi =
                cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
        data->rx.sge_page_base.lo =
                cpu_to_le32(U64_LO(params->rxq_params.sge_map));
        data->rx.sge_page_base.hi =
                cpu_to_le32(U64_HI(params->rxq_params.sge_map));
        data->rx.cqe_page_base.lo =
                cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
        data->rx.cqe_page_base.hi =
                cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
        data->rx.is_leading_rss =
                (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
        data->rx.is_approx_mcast = data->rx.is_leading_rss;

        /* Tx data */
        data->tx.enforce_security_flg = 0; /* VF specific */
        data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
        data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
        data->tx.mtu = 0; /* VF specific */
        data->tx.tx_bd_page_base.lo =
                cpu_to_le32(U64_LO(params->txq_params.dscr_map));
        data->tx.tx_bd_page_base.hi =
                cpu_to_le32(U64_HI(params->txq_params.dscr_map));

        /* flow control data */
        data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
        data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
        data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
        data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
        data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
        data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
        data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

        data->fc.safc_group_num = params->txq_params.cos;
        data->fc.safc_group_en_flg =
                (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
        data->fc.traffic_type =
                (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
                LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}
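
/* Bring up a FW client (L2 queue pair): program host coalescing for the Tx
 * and Rx status block indices, validate the Rx context, zero the per-client
 * statistics when requested, fill the ramrod data, post the CLIENT_SETUP
 * ramrod and wait for its completion.
 */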
int bnx2x_setup_fw_client(struct bnx2x *bp,
                          struct bnx2x_client_init_params *params,
                          u8 activate,
                          struct client_init_ramrod_data *data,
                          dma_addr_t data_mapping)
{
        u16 hc_usec;
        int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
        int ramrod_flags = 0, rc;

        /* HC and context validation values */
        hc_usec = params->txq_params.hc_rate ?
                1000000 / params->txq_params.hc_rate : 0;
        bnx2x_update_coalesce_sb_index(bp,
                        params->txq_params.fw_sb_id,
                        params->txq_params.sb_cq_index,
                        !(params->txq_params.flags & QUEUE_FLG_HC),
                        hc_usec);

        *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

        hc_usec = params->rxq_params.hc_rate ?
                1000000 / params->rxq_params.hc_rate : 0;
        bnx2x_update_coalesce_sb_index(bp,
                        params->rxq_params.fw_sb_id,
                        params->rxq_params.sb_cq_index,
                        !(params->rxq_params.flags & QUEUE_FLG_HC),
                        hc_usec);

        bnx2x_set_ctx_validation(params->rxq_params.cxt,
                                 params->rxq_params.cid);

        /* zero stats */
        if (params->txq_params.flags & QUEUE_FLG_STATS)
                storm_memset_xstats_zero(bp, BP_PORT(bp),
                                         params->txq_params.stat_id);

        if (params->rxq_params.flags & QUEUE_FLG_STATS) {
                storm_memset_ustats_zero(bp, BP_PORT(bp),
                                         params->rxq_params.stat_id);
                storm_memset_tstats_zero(bp, BP_PORT(bp),
                                         params->rxq_params.stat_id);
        }

        /* Fill the ramrod data */
        bnx2x_fill_cl_init_data(bp, params, activate, data);

        /* SETUP ramrod.
         *
         * bnx2x_sp_post() takes a spin_lock thus no other explicit memory
         * barrier except from mmiowb() is needed to impose a
         * proper ordering of memory operations.
         */
        mmiowb();

        bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
                      U64_HI(data_mapping), U64_LO(data_mapping), 0);

        /* Wait for completion */
        rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
                               params->ramrod_params.index,
                               params->ramrod_params.pstate,
                               ramrod_flags);

        return rc;
}
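
/* Write the RSS indirection table into TSTORM internal memory, translating
 * each entry from a queue index to an absolute client id (bp->fp->cl_id +
 * table entry). Does nothing when RSS is disabled.
 */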
void bnx2x_push_indir_table(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);
        int i;

        if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
                return;

        for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
                REG_WR8(bp, BAR_TSTRORM_INTMEM +
                        TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
                        bp->fp->cl_id + bp->rx_indir_table[i]);
}