
New 7.0 FW: bnx2x, cnic, bnx2i, bnx2fc

New FW/HSI (7.0):
 - Added support for the 578xx chips.
 - Improved HSI - the driver now needs far less direct access to the FW
   internal memory.

New implementation of the HSI handling layer in bnx2x (bnx2x_sp.c):
 - Introduced chip-dependent objects with chip-independent interfaces for
   configuring MACs, multicast addresses, Rx mode, the indirection table,
   fast-path queues and function initialization/cleanup.
 - Object functionality is based on private function pointers, which
   allows not only per-chip but also PF/VF differentiation while still
   preserving the same interface towards the driver.
 - The objects' interface is unaffected by HSI changes that do not
   require new parameters, keeping the code outside bnx2x_sp.c invariant
   with regard to such HSI changes.

Changes in the CNIC, bnx2fc and bnx2i modules due to the new HSI.
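
The object pattern can be sketched as follows (hypothetical names and a
deliberately minimal shape - the real objects live in bnx2x_sp.c and
carry considerably more state):

#include <linux/types.h>

/* Sketch only: none of these names are the actual bnx2x_sp.c
 * definitions.
 */
struct mac_obj {
	/* Private, implementation-specific callback, bound once at
	 * object init time.
	 */
	int (*config_mac)(struct mac_obj *o, const u8 *mac, bool add);
};

/* E1x flavour: e.g. programs the per-port CAM directly */
static int mac_config_e1x(struct mac_obj *o, const u8 *mac, bool add)
{
	/* ... */
	return 0;
}

/* E2+ flavour: e.g. posts a classification ramrod via the new HSI */
static int mac_config_e2(struct mac_obj *o, const u8 *mac, bool add)
{
	/* ... */
	return 0;
}

static void mac_obj_init(struct mac_obj *o, bool is_e1x)
{
	/* chip (or PF/VF) differentiation happens exactly once, here */
	o->config_mac = is_e1x ? mac_config_e1x : mac_config_e2;
}

/* Callers never look at the chip revision again */
static inline int mac_obj_add(struct mac_obj *o, const u8 *mac)
{
	return o->config_mac(o, mac, true);
}

Because the chip decision is taken once at bind time, supporting a new
chip (or a VF flavour) means adding one more implementation and one more
branch in the init routine, while every call site stays untouched.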

Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@conan.davemloft.net>
Vlad Zolotarov, 14 years ago
commit 619c5cb688

File diff suppressed because it is too large
+ 389 - 249
drivers/net/bnx2x/bnx2x.h


File diff suppressed because it is too large
+ 420 - 227
drivers/net/bnx2x/bnx2x_cmn.c


+ 425 - 124
drivers/net/bnx2x/bnx2x_cmn.h

@@ -18,11 +18,15 @@
 #define BNX2X_CMN_H
 
 #include <linux/types.h>
+#include <linux/pci.h>
 #include <linux/netdevice.h>
 
 
 #include "bnx2x.h"
 
+/* This is used as a replacement for the MCP if it's not present */
+extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+
 extern int num_queues;
 
 /************************ Macros ********************************/
@@ -61,6 +65,73 @@ extern int num_queues;
 /*********************** Interfaces ****************************
  *  Functions that need to be implemented by each driver version
  */
+/* Init */
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp:			driver handle
+ * @unload_mode:	requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp);
+
+/**
+ * bnx2x_config_rss_pf - configure RSS parameters.
+ *
+ * @bp:			driver handle
+ * @ind_table:		indirection table to configure
+ * @config_hash:	whether to re-configure the RSS hash keys
+ */
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
+
+/**
+ * bnx2x__init_func_obj - init function object
+ *
+ * @bp:			driver handle
+ *
+ * Initializes the Function Object with the appropriate
+ * parameters which include a function slow path driver
+ * interface.
+ */
+void bnx2x__init_func_obj(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_queue - setup eth queue.
+ *
+ * @bp:		driver handle
+ * @fp:		pointer to the fastpath structure
+ * @leading:	set if this queue is the leading one
+ */
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool leading);
+
+/**
+ * bnx2x_setup_leading - bring up a leading eth queue.
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * bnx2x_fw_command - send the MCP a request
+ *
+ * @bp:		driver handle
+ * @command:	request
+ * @param:	request's parameter
+ *
+ * block until there is a reply
+ */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 
 /**
  * bnx2x_initial_phy_init - initialize link parameters structure variables.
@@ -87,6 +158,29 @@ void bnx2x_link_set(struct bnx2x *bp);
  */
 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
 
+/**
+ * bnx2x_drv_pulse - write driver pulse to shmem
+ *
+ * @bp:		driver handle
+ *
+ * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
+ * in the shmem.
+ */
+void bnx2x_drv_pulse(struct bnx2x *bp);
+
+/**
+ * bnx2x_igu_ack_sb - update IGU with current SB value
+ *
+ * @bp:		driver handle
+ * @igu_sb_id:	SB id
+ * @segment:	SB segment
+ * @index:	SB index
+ * @op:		SB operation
+ * @update:	is HW update required
+ */
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+		      u16 index, u8 op, u8 update);
+
 /**
  * bnx2x__link_status_update - handles link status change.
  *
@@ -164,21 +258,6 @@ void bnx2x_int_enable(struct bnx2x *bp);
  */
 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
 
-/**
- * bnx2x_init_firmware - loads device firmware
- *
- * @bp:		driver handle
- */
-int bnx2x_init_firmware(struct bnx2x *bp);
-
-/**
- * bnx2x_init_hw - init HW blocks according to current initialization stage.
- *
- * @bp:		driver handle
- * @load_code:	COMMON, PORT or FUNCTION
- */
-int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
-
 /**
  * bnx2x_nic_init - init driver internals.
  *
@@ -206,16 +285,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp);
  */
 void bnx2x_free_mem(struct bnx2x *bp);
 
-/**
- * bnx2x_setup_client - setup eth client.
- *
- * @bp:		driver handle
- * @fp:		pointer to fastpath structure
- * @is_leading:	boolean
- */
-int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-		       int is_leading);
-
 /**
  * bnx2x_set_num_queues - set number of queues according to mode.
  *
@@ -259,38 +328,44 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
  *
  * Configures according to the value in netdev->dev_addr.
  */
-void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
 
-#ifdef BCM_CNIC
 /**
- * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s)
+ * bnx2x_set_rx_mode - set MAC filtering configurations.
  *
- * @bp:		driver handle
- * @set:	set or clear the CAM entry
+ * @dev:	netdevice
  *
- * Used next enties in the CAM after the ETH MAC(s).
- * This function will wait until the ramdord completion returns.
- * Return 0 if cussess, -ENODEV if ramrod doesn't return.
+ * called with netif_tx_lock from dev_mcast.c
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh()
  */
-int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
+void bnx2x_set_rx_mode(struct net_device *dev);
 
 /**
- * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC.
+ * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
  *
  * @bp:		driver handle
- * @set:	set or clear
+ *
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh().
  */
-int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
-#endif
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
 
 /**
- * bnx2x_set_rx_mode - set MAC filtering configurations.
- *
- * @dev:	netdevice
+ * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
  *
- * called with netif_tx_lock from dev_mcast.c
+ * @bp:			driver handle
+ * @cl_id:		client id
+ * @rx_mode_flags:	rx mode configuration
+ * @rx_accept_flags:	rx accept configuration
+ * @tx_accept_flags:	tx accept configuration (tx switch)
+ * @ramrod_flags:	ramrod configuration
  */
-void bnx2x_set_rx_mode(struct net_device *dev);
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+			 unsigned long rx_mode_flags,
+			 unsigned long rx_accept_flags,
+			 unsigned long tx_accept_flags,
+			 unsigned long ramrod_flags);
 
 /* Parity errors related */
 void bnx2x_inc_load_cnt(struct bnx2x *bp);
@@ -299,14 +374,6 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp);
 bool bnx2x_reset_is_done(struct bnx2x *bp);
 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
 
-/**
- * bnx2x_stats_handle - perform statistics handling according to event.
- *
- * @bp:		driver handle
- * @event:	bnx2x_stats_event
- */
-void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-
 /**
  * bnx2x_sp_event - handle ramrods completion.
  *
@@ -315,15 +382,6 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
  */
 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
 
-/**
- * bnx2x_func_start - init function
- *
- * @bp:		driver handle
- *
- * Must be called before sending CLIENT_SETUP for the first client.
- */
-int bnx2x_func_start(struct bnx2x *bp);
-
 /**
  * bnx2x_ilt_set_info - prepare ILT configurations.
  *
@@ -355,6 +413,8 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
  * @value:	new value
  */
 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp);
 
 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
 
@@ -378,6 +438,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p);
 /* NAPI poll Rx part */
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
 
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
+
 /* NAPI poll Tx part */
 int bnx2x_tx_int(struct bnx2x_fastpath *fp);
 
@@ -390,7 +453,6 @@ void bnx2x_free_irq(struct bnx2x *bp);
 
 void bnx2x_free_fp_mem(struct bnx2x *bp);
 int bnx2x_alloc_fp_mem(struct bnx2x *bp);
-
 void bnx2x_init_rx_rings(struct bnx2x *bp);
 void bnx2x_free_skbs(struct bnx2x *bp);
 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
@@ -455,19 +517,20 @@ int bnx2x_set_features(struct net_device *dev, u32 features);
  */
 void bnx2x_tx_timeout(struct net_device *dev);
 
+/*********************** Inlines **********************************/
+/*********************** Fast path ********************************/
 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 {
 	barrier(); /* status block is written to by the chip */
 	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
 }
 
-static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
-					struct bnx2x_fastpath *fp,
-					u16 bd_prod, u16 rx_comp_prod,
-					u16 rx_sge_prod)
+static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
+			struct bnx2x_fastpath *fp, u16 bd_prod,
+			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
 {
 	struct ustorm_eth_rx_producers rx_prods = {0};
-	int i;
+	u32 i;
 
 	/* Update producers */
 	rx_prods.bd_prod = bd_prod;
@@ -484,10 +547,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 	 */
 	wmb();
 
-	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
-		REG_WR(bp,
-		       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
-		       ((u32 *)&rx_prods)[i]);
+	for (i = 0; i < sizeof(rx_prods)/4; i++)
+		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
 
 	mmiowb(); /* keep prod updates ordered */
 
@@ -517,7 +578,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
 	barrier();
 }
 
-static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
+static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
 					  u8 idu_sb_id, bool is_Pf)
 {
 	u32 data, ctl, cnt = 100;
@@ -525,7 +586,7 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
 	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
 	u32 sb_bit =  1 << (idu_sb_id%32);
-	u32 func_encode = BP_FUNC(bp) |
+	u32 func_encode = func |
 			((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
 
@@ -588,15 +649,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
 	barrier();
 }
 
-static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
-		      u16 index, u8 op, u8 update)
-{
-	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
-
-	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
-			     igu_addr);
-}
-
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
 				u16 index, u8 op, u8 update)
 {
@@ -703,7 +755,7 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
 }
 
 /**
- * disables tx from stack point of view
+ * bnx2x_tx_disable - disables tx from stack point of view
  *
  * @bp:		driver handle
  */
@@ -738,7 +790,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
 	int i;
 
 	/* Add NAPI objects */
-	for_each_napi_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
@@ -747,7 +799,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_napi_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
 
@@ -777,7 +829,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
 		int idx = RX_SGE_CNT * i - 1;
 
 		for (j = 0; j < 2; j++) {
-			SGE_MASK_CLEAR_BIT(fp, idx);
+			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
 			idx--;
 		}
 	}
@@ -787,7 +839,7 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 {
 	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
 	memset(fp->sge_mask, 0xff,
-	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
+	       (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
 
 	/* Clear the two last indices in the page to 1:
 	   these are the indices that correspond to the "next" element,
@@ -869,12 +921,61 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 				   dma_unmap_addr(cons_rx_buf, mapping),
 				   RX_COPY_THRESH, DMA_FROM_DEVICE);
 
-	prod_rx_buf->skb = cons_rx_buf->skb;
 	dma_unmap_addr_set(prod_rx_buf, mapping,
 			   dma_unmap_addr(cons_rx_buf, mapping));
+	prod_rx_buf->skb = cons_rx_buf->skb;
 	*prod_bd = *cons_bd;
 }
 
+/************************* Init ******************************************/
+
+/**
+ * bnx2x_func_start - init function
+ *
+ * @bp:		driver handle
+ *
+ * Must be called before sending CLIENT_SETUP for the first client.
+ */
+static inline int bnx2x_func_start(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {0};
+	struct bnx2x_func_start_params *start_params =
+		&func_params.params.start;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_START;
+
+	/* Function parameters */
+	start_params->mf_mode = bp->mf_mode;
+	start_params->sd_vlan_tag = bp->mf_ov;
+	start_params->network_cos_mode = OVERRIDE_COS;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+/**
+ * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
+ *
+ * @fw_hi:	pointer to upper part
+ * @fw_mid:	pointer to middle part
+ * @fw_lo:	pointer to lower part
+ * @mac:	pointer to MAC address
+ */
+static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
+					 u8 *mac)
+{
+	((u8 *)fw_hi)[0]  = mac[1];
+	((u8 *)fw_hi)[1]  = mac[0];
+	((u8 *)fw_mid)[0] = mac[3];
+	((u8 *)fw_mid)[1] = mac[2];
+	((u8 *)fw_lo)[0]  = mac[5];
+	((u8 *)fw_lo)[1]  = mac[4];
+}
+
 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 					   struct bnx2x_fastpath *fp, int last)
 {
@@ -893,21 +994,20 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 	int i;
 
 	for (i = 0; i < last; i++) {
-		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
-		struct sk_buff *skb = rx_buf->skb;
+		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+		struct sk_buff *skb = first_buf->skb;
 
 		if (skb == NULL) {
 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
 			continue;
 		}
-
-		if (fp->tpa_state[i] == BNX2X_TPA_START)
+		if (tpa_info->tpa_state == BNX2X_TPA_START)
 			dma_unmap_single(&bp->pdev->dev,
-					 dma_unmap_addr(rx_buf, mapping),
+					 dma_unmap_addr(first_buf, mapping),
 					 fp->rx_buf_size, DMA_FROM_DEVICE);
-
 		dev_kfree_skb(skb);
-		rx_buf->skb = NULL;
+		first_buf->skb = NULL;
 	}
 }
 
@@ -1036,31 +1136,199 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
 	return i - fp->eth_q_stats.rx_skb_alloc_failed;
 }
 
+/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
+ * port.
+ */
+static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
+{
+	if (!CHIP_IS_E1x(fp->bp))
+		return fp->cl_id;
+	else
+		return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
+}
+
+static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
+					       bnx2x_obj_type obj_type)
+{
+	struct bnx2x *bp = fp->bp;
+
+	/* Configure classification DBs */
+	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
+			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+			   bnx2x_sp_mapping(bp, mac_rdata),
+			   BNX2X_FILTER_MAC_PENDING,
+			   &bp->sp_state, obj_type,
+			   &bp->macs_pool);
+}
+
+/**
+ * bnx2x_get_path_func_num - get number of active functions
+ *
+ * @bp:		driver handle
+ *
+ * Calculates the number of active (not hidden) functions on the
+ * current path.
+ */
+static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+{
+	u8 func_num = 0, i;
+
+	/* 57710 has only one function per-port */
+	if (CHIP_IS_E1(bp))
+		return 1;
+
+	/* Calculate a number of functions enabled on the current
+	 * PATH/PORT.
+	 */
+	if (CHIP_REV_IS_SLOW(bp)) {
+		if (IS_MF(bp))
+			func_num = 4;
+		else
+			func_num = 2;
+	} else {
+		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
+			u32 func_config =
+				MF_CFG_RD(bp,
+					  func_mf_config[BP_PORT(bp) + 2 * i].
+					  config);
+			func_num +=
+				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
+		}
+	}
+
+	WARN_ON(!func_num);
+
+	return func_num;
+}
+
+static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+{
+	/* RX_MODE controlling object */
+	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+
+	/* multicast configuration controlling object */
+	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+			     BP_FUNC(bp), BP_FUNC(bp),
+			     bnx2x_sp(bp, mcast_rdata),
+			     bnx2x_sp_mapping(bp, mcast_rdata),
+			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
+			     BNX2X_OBJ_TYPE_RX);
+
+	/* Setup CAM credit pools */
+	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
+				   bnx2x_get_path_func_num(bp));
+
+	/* RSS configuration object */
+	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
+				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
+				  bnx2x_sp(bp, rss_rdata),
+				  bnx2x_sp_mapping(bp, rss_rdata),
+				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
+				  BNX2X_OBJ_TYPE_RX);
+}
+
+static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
+{
+	if (CHIP_IS_E1x(fp->bp))
+		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
+	else
+		return fp->cl_id;
+}
+
+static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+
+	if (!CHIP_IS_E1x(bp))
+		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+	else
+		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+}
+
 #ifdef BCM_CNIC
+static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
+{
+	return bp->cnic_base_cl_id + cl_idx +
+		(bp->pf_num >> 1) * NONE_ETH_CONTEXT_USE;
+}
+
+static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
+{
+	/* the 'first' id is allocated for the cnic */
+	return bp->base_fw_ndsb;
+}
+
+static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
+{
+	return bp->igu_base_sb;
+}
+
 static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
 {
-	bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
-		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
+	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+	unsigned long q_type = 0;
+
+	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+						     BNX2X_FCOE_ETH_CL_ID_IDX);
+	/* Current BNX2X_FCOE_ETH_CID definition implies no more than
+	 *  16 ETH clients per function when CNIC is enabled!
+	 *
+	 *  Fix it ASAP!!!
+	 */
 	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
 	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
 	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
 	bnx2x_fcoe(bp, bp) = bp;
-	bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
 	bnx2x_fcoe(bp, index) = FCOE_IDX;
 	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
 	bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
 	/* qZone id equals to FW (per path) client id */
-	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
-		BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
-				ETH_MAX_RX_CLIENTS_E1H);
+	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
 	/* init shortcut */
-	bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
-	    USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
-	    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
-
+	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+		bnx2x_rx_ustorm_prods_offset(fp);
+
+	/* Configure Queue State object */
+	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp),
+		bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata),
+			      q_type);
+
+	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
+			   "igu_sb %d\n",
+	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+	   fp->igu_sb_id);
 }
 #endif
 
+static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
+				       struct bnx2x_fastpath *fp)
+{
+	int cnt = 1000;
+
+	while (bnx2x_has_tx_work_unload(fp)) {
+		if (!cnt) {
+			BNX2X_ERR("timeout waiting for queue[%d]: "
+				 "fp->tx_pkt_prod(%d) != fp->tx_pkt_cons(%d)\n",
+				  fp->index, fp->tx_pkt_prod, fp->tx_pkt_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+			bnx2x_panic();
+			return -EBUSY;
+#else
+			break;
+#endif
+		}
+		cnt--;
+		usleep_range(1000, 1000);
+	}
+
+	return 0;
+}
+
 int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
 
 static inline void __storm_memset_struct(struct bnx2x *bp,
@@ -1071,48 +1339,81 @@ static inline void __storm_memset_struct(struct bnx2x *bp,
 		REG_WR(bp, addr + (i * 4), data[i]);
 }
 
-static inline void storm_memset_mac_filters(struct bnx2x *bp,
-			struct tstorm_eth_mac_filter_config *mac_filters,
-			u16 abs_fid)
+static inline void storm_memset_func_cfg(struct bnx2x *bp,
+				struct tstorm_eth_function_common_config *tcfg,
+				u16 abs_fid)
 {
-	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+	size_t size = sizeof(struct tstorm_eth_function_common_config);
 
 	u32 addr = BAR_TSTRORM_INTMEM +
-			TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);
+			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
 
-	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
 }
 
 static inline void storm_memset_cmng(struct bnx2x *bp,
 				struct cmng_struct_per_port *cmng,
 				u8 port)
 {
-	size_t size =
-		sizeof(struct rate_shaping_vars_per_port) +
-		sizeof(struct fairness_vars_per_port) +
-		sizeof(struct safc_struct_per_port) +
-		sizeof(struct pfc_struct_per_port);
+	size_t size = sizeof(struct cmng_struct_per_port);
 
 	u32 addr = BAR_XSTRORM_INTMEM +
 			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
 
 	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
+}
+
+/**
+ * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
+ *
+ * @bp:		driver handle
+ * @mask:	bits that need to be cleared
+ */
+static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
+{
+	int tout = 5000; /* Wait for 5 secs tops */
+
+	while (tout--) {
+		smp_mb();
+		netif_addr_lock_bh(bp->dev);
+		if (!(bp->sp_state & mask)) {
+			netif_addr_unlock_bh(bp->dev);
+			return true;
+		}
+		netif_addr_unlock_bh(bp->dev);
 
-	addr += size + 4 /* SKIP DCB+LLFC */;
-	size = sizeof(struct cmng_struct_per_port) -
-		size /* written */ - 4 /*skipped*/;
+		usleep_range(1000, 1000);
+	}
+
+	smp_mb();
+
+	netif_addr_lock_bh(bp->dev);
+	if (bp->sp_state & mask) {
+		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
+			  "mask 0x%lx\n", bp->sp_state, mask);
+		netif_addr_unlock_bh(bp->dev);
+		return false;
+	}
+	netif_addr_unlock_bh(bp->dev);
 
-	__storm_memset_struct(bp, addr, size,
-			      (u32 *)(cmng->traffic_type_to_priority_cos));
+	return true;
 }
 
-/* HW Lock for shared dual port PHYs */
+/**
+ * bnx2x_set_ctx_validation - set CDU context validation values
+ *
+ * @bp:		driver handle
+ * @cxt:	context of the connection on the host memory
+ * @cid:	SW CID of the connection to be configured
+ */
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+			      u32 cid);
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+				    u8 sb_index, u8 disable, u16 usec);
 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
 void bnx2x_release_phy_lock(struct bnx2x *bp);
 
-void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
-					u8 sb_index, u8 disable, u16 usec);
-
 /**
  * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
  *

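Note: a usage sketch for the bnx2x_set_fw_mac_addr() helper added above
(hypothetical caller; the commented values assume a little-endian host):

static void example_fw_mac(void)
{
	u16 fw_hi, fw_mid, fw_lo;
	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	bnx2x_set_fw_mac_addr(&fw_hi, &fw_mid, &fw_lo, mac);
	/* fw_hi == 0x0011, fw_mid == 0x2233, fw_lo == 0x4455: each u16
	 * holds one byte pair of the MAC, with the more significant
	 * byte of the pair in the upper half.
	 */
}
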
+ 69 - 110
drivers/net/bnx2x/bnx2x_dcb.c

@@ -55,15 +55,14 @@ static void bnx2x_pfc_set(struct bnx2x *bp)
 	struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
 	u32 pri_bit, val = 0;
 	u8 pri;
+	int i;
 
 	/* Tx COS configuration */
-	if (bp->dcbx_port_params.ets.cos_params[0].pauseable)
-		pfc_params.rx_cos0_priority_mask =
-			bp->dcbx_port_params.ets.cos_params[0].pri_bitmask;
-	if (bp->dcbx_port_params.ets.cos_params[1].pauseable)
-		pfc_params.rx_cos1_priority_mask =
-			bp->dcbx_port_params.ets.cos_params[1].pri_bitmask;
-
+	for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
+		if (bp->dcbx_port_params.ets.cos_params[i].pauseable)
+			pfc_params.rx_cos_priority_mask[i] =
+				bp->dcbx_port_params.ets.
+					cos_params[i].pri_bitmask;
 
 	/**
 	 * Rx COS configuration
@@ -378,7 +377,7 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
 
 static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
 {
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		if (BP_PORT(bp)) {
 			BNX2X_ERR("4 port mode is not supported");
 			return;
@@ -406,7 +405,7 @@ static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
 		      0 /* connectionless */,
 		      0 /* dataHi is zero */,
 		      0 /* dataLo is zero */,
-		      1 /* common */);
+		      NONE_CONNECTION_TYPE);
 }
 
 static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
@@ -417,7 +416,7 @@ static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
 		      0, /* connectionless */
 		      U64_HI(bnx2x_sp_mapping(bp, pfc_config)),
 		      U64_LO(bnx2x_sp_mapping(bp, pfc_config)),
-		      1  /* commmon */);
+		      NONE_CONNECTION_TYPE);
 }
 
 static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
@@ -425,7 +424,7 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
 	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
 	u8	status = 0;
 
-	bnx2x_ets_disabled(&bp->link_params);
+	bnx2x_ets_disabled(&bp->link_params/*, &bp->link_vars*/);
 
 	if (!ets->enabled)
 		return;
@@ -527,6 +526,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
 		BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
 		return -EINVAL;
 	}
+
 	rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
 				 DCBX_READ_LOCAL_MIB);
 
@@ -563,15 +563,6 @@ u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
 		DCB_APP_IDTYPE_ETHTYPE;
 }
 
-static inline
-void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
-{
-	int i;
-	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
-		bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
-							~DCBX_APP_ENTRY_VALID;
-}
-
 int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
 {
 	int i, err = 0;
@@ -597,32 +588,28 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
 }
 #endif
 
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+	if (SHMEM2_HAS(bp, drv_flags)) {
+		u32 drv_flags;
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+		drv_flags = SHMEM2_RD(bp, drv_flags);
+
+		if (set)
+			SET_FLAGS(drv_flags, flags);
+		else
+			RESET_FLAGS(drv_flags, flags);
+
+		SHMEM2_WR(bp, drv_flags, drv_flags);
+		DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+		bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+	}
+}
+
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 {
 	switch (state) {
 	case BNX2X_DCBX_STATE_NEG_RECEIVED:
-#ifdef BCM_CNIC
-		if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
-			struct cnic_ops *c_ops;
-			struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-			bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-			cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
-			cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
-
-			rcu_read_lock();
-			c_ops = rcu_dereference(bp->cnic_ops);
-			if (c_ops) {
-				bnx2x_cnic_notify(bp, CNIC_CTL_STOP_ISCSI_CMD);
-				rcu_read_unlock();
-				return;
-			}
-			rcu_read_unlock();
-		}
-
-		/* fall through if no CNIC initialized  */
-	case BNX2X_DCBX_STATE_ISCSI_STOPPED:
-#endif
-
 		{
 			DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
 #ifdef BCM_DCBNL
@@ -646,41 +633,28 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 			bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
 						 bp->dcbx_error);
 
-			if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
-#ifdef BCM_DCBNL
-				/**
-				 * Add new app tlvs to dcbnl
-				 */
-				bnx2x_dcbnl_update_applist(bp, false);
-#endif
-				bnx2x_dcbx_stop_hw_tx(bp);
-				return;
-			}
-			/* fall through */
+			/* mark DCBX result for PMF migration */
+			bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
 #ifdef BCM_DCBNL
 			/**
-			 * Invalidate the local app tlvs if they are not added
-			 * to the dcbnl app list to avoid deleting them from
-			 * the list later on
+			 * Add new app tlvs to dcbnl
 			 */
-			bnx2x_dcbx_invalidate_local_apps(bp);
+			bnx2x_dcbnl_update_applist(bp, false);
 #endif
+			bnx2x_dcbx_stop_hw_tx(bp);
+
+			return;
 		}
 	case BNX2X_DCBX_STATE_TX_PAUSED:
 		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
 		bnx2x_pfc_set_pfc(bp);
 
 		bnx2x_dcbx_update_ets_params(bp);
-		if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
-			bnx2x_dcbx_resume_hw_tx(bp);
-			return;
-		}
-		/* fall through */
+		bnx2x_dcbx_resume_hw_tx(bp);
+		return;
 	case BNX2X_DCBX_STATE_TX_RELEASED:
 		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
-		if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD)
-			bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
-
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
 		return;
 	default:
 		BNX2X_ERR("Unknown DCBX_STATE\n");
@@ -868,7 +842,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
 
 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
 {
-	if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp)) {
+	if (!CHIP_IS_E1x(bp) && !CHIP_MODE_IS_4_PORT(bp)) {
 		bp->dcb_state = dcb_on;
 		bp->dcbx_enabled = dcbx_enabled;
 	} else {
@@ -966,7 +940,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
 	DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
 	   bp->dcb_state, bp->port.pmf);
 
-	if (bp->dcb_state ==  BNX2X_DCB_STATE_ON && bp->port.pmf &&
+	if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
 	    SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
 		dcbx_lldp_params_offset =
 			SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -974,6 +948,8 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
 		DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
 		   dcbx_lldp_params_offset);
 
+		bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+
 		if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
 			bnx2x_dcbx_lldp_updated_params(bp,
 						       dcbx_lldp_params_offset);
@@ -981,46 +957,12 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
 			bnx2x_dcbx_admin_mib_updated_params(bp,
 				dcbx_lldp_params_offset);
 
-			/* set default configuration BC has */
-			bnx2x_dcbx_set_params(bp,
-					      BNX2X_DCBX_STATE_NEG_RECEIVED);
-
+			/* Let HW start negotiation */
 			bnx2x_fw_command(bp,
 					 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
 		}
 	}
 }
-
-void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp)
-{
-	struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES];
-	u32 i = 0, addr;
-	memset(pricos, 0, sizeof(pricos));
-	/* Default initialization */
-	for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++)
-		pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
-
-	/* Store per port struct to internal memory */
-	addr = BAR_XSTRORM_INTMEM +
-			XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
-			offsetof(struct cmng_struct_per_port,
-				 traffic_type_to_priority_cos);
-	__storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos);
-
-
-	/* LLFC disabled.*/
-	REG_WR8(bp , BAR_XSTRORM_INTMEM +
-		    XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
-		    offsetof(struct cmng_struct_per_port, llfc_mode),
-			LLFC_MODE_NONE);
-
-	/* DCBX disabled.*/
-	REG_WR8(bp , BAR_XSTRORM_INTMEM +
-		    XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
-		    offsetof(struct cmng_struct_per_port, dcb_enabled),
-			DCB_DISABLED);
-}
-
 static void
 bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
 			    struct flow_control_configuration *pfc_fw_cfg)
@@ -1591,13 +1533,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
 
 	/* Fw version should be incremented each update */
 	pfc_fw_cfg->dcb_version = ++bp->dcb_version;
-	pfc_fw_cfg->dcb_enabled = DCB_ENABLED;
-
-	/* Default initialization */
-	for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) {
-		tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
-		tt2cos[pri].cos = 0;
-	}
+	pfc_fw_cfg->dcb_enabled = 1;
 
 	/* Fill priority parameters */
 	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
@@ -1605,14 +1541,37 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
 		pri_bit = 1 << tt2cos[pri].priority;
 
 		/* Fill COS parameters based on COS calculated to
-		 * make it more generally for future use */
+		 * make it more general for future use */
 		for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
 			if (bp->dcbx_port_params.ets.cos_params[cos].
 						pri_bitmask & pri_bit)
 					tt2cos[pri].cos = cos;
 	}
+
+	/* we never want the FW to add a 0 vlan tag */
+	pfc_fw_cfg->dont_add_pri_0_en = 1;
+
 	bnx2x_dcbx_print_cos_params(bp,	pfc_fw_cfg);
 }
+
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
+{
+	/* if we need to synchronize DCBX result from prev PMF
+	 * read it from shmem and update bp accordingly
+	 */
+	if (SHMEM2_HAS(bp, drv_flags) &&
+	   GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
+		/* Read neg results if dcbx is in the FW */
+		if (bnx2x_dcbx_read_shmem_neg_results(bp))
+			return;
+
+		bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+					  bp->dcbx_error);
+		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+					 bp->dcbx_error);
+	}
+}
+
 /* DCB netlink */
 #ifdef BCM_DCBNL
 

+ 0 - 3
drivers/net/bnx2x/bnx2x_dcb.h

@@ -179,9 +179,6 @@ void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
 
 enum {
 	BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
-#ifdef BCM_CNIC
-	BNX2X_DCBX_STATE_ISCSI_STOPPED,
-#endif
 	BNX2X_DCBX_STATE_TX_PAUSED,
 	BNX2X_DCBX_STATE_TX_RELEASED
 };

File diff suppressed because it is too large
+ 505 - 384
drivers/net/bnx2x/bnx2x_dump.h


+ 255 - 137
drivers/net/bnx2x/bnx2x_ethtool.c

@@ -38,8 +38,6 @@ static const struct {
 	char string[ETH_GSTRING_LEN];
 } bnx2x_q_stats_arr[] = {
 /* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
-	{ Q_STATS_OFFSET32(error_bytes_received_hi),
-						8, "[%s]: rx_error_bytes" },
 	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
 						8, "[%s]: rx_ucast_packets" },
 	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
@@ -53,13 +51,18 @@ static const struct {
 					 4, "[%s]: rx_skb_alloc_discard" },
 	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
 
-/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%s]: tx_bytes" },
-	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+	{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%s]: tx_bytes" },
+/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
 						8, "[%s]: tx_ucast_packets" },
 	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
 						8, "[%s]: tx_mcast_packets" },
 	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
-						8, "[%s]: tx_bcast_packets" }
+						8, "[%s]: tx_bcast_packets" },
+	{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
+						8, "[%s]: tpa_aggregations" },
+	{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+					8, "[%s]: tpa_aggregated_frames"},
+	{ Q_STATS_OFFSET32(total_tpa_bytes_hi),	8, "[%s]: tpa_bytes"}
 };
 
 #define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@@ -99,8 +102,8 @@ static const struct {
 				8, STATS_FLAGS_BOTH, "rx_discards" },
 	{ STATS_OFFSET32(mac_filter_discard),
 				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
-	{ STATS_OFFSET32(xxoverflow_discard),
-				4, STATS_FLAGS_PORT, "rx_fw_discards" },
+	{ STATS_OFFSET32(mf_tag_discard),
+				4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
 	{ STATS_OFFSET32(brb_drop_hi),
 				8, STATS_FLAGS_PORT, "rx_brb_discard" },
 	{ STATS_OFFSET32(brb_truncate_hi),
@@ -159,7 +162,13 @@ static const struct {
 	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
 			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
 	{ STATS_OFFSET32(pause_frames_sent_hi),
-				8, STATS_FLAGS_PORT, "tx_pause_frames" }
+				8, STATS_FLAGS_PORT, "tx_pause_frames" },
+	{ STATS_OFFSET32(total_tpa_aggregations_hi),
+			8, STATS_FLAGS_FUNC, "tpa_aggregations" },
+	{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+			8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
+	{ STATS_OFFSET32(total_tpa_bytes_hi),
+			8, STATS_FLAGS_FUNC, "tpa_bytes"}
 };
 
 #define BNX2X_NUM_STATS		ARRAY_SIZE(bnx2x_stats_arr)
@@ -517,7 +526,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
 			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
 				regdump_len += wreg_addrs_e1h[i].size *
 					(1 + wreg_addrs_e1h[i].read_regs_count);
-	} else if (CHIP_IS_E2(bp)) {
+	} else if (!CHIP_IS_E1x(bp)) {
 		for (i = 0; i < REGS_COUNT; i++)
 			if (IS_E2_ONLINE(reg_addrs[i].info))
 				regdump_len += reg_addrs[i].size;
@@ -589,7 +598,7 @@ static void bnx2x_get_regs(struct net_device *dev,
 		dump_hdr.info = RI_E1_ONLINE;
 	else if (CHIP_IS_E1H(bp))
 		dump_hdr.info = RI_E1H_ONLINE;
-	else if (CHIP_IS_E2(bp))
+	else if (!CHIP_IS_E1x(bp))
 		dump_hdr.info = RI_E2_ONLINE |
 		(BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
 
@@ -610,14 +619,18 @@ static void bnx2x_get_regs(struct net_device *dev,
 					*p++ = REG_RD(bp,
 						      reg_addrs[i].addr + j*4);
 
-	} else if (CHIP_IS_E2(bp)) {
+	} else if (!CHIP_IS_E1x(bp)) {
 		for (i = 0; i < REGS_COUNT; i++)
 			if (IS_E2_ONLINE(reg_addrs[i].info))
 				for (j = 0; j < reg_addrs[i].size; j++)
 					*p++ = REG_RD(bp,
 					      reg_addrs[i].addr + j*4);
 
-		bnx2x_read_pages_regs_e2(bp, p);
+		if (CHIP_IS_E2(bp))
+			bnx2x_read_pages_regs_e2(bp, p);
+		else
+			/* E3 paged registers read is unimplemented yet */
+			WARN_ON(1);
 	}
 	/* Re-enable parity attentions */
 	bnx2x_clear_blocks_parity(bp);
@@ -625,8 +638,6 @@ static void bnx2x_get_regs(struct net_device *dev,
 		bnx2x_enable_blocks_parity(bp);
 }
 
-#define PHY_FW_VER_LEN			20
-
 static void bnx2x_get_drvinfo(struct net_device *dev,
 			      struct ethtool_drvinfo *info)
 {
@@ -1334,60 +1345,129 @@ static const struct {
 	{ "idle check (online)" }
 };
 
+enum {
+	BNX2X_CHIP_E1_OFST = 0,
+	BNX2X_CHIP_E1H_OFST,
+	BNX2X_CHIP_E2_OFST,
+	BNX2X_CHIP_E3_OFST,
+	BNX2X_CHIP_E3B0_OFST,
+	BNX2X_CHIP_MAX_OFST
+};
+
+#define BNX2X_CHIP_MASK_E1	(1 << BNX2X_CHIP_E1_OFST)
+#define BNX2X_CHIP_MASK_E1H	(1 << BNX2X_CHIP_E1H_OFST)
+#define BNX2X_CHIP_MASK_E2	(1 << BNX2X_CHIP_E2_OFST)
+#define BNX2X_CHIP_MASK_E3	(1 << BNX2X_CHIP_E3_OFST)
+#define BNX2X_CHIP_MASK_E3B0	(1 << BNX2X_CHIP_E3B0_OFST)
+
+#define BNX2X_CHIP_MASK_ALL	((1 << BNX2X_CHIP_MAX_OFST) - 1)
+#define BNX2X_CHIP_MASK_E1X	(BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
+
 static int bnx2x_test_registers(struct bnx2x *bp)
 {
 	int idx, i, rc = -ENODEV;
-	u32 wr_val = 0;
+	u32 wr_val = 0, hw;
 	int port = BP_PORT(bp);
 	static const struct {
+		u32 hw;
 		u32 offset0;
 		u32 offset1;
 		u32 mask;
 	} reg_tbl[] = {
-/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
-		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
-		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
-		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
-		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
-		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
-		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
-		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
-		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
-		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
-/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
-		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
-		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
-		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
-		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
-		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
-		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
-		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
-		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
-		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
-/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
-		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
-		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
-		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
-		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
-		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
-		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
-		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
-		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
-		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
-/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
-		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
-		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
-		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
-		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
-		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
-		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
-
-		{ 0xffffffff, 0, 0x00000000 }
+/* 0 */		{ BNX2X_CHIP_MASK_ALL,
+			BRB1_REG_PAUSE_LOW_THRESHOLD_0,	4, 0x000003ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			DORQ_REG_DB_ADDR0,		4, 0xffffffff },
+		{ BNX2X_CHIP_MASK_E1X,
+			HC_REG_AGG_INT_0,		4, 0x000003ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PBF_REG_MAC_IF0_ENABLE,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
+			PBF_REG_P0_INIT_CRD,		4, 0x000007ff },
+		{ BNX2X_CHIP_MASK_E3B0,
+			PBF_REG_INIT_CRD_Q0,		4, 0x000007ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PRS_REG_CID_PORT_0,		4, 0x00ffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_CDU0_L2P,	4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_TM0_L2P,		4, 0x000fffff },
+/* 10 */	{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_TSDM0_L2P,	4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			QM_REG_CONNNUM_0,		4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			TM_REG_LIN0_MAX_ACTIVE_CID,	4, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			SRC_REG_KEYRSS0_0,		40, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			SRC_REG_KEYRSS0_7,		40, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_WU_DA_CNT_CMD00,	4, 0x00000003 },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_GLB_DEL_ACK_MAX_CNT_0,	4, 0x000000ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_T_BIT,		4, 0x00000001 },
+/* 20 */	{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_EMAC0_IN_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_BMAC0_IN_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_XCM0_OUT_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_BRB0_OUT_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_XCM_MASK,		4, 0x00000007 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_ACPI_PAT_6_LEN,	68, 0x000000ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_ACPI_PAT_0_CRC,	68, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_MAC_0_0,	160, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_IP_0_1,	160, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_IPV4_IPV6_0,	160, 0x00000001 },
+/* 30 */	{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_UDP_0,	160, 0x0000ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_TCP_0,	160, 0x0000ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_VLAN_ID_0,	160, 0x00000fff },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_XGXS_SERDES0_MODE_SEL,	4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_STATUS_INTERRUPT_PORT0,	4, 0x07ffffff },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_SERDES0_CTRL_PHY_ADDR,	16, 0x0000001f },
+
+		{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
 	};
 
 	if (!netif_running(bp->dev))
 		return rc;
 
+	if (CHIP_IS_E1(bp))
+		hw = BNX2X_CHIP_MASK_E1;
+	else if (CHIP_IS_E1H(bp))
+		hw = BNX2X_CHIP_MASK_E1H;
+	else if (CHIP_IS_E2(bp))
+		hw = BNX2X_CHIP_MASK_E2;
+	else if (CHIP_IS_E3B0(bp))
+		hw = BNX2X_CHIP_MASK_E3B0;
+	else /* e3 A0 */
+		hw = BNX2X_CHIP_MASK_E3;
+
 	/* Repeat the test twice:
 	   First by writing 0x00000000, second by writing 0xffffffff */
 	for (idx = 0; idx < 2; idx++) {
@@ -1403,8 +1483,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
 
 		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
 			u32 offset, mask, save_val, val;
-			if (CHIP_IS_E2(bp) &&
-			    reg_tbl[i].offset0 == HC_REG_AGG_INT_0)
+			if (!(hw & reg_tbl[i].hw))
 				continue;
 
 			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
@@ -1421,7 +1500,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
 
 			/* verify value is as expected */
 			if ((val & mask) != (wr_val & mask)) {
-				DP(NETIF_MSG_PROBE,
+				DP(NETIF_MSG_HW,
 				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
 				   offset, val, wr_val, mask);
 				goto test_reg_exit;
@@ -1438,7 +1517,7 @@ test_reg_exit:
 static int bnx2x_test_memory(struct bnx2x *bp)
 {
 	int i, j, rc = -ENODEV;
-	u32 val;
+	u32 val, index;
 	static const struct {
 		u32 offset;
 		int size;
@@ -1453,32 +1532,44 @@ static int bnx2x_test_memory(struct bnx2x *bp)
 
 		{ 0xffffffff, 0 }
 	};
+
 	static const struct {
 		char *name;
 		u32 offset;
-		u32 e1_mask;
-		u32 e1h_mask;
-		u32 e2_mask;
+		u32 hw_mask[BNX2X_CHIP_MAX_OFST];
 	} prty_tbl[] = {
-		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0,   0 },
-		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2, 0 },
-		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0,   0 },
-		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0,   0 },
-		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0,   0 },
-		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0,   0 },
-
-		{ NULL, 0xffffffff, 0, 0, 0 }
+		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,
+			{0x2,     0x2, 0, 0} },
+		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
+			{0,       0,   0, 0} },
+		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,
+			{0x3ffc1, 0,   0, 0} },
+
+		{ NULL, 0xffffffff, {0, 0, 0, 0} }
 	};
 
 	if (!netif_running(bp->dev))
 		return rc;
 
+	if (CHIP_IS_E1(bp))
+		index = BNX2X_CHIP_E1_OFST;
+	else if (CHIP_IS_E1H(bp))
+		index = BNX2X_CHIP_E1H_OFST;
+	else if (CHIP_IS_E2(bp))
+		index = BNX2X_CHIP_E2_OFST;
+	else /* e3 */
+		index = BNX2X_CHIP_E3_OFST;
+
 	/* pre-Check the parity status */
 	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
 		val = REG_RD(bp, prty_tbl[i].offset);
-		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
-		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
-		    (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
+		if (val & ~(prty_tbl[i].hw_mask[index])) {
 			DP(NETIF_MSG_HW,
 			   "%s is 0x%x\n", prty_tbl[i].name, val);
 			goto test_mem_exit;
@@ -1493,9 +1584,7 @@ static int bnx2x_test_memory(struct bnx2x *bp)
 	/* Check the parity status */
 	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
 		val = REG_RD(bp, prty_tbl[i].offset);
-		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
-		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
-		    (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
+		if (val & ~(prty_tbl[i].hw_mask[index])) {
 			DP(NETIF_MSG_HW,
 			   "%s is 0x%x\n", prty_tbl[i].name, val);
 			goto test_mem_exit;
@@ -1512,12 +1601,16 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
 {
 	int cnt = 1400;
 
-	if (link_up)
+	if (link_up) {
 		while (bnx2x_link_test(bp, is_serdes) && cnt--)
-			msleep(10);
+			msleep(20);
+
+		if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
+			DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
+	}
 }
 
-static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
+static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 {
 	unsigned int pkt_size, num_pkts, i;
 	struct sk_buff *skb;
@@ -1526,14 +1619,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
 	u16 tx_start_idx, tx_idx;
 	u16 rx_start_idx, rx_idx;
-	u16 pkt_prod, bd_prod;
+	u16 pkt_prod, bd_prod, rx_comp_cons;
 	struct sw_tx_bd *tx_buf;
 	struct eth_tx_start_bd *tx_start_bd;
 	struct eth_tx_parse_bd_e1x  *pbd_e1x = NULL;
 	struct eth_tx_parse_bd_e2  *pbd_e2 = NULL;
 	dma_addr_t mapping;
 	union eth_rx_cqe *cqe;
-	u8 cqe_fp_flags;
+	u8 cqe_fp_flags, cqe_fp_type;
 	struct sw_rx_bd *rx_buf;
 	u16 len;
 	int rc = -ENODEV;
@@ -1545,7 +1638,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 			return -EINVAL;
 		break;
 	case BNX2X_MAC_LOOPBACK:
-		bp->link_params.loopback_mode = LOOPBACK_BMAC;
+		bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
+						LOOPBACK_XMAC : LOOPBACK_BMAC;
 		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 		break;
 	default:
@@ -1566,6 +1660,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
 	for (i = ETH_HLEN; i < pkt_size; i++)
 		packet[i] = (unsigned char) (i & 0xff);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		rc = -ENOMEM;
+		dev_kfree_skb(skb);
+		BNX2X_ERR("Unable to map SKB\n");
+		goto test_loopback_exit;
+	}
 
 	/* send the loopback packet */
 	num_pkts = 0;
@@ -1580,8 +1682,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 
 	bd_prod = TX_BD(fp_tx->tx_bd_prod);
 	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
-	mapping = dma_map_single(&bp->pdev->dev, skb->data,
-				 skb_headlen(skb), DMA_TO_DEVICE);
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -1611,6 +1711,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
 
 	mmiowb();
+	barrier();
 
 	num_pkts++;
 	fp_tx->tx_bd_prod += 2; /* start + pbd */
@@ -1639,9 +1740,11 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	if (rx_idx != rx_start_idx + num_pkts)
 		goto test_loopback_exit;
 
-	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
+	rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
+	cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
 	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
-	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
+	cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+	if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
 		goto test_loopback_rx_exit;
 
 	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
@@ -1649,6 +1752,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 		goto test_loopback_rx_exit;
 
 	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
+	dma_sync_single_for_device(&bp->pdev->dev,
+				   dma_unmap_addr(rx_buf, mapping),
+				   fp_rx->rx_buf_size, DMA_FROM_DEVICE);
 	skb = rx_buf->skb;
 	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
 	for (i = ETH_HLEN; i < pkt_size; i++)
@@ -1674,7 +1780,7 @@ test_loopback_exit:
 	return rc;
 }
 
-static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
+static int bnx2x_test_loopback(struct bnx2x *bp)
 {
 	int rc = 0, res;
 
@@ -1687,13 +1793,13 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
 	bnx2x_netif_stop(bp, 1);
 	bnx2x_acquire_phy_lock(bp);
 
-	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
+	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
 	if (res) {
 		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
 		rc |= BNX2X_PHY_LOOPBACK_FAILED;
 	}
 
-	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
+	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
 	if (res) {
 		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
 		rc |= BNX2X_MAC_LOOPBACK_FAILED;
@@ -1765,39 +1871,20 @@ test_nvram_exit:
 	return rc;
 }
 
+/* Send an EMPTY ramrod on the first queue */
 static int bnx2x_test_intr(struct bnx2x *bp)
 {
-	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
-	int i, rc;
+	struct bnx2x_queue_state_params params = {0};
 
 	if (!netif_running(bp->dev))
 		return -ENODEV;
 
-	config->hdr.length = 0;
-	if (CHIP_IS_E1(bp))
-		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
-	else
-		config->hdr.offset = BP_FUNC(bp);
-	config->hdr.client_id = bp->fp->cl_id;
-	config->hdr.reserved1 = 0;
-
-	bp->set_mac_pending = 1;
-	smp_wmb();
-	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
-			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
-	if (rc == 0) {
-		for (i = 0; i < 10; i++) {
-			if (!bp->set_mac_pending)
-				break;
-			smp_rmb();
-			msleep_interruptible(10);
-		}
-		if (i == 10)
-			rc = -ENODEV;
-	}
+	params.q_obj = &bp->fp->q_obj;
+	params.cmd = BNX2X_Q_CMD_EMPTY;
 
-	return rc;
+	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+	return bnx2x_queue_state_change(bp, &params);
 }
 
 static void bnx2x_self_test(struct net_device *dev,
@@ -1836,7 +1923,7 @@ static void bnx2x_self_test(struct net_device *dev,
 		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 		bnx2x_nic_load(bp, LOAD_DIAG);
 		/* wait until link state is restored */
-		bnx2x_wait_for_link(bp, link_up, is_serdes);
+		bnx2x_wait_for_link(bp, 1, is_serdes);
 
 		if (bnx2x_test_registers(bp) != 0) {
 			buf[0] = 1;
@@ -1847,7 +1934,7 @@ static void bnx2x_self_test(struct net_device *dev,
 			etest->flags |= ETH_TEST_FL_FAILED;
 		}
 
-		buf[2] = bnx2x_test_loopback(bp, link_up);
+		buf[2] = bnx2x_test_loopback(bp);
 		if (buf[2] != 0)
 			etest->flags |= ETH_TEST_FL_FAILED;
 
@@ -1885,6 +1972,14 @@ static void bnx2x_self_test(struct net_device *dev,
 #define IS_MF_MODE_STAT(bp) \
 			(IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
 
+/* ethtool statistics are displayed for all regular ethernet queues and the
+ * fcoe L2 queue if not disabled
+ */
+static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
+{
+	return BNX2X_NUM_ETH_QUEUES(bp);
+}
+
 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 {
 	struct bnx2x *bp = netdev_priv(dev);
@@ -1893,7 +1988,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 	switch (stringset) {
 	case ETH_SS_STATS:
 		if (is_multi(bp)) {
-			num_stats = BNX2X_NUM_STAT_QUEUES(bp) *
+			num_stats = bnx2x_num_stat_queues(bp) *
 				BNX2X_NUM_Q_STATS;
 			if (!IS_MF_MODE_STAT(bp))
 				num_stats += BNX2X_NUM_STATS;
@@ -1926,14 +2021,9 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 	case ETH_SS_STATS:
 		if (is_multi(bp)) {
 			k = 0;
-			for_each_napi_queue(bp, i) {
+			for_each_eth_queue(bp, i) {
 				memset(queue_name, 0, sizeof(queue_name));
-
-				if (IS_FCOE_IDX(i))
-					sprintf(queue_name, "fcoe");
-				else
-					sprintf(queue_name, "%d", i);
-
+				sprintf(queue_name, "%d", i);
 				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
 					snprintf(buf + (k + j)*ETH_GSTRING_LEN,
 						ETH_GSTRING_LEN,
@@ -1972,7 +2062,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 
 	if (is_multi(bp)) {
 		k = 0;
-		for_each_napi_queue(bp, i) {
+		for_each_eth_queue(bp, i) {
 			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
 			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
 				if (bnx2x_q_stats_arr[j].size == 0) {
@@ -2090,14 +2180,30 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev,
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	size_t copy_size =
-		min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+		min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+	size_t i;
 
 	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
 		return -EOPNOTSUPP;
 
-	indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
-	memcpy(indir->ring_index, bp->rx_indir_table,
-	       copy_size * sizeof(bp->rx_indir_table[0]));
+	/* Get the current configuration of the RSS indirection table */
+	bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
+
+	/*
+	 * We can't use a memcpy() because the internal storage of the
+	 * indirection table is a u8 array while indir->ring_index
+	 * points to an array of u32.
+	 *
+	 * The indirection table contains FW Client IDs, so the returned
+	 * table must be re-based to the Client ID of the leading RSS
+	 * queue.
+	 */
+	for (i = 0; i < copy_size; i++)
+		indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
+
+	indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+
 	return 0;
 }
 
@@ -2106,21 +2212,33 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev,
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	size_t i;
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+	u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
 
 	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
 		return -EOPNOTSUPP;
 
-	/* Validate size and indices */
-	if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+	/* validate the size */
+	if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
 		return -EINVAL;
-	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
-		if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
+
+	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+		/* validate the indices */
+		if (indir->ring_index[i] >= num_eth_queues)
 			return -EINVAL;
+		/*
+		 * As in bnx2x_get_rxfh_indir: we can't use a memcpy()
+		 * because the internal storage of the indirection table is
+		 * a u8 array while indir->ring_index points to an array
+		 * of u32.
+		 *
+		 * The indirection table contains FW Client IDs, so the
+		 * received table must be re-based to the Client ID of the
+		 * leading RSS queue.
+		 */
+		ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+	}
 
-	memcpy(bp->rx_indir_table, indir->ring_index,
-	       indir->size * sizeof(bp->rx_indir_table[0]));
-	bnx2x_push_indir_table(bp);
-	return 0;
+	return bnx2x_config_rss_pf(bp, ind_table, false);
 }
 
 static const struct ethtool_ops bnx2x_ethtool_ops = {
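
A worked example of the Client-ID re-basing both rxfh hunks rely on: the firmware table stores absolute FW Client IDs, ethtool speaks in zero-based ring indices, and the leading RSS queue's cl_id is the conversion constant in both directions (all values below are hypothetical):

	u8 cl_id = 5;				/* hypothetical bp->fp->cl_id */
	u32 ring_index = 2;			/* ethtool's view: eth queue #2 */
	u8 fw_entry = ring_index + cl_id;	/* 7: what _set_rxfh_indir stores */
	u32 reported = fw_entry - cl_id;	/* 2: what _get_rxfh_indir returns */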

+ 199 - 320
drivers/net/bnx2x/bnx2x_fw_defs.h

@@ -10,249 +10,221 @@
 #ifndef BNX2X_FW_DEFS_H
 #define BNX2X_FW_DEFS_H
 
-#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base)
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[148].base)
 #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[141].base + ((assertListEntry) * IRO[141].m1))
-#define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
-	(IRO[144].base + ((pfId) * IRO[144].m1))
+	(IRO[147].base + ((assertListEntry) * IRO[147].m1))
 #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
-	(IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \
-	IRO[149].m2))
+	(IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
+	IRO[153].m2))
 #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
-	(IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \
-	IRO[150].m2))
+	(IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
+	IRO[154].m2))
 #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
-	(IRO[156].base + ((funcId) * IRO[156].m1))
+	(IRO[159].base + ((funcId) * IRO[159].m1))
 #define CSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[146].base + ((funcId) * IRO[146].m1))
-#define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base)
-#define CSTORM_IGU_MODE_OFFSET (IRO[154].base)
+	(IRO[149].base + ((funcId) * IRO[149].m1))
+#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
 #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[311].base + ((pfId) * IRO[311].m1))
+	(IRO[315].base + ((pfId) * IRO[315].m1))
 #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
-	(IRO[312].base + ((pfId) * IRO[312].m1))
-	#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
-	(IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \
-	IRO[304].m2))
-	#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \
-	IRO[306].m2))
-	#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \
-	IRO[305].m2))
-	#define \
-	CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
-	(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \
-	IRO[307].m2))
-	#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
-	(IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \
-	IRO[303].m2))
-	#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
-	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \
-	IRO[309].m2))
-	#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
-	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \
-	IRO[308].m2))
+	(IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
+	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
+	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+	(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
 #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[310].base + ((pfId) * IRO[310].m1))
+	(IRO[314].base + ((pfId) * IRO[314].m1))
 #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[302].base + ((pfId) * IRO[302].m1))
+	(IRO[306].base + ((pfId) * IRO[306].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[301].base + ((pfId) * IRO[301].m1))
+	(IRO[305].base + ((pfId) * IRO[305].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[300].base + ((pfId) * IRO[300].m1))
-#define CSTORM_PATH_ID_OFFSET (IRO[159].base)
+	(IRO[304].base + ((pfId) * IRO[304].m1))
+#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[151].base + ((funcId) * IRO[151].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
-	(IRO[137].base + ((pfId) * IRO[137].m1))
+	(IRO[142].base + ((pfId) * IRO[142].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
+	(IRO[143].base + ((pfId) * IRO[143].m1))
 #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
-	(IRO[136].base + ((pfId) * IRO[136].m1))
-#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size)
+	(IRO[141].base + ((pfId) * IRO[141].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
 #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
-	(IRO[138].base + ((pfId) * IRO[138].m1))
-#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size)
-#define CSTORM_STATS_FLAGS_OFFSET(pfId) \
-	(IRO[143].base + ((pfId) * IRO[143].m1))
+	(IRO[144].base + ((pfId) * IRO[144].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
+	(IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
 #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
-	(IRO[129].base + ((sbId) * IRO[129].m1))
+	(IRO[133].base + ((sbId) * IRO[133].m1))
+#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
+	(IRO[134].base + ((sbId) * IRO[134].m1))
+#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
+	(IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
 #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
-	(IRO[128].base + ((sbId) * IRO[128].m1))
-#define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size)
-#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
 	(IRO[132].base + ((sbId) * IRO[132].m1))
-#define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+	(IRO[137].base + ((sbId) * IRO[137].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
 #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
-	(IRO[151].base + ((vfId) * IRO[151].m1))
+	(IRO[155].base + ((vfId) * IRO[155].m1))
 #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
-	(IRO[152].base + ((vfId) * IRO[152].m1))
+	(IRO[156].base + ((vfId) * IRO[156].m1))
 #define CSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[147].base + ((funcId) * IRO[147].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base)
+	(IRO[150].base + ((funcId) * IRO[150].m1))
+#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
-	(IRO[198].base + ((pfId) * IRO[198].m1))
-#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base)
+	(IRO[203].base + ((pfId) * IRO[203].m1))
+#define TSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[102].base)
 #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[98].base + ((assertListEntry) * IRO[98].m1))
-	#define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \
-	(IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \
-	IRO[197].m2))
-#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base)
+	(IRO[101].base + ((assertListEntry) * IRO[101].m1))
+#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
 #define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
-	(IRO[105].base)
-#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
-	(IRO[96].base + ((pfId) * IRO[96].m1))
-#define TSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[101].base + ((funcId) * IRO[101].m1))
+	(IRO[108].base)
 #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
-	(IRO[195].base + ((pfId) * IRO[195].m1))
-#define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base)
-#define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \
-	(IRO[91].base + ((pfId) * IRO[91].m1))
-#define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size)
-	#define \
-	TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \
-	(IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \
-	* IRO[260].m2))
+	(IRO[201].base + ((pfId) * IRO[201].m1))
+#define TSTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[103].base + ((funcId) * IRO[103].m1))
 #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[264].base + ((pfId) * IRO[264].m1))
+	(IRO[271].base + ((pfId) * IRO[271].m1))
 #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
-	(IRO[265].base + ((pfId) * IRO[265].m1))
+	(IRO[272].base + ((pfId) * IRO[272].m1))
 #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
-	(IRO[266].base + ((pfId) * IRO[266].m1))
+	(IRO[273].base + ((pfId) * IRO[273].m1))
 #define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
-	(IRO[267].base + ((pfId) * IRO[267].m1))
+	(IRO[274].base + ((pfId) * IRO[274].m1))
 #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[263].base + ((pfId) * IRO[263].m1))
+	(IRO[270].base + ((pfId) * IRO[270].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[262].base + ((pfId) * IRO[262].m1))
+	(IRO[269].base + ((pfId) * IRO[269].m1))
 #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[261].base + ((pfId) * IRO[261].m1))
+	(IRO[268].base + ((pfId) * IRO[268].m1))
 #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
-	(IRO[259].base + ((pfId) * IRO[259].m1))
+	(IRO[267].base + ((pfId) * IRO[267].m1))
 #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
-	(IRO[269].base + ((pfId) * IRO[269].m1))
+	(IRO[276].base + ((pfId) * IRO[276].m1))
 #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[256].base + ((pfId) * IRO[256].m1))
+	(IRO[263].base + ((pfId) * IRO[263].m1))
 #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
-	(IRO[257].base + ((pfId) * IRO[257].m1))
+	(IRO[264].base + ((pfId) * IRO[264].m1))
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[265].base + ((pfId) * IRO[265].m1))
 #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
-	(IRO[258].base + ((pfId) * IRO[258].m1))
+	(IRO[266].base + ((pfId) * IRO[266].m1))
 #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
-	(IRO[196].base + ((pfId) * IRO[196].m1))
-	#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \
-	(IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \
-	IRO[100].m2))
-#define TSTORM_STATS_FLAGS_OFFSET(pfId) \
-	(IRO[95].base + ((pfId) * IRO[95].m1))
+	(IRO[202].base + ((pfId) * IRO[202].m1))
+#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[105].base + ((funcId) * IRO[105].m1))
 #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
-	(IRO[211].base + ((pfId) * IRO[211].m1))
+	(IRO[216].base + ((pfId) * IRO[216].m1))
 #define TSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[102].base + ((funcId) * IRO[102].m1))
-#define USTORM_AGG_DATA_OFFSET (IRO[201].base)
-#define USTORM_AGG_DATA_SIZE (IRO[201].size)
-#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base)
+	(IRO[104].base + ((funcId) * IRO[104].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
+#define USTORM_AGG_DATA_SIZE (IRO[206].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[177].base)
 #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[169].base + ((assertListEntry) * IRO[169].m1))
+	(IRO[176].base + ((assertListEntry) * IRO[176].m1))
+#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
+	(IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
+	IRO[205].m2))
 #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
-	(IRO[178].base + ((portId) * IRO[178].m1))
-#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
-	(IRO[172].base + ((pfId) * IRO[172].m1))
+	(IRO[183].base + ((portId) * IRO[183].m1))
 #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
-	(IRO[313].base + ((pfId) * IRO[313].m1))
+	(IRO[317].base + ((pfId) * IRO[317].m1))
 #define USTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[174].base + ((funcId) * IRO[174].m1))
-#define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base)
+	(IRO[178].base + ((funcId) * IRO[178].m1))
 #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[277].base + ((pfId) * IRO[277].m1))
+	(IRO[281].base + ((pfId) * IRO[281].m1))
 #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
-	(IRO[278].base + ((pfId) * IRO[278].m1))
-#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
 	(IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+	(IRO[286].base + ((pfId) * IRO[286].m1))
 #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
-	(IRO[279].base + ((pfId) * IRO[279].m1))
+	(IRO[283].base + ((pfId) * IRO[283].m1))
 #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[275].base + ((pfId) * IRO[275].m1))
+	(IRO[279].base + ((pfId) * IRO[279].m1))
 #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[274].base + ((pfId) * IRO[274].m1))
+	(IRO[278].base + ((pfId) * IRO[278].m1))
 #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[273].base + ((pfId) * IRO[273].m1))
+	(IRO[277].base + ((pfId) * IRO[277].m1))
 #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[276].base + ((pfId) * IRO[276].m1))
-#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
 	(IRO[280].base + ((pfId) * IRO[280].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+	(IRO[284].base + ((pfId) * IRO[284].m1))
 #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
-	(IRO[281].base + ((pfId) * IRO[281].m1))
+	(IRO[285].base + ((pfId) * IRO[285].m1))
 #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
-	(IRO[176].base + ((pfId) * IRO[176].m1))
-	#define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \
-	(IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \
-	IRO[173].m2))
-	#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
-	(IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \
-	IRO[204].m2))
+	(IRO[182].base + ((pfId) * IRO[182].m1))
+#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[180].base + ((funcId) * IRO[180].m1))
+#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
+	(IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
+	IRO[209].m2))
 #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
-	(IRO[205].base + ((qzoneId) * IRO[205].m1))
-#define USTORM_STATS_FLAGS_OFFSET(pfId) \
-	(IRO[171].base + ((pfId) * IRO[171].m1))
-#define USTORM_TPA_BTR_OFFSET (IRO[202].base)
-#define USTORM_TPA_BTR_SIZE (IRO[202].size)
+	(IRO[210].base + ((qzoneId) * IRO[210].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
+#define USTORM_TPA_BTR_SIZE (IRO[207].size)
 #define USTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[175].base + ((funcId) * IRO[175].m1))
-#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base)
-#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base)
-#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base)
+	(IRO[179].base + ((funcId) * IRO[179].m1))
+#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
+#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
+#define XSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[51].base)
 #define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[53].base + ((assertListEntry) * IRO[53].m1))
+	(IRO[50].base + ((assertListEntry) * IRO[50].m1))
 #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
-	(IRO[47].base + ((portId) * IRO[47].m1))
-#define XSTORM_E1HOV_OFFSET(pfId) \
-	(IRO[55].base + ((pfId) * IRO[55].m1))
-#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
-	(IRO[45].base + ((pfId) * IRO[45].m1))
+	(IRO[43].base + ((portId) * IRO[43].m1))
 #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
-	(IRO[49].base + ((pfId) * IRO[49].m1))
+	(IRO[45].base + ((pfId) * IRO[45].m1))
 #define XSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[51].base + ((funcId) * IRO[51].m1))
-#define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base)
+	(IRO[47].base + ((funcId) * IRO[47].m1))
 #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[290].base + ((pfId) * IRO[290].m1))
+	(IRO[294].base + ((pfId) * IRO[294].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
-	(IRO[293].base + ((pfId) * IRO[293].m1))
+	(IRO[297].base + ((pfId) * IRO[297].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
-	(IRO[294].base + ((pfId) * IRO[294].m1))
+	(IRO[298].base + ((pfId) * IRO[298].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
-	(IRO[295].base + ((pfId) * IRO[295].m1))
+	(IRO[299].base + ((pfId) * IRO[299].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
-	(IRO[296].base + ((pfId) * IRO[296].m1))
+	(IRO[300].base + ((pfId) * IRO[300].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
-	(IRO[297].base + ((pfId) * IRO[297].m1))
+	(IRO[301].base + ((pfId) * IRO[301].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
-	(IRO[298].base + ((pfId) * IRO[298].m1))
+	(IRO[302].base + ((pfId) * IRO[302].m1))
 #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
-	(IRO[299].base + ((pfId) * IRO[299].m1))
+	(IRO[303].base + ((pfId) * IRO[303].m1))
 #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[289].base + ((pfId) * IRO[289].m1))
+	(IRO[293].base + ((pfId) * IRO[293].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[288].base + ((pfId) * IRO[288].m1))
+	(IRO[292].base + ((pfId) * IRO[292].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[287].base + ((pfId) * IRO[287].m1))
+	(IRO[291].base + ((pfId) * IRO[291].m1))
 #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[292].base + ((pfId) * IRO[292].m1))
+	(IRO[296].base + ((pfId) * IRO[296].m1))
 #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
-	(IRO[291].base + ((pfId) * IRO[291].m1))
+	(IRO[295].base + ((pfId) * IRO[295].m1))
 #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
-	(IRO[286].base + ((pfId) * IRO[286].m1))
+	(IRO[290].base + ((pfId) * IRO[290].m1))
 #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[285].base + ((pfId) * IRO[285].m1))
+	(IRO[289].base + ((pfId) * IRO[289].m1))
 #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
-	(IRO[284].base + ((pfId) * IRO[284].m1))
+	(IRO[288].base + ((pfId) * IRO[288].m1))
 #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
-	(IRO[283].base + ((pfId) * IRO[283].m1))
-#define XSTORM_PATH_ID_OFFSET (IRO[65].base)
-	#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \
-	(IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \
-	IRO[50].m2))
+	(IRO[287].base + ((pfId) * IRO[287].m1))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
-	(IRO[48].base + ((pfId) * IRO[48].m1))
+	(IRO[44].base + ((pfId) * IRO[44].m1))
+#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[49].base + ((funcId) * IRO[49].m1))
 #define XSTORM_SPQ_DATA_OFFSET(funcId) \
 	(IRO[32].base + ((funcId) * IRO[32].m1))
 #define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
@@ -260,42 +232,37 @@
 	(IRO[30].base + ((funcId) * IRO[30].m1))
 #define XSTORM_SPQ_PROD_OFFSET(funcId) \
 	(IRO[31].base + ((funcId) * IRO[31].m1))
-#define XSTORM_STATS_FLAGS_OFFSET(pfId) \
-	(IRO[43].base + ((pfId) * IRO[43].m1))
 #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
-	(IRO[206].base + ((portId) * IRO[206].m1))
+	(IRO[211].base + ((portId) * IRO[211].m1))
 #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
-	(IRO[207].base + ((portId) * IRO[207].m1))
+	(IRO[212].base + ((portId) * IRO[212].m1))
 #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
-	(IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \
-	IRO[209].m2))
+	(IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
+	IRO[214].m2))
 #define XSTORM_VF_TO_PF_OFFSET(funcId) \
-	(IRO[52].base + ((funcId) * IRO[52].m1))
+	(IRO[48].base + ((funcId) * IRO[48].m1))
 #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
 
-/* RSS hash types */
-#define DEFAULT_HASH_TYPE 0
-#define IPV4_HASH_TYPE 1
-#define TCP_IPV4_HASH_TYPE 2
-#define IPV6_HASH_TYPE 3
-#define TCP_IPV6_HASH_TYPE 4
-#define VLAN_PRI_HASH_TYPE 5
-#define E1HOV_PRI_HASH_TYPE 6
-#define DSCP_HASH_TYPE 7
+/**
+ * This file defines HSI constants for the ETH flow
+ */
+#ifdef _EVEREST_MICROCODE
+#include "Microcode\Generated\DataTypes\eth_rx_bd.h"
+#include "Microcode\Generated\DataTypes\eth_tx_bd.h"
+#include "Microcode\Generated\DataTypes\eth_rx_cqe.h"
+#include "Microcode\Generated\DataTypes\eth_rx_sge.h"
+#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h"
+#endif
 
 
 /* Ethernet Ring parameters */
 #define X_ETH_LOCAL_RING_SIZE 13
-#define FIRST_BD_IN_PKT 0
+#define FIRST_BD_IN_PKT	0
 #define PARSE_BD_INDEX 1
 #define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
 #define U_ETH_NUM_OF_SGES_TO_FETCH 8
 #define U_ETH_MAX_SGES_FOR_PACKET 3
 
-/*Tx params*/
-#define X_ETH_NO_VLAN 0
-#define X_ETH_OUTBAND_VLAN 1
-#define X_ETH_INBAND_VLAN 2
 /* Rx ring params */
 #define U_ETH_LOCAL_BD_RING_SIZE 8
 #define U_ETH_LOCAL_SGE_RING_SIZE 10
@@ -311,79 +278,64 @@
 #define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
 #define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
 
-#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
-#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
+#define U_ETH_BDS_PER_PAGE_MASK	(U_ETH_BDS_PER_PAGE-1)
+#define U_ETH_CQE_PER_PAGE_MASK	(TU_ETH_CQES_PER_PAGE-1)
 #define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
 
 #define U_ETH_UNDEFINED_Q 0xFF
 
-/* values of command IDs in the ramrod message */
-#define RAMROD_CMD_ID_ETH_UNUSED 0
-#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1
-#define RAMROD_CMD_ID_ETH_UPDATE 2
-#define RAMROD_CMD_ID_ETH_HALT 3
-#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4
-#define RAMROD_CMD_ID_ETH_ACTIVATE 5
-#define RAMROD_CMD_ID_ETH_DEACTIVATE 6
-#define RAMROD_CMD_ID_ETH_EMPTY 7
-#define RAMROD_CMD_ID_ETH_TERMINATE 8
-
-/* command values for set mac command */
-#define T_ETH_MAC_COMMAND_SET 0
-#define T_ETH_MAC_COMMAND_INVALIDATE 1
-
 #define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY 10
+#define ETH_NUM_OF_RSS_ENGINES_E2 72
+
+#define FILTER_RULES_COUNT 16
+#define MULTICAST_RULES_COUNT 16
+#define CLASSIFY_RULES_COUNT 16
 
 /*The CRC32 seed, that is used for the hash(reduction) multicast address */
-#define T_ETH_CRC32_HASH_SEED 0x00000000
+#define ETH_CRC32_HASH_SEED 0x00000000
+
+#define ETH_CRC32_HASH_BIT_SIZE	(8)
+#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
 
 /* Maximal L2 clients supported */
 #define ETH_MAX_RX_CLIENTS_E1 18
 #define ETH_MAX_RX_CLIENTS_E1H 28
+#define ETH_MAX_RX_CLIENTS_E2 152
+
+/* Maximal statistics client Ids */
+#define MAX_STAT_COUNTER_ID_E1 36
+#define MAX_STAT_COUNTER_ID_E1H	56
+#define MAX_STAT_COUNTER_ID_E2 140
+
+#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
+#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
+#define MAX_MAC_CREDIT_E2 272 /* Per Path */
+#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
 
-#define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H
 
 /* Maximal aggregation queues supported */
 #define ETH_MAX_AGGREGATION_QUEUES_E1 32
-#define ETH_MAX_AGGREGATION_QUEUES_E1H 64
+#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
 
-/* ETH RSS modes */
-#define ETH_RSS_MODE_DISABLED 0
-#define ETH_RSS_MODE_REGULAR 1
-#define ETH_RSS_MODE_VLAN_PRI 2
-#define ETH_RSS_MODE_E1HOV_PRI 3
-#define ETH_RSS_MODE_IP_DSCP 4
-#define ETH_RSS_MODE_E2_INTEG 5
 
+#define ETH_NUM_OF_MCAST_BINS 256
+#define ETH_NUM_OF_MCAST_ENGINES_E2 72
 
-/* ETH vlan filtering modes */
-#define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */
-#define ETH_VLAN_FILTER_SPECIFIC_VLAN \
-	1 /* Only the vlan_id is allowed */
-#define ETH_VLAN_FILTER_CLASSIFY \
-	2 /* vlan will be added to CAM for classification */
+#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
+	(ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
+	(ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
 
-/* Fast path CQE selection */
-#define ETH_FP_CQE_REGULAR 0
-#define ETH_FP_CQE_SGL 1
-#define ETH_FP_CQE_RAW 2
+#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
 
 
 /**
-* This file defines HSI constants common to all microcode flows
-*/
-
-/* Connection types */
-#define ETH_CONNECTION_TYPE 0
-#define TOE_CONNECTION_TYPE 1
-#define RDMA_CONNECTION_TYPE 2
-#define ISCSI_CONNECTION_TYPE 3
-#define FCOE_CONNECTION_TYPE 4
-#define RESERVED_CONNECTION_TYPE_0 5
-#define RESERVED_CONNECTION_TYPE_1 6
-#define RESERVED_CONNECTION_TYPE_2 7
-#define NONE_CONNECTION_TYPE 8
-
+ * This file defines HSI constants common to all microcode flows
+ */
 
 #define PROTOCOL_STATE_BIT_OFFSET 6
 
@@ -391,25 +343,9 @@
 #define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
 #define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
 
-/* values of command IDs in the ramrod message */
-#define RAMROD_CMD_ID_COMMON_FUNCTION_START 1
-#define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2
-#define RAMROD_CMD_ID_COMMON_CFC_DEL 3
-#define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4
-#define RAMROD_CMD_ID_COMMON_SET_MAC 5
-#define RAMROD_CMD_ID_COMMON_STAT_QUERY 6
-#define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7
-#define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8
-
 /* microcode fixed page page size 4K (chains and ring segments) */
 #define MC_PAGE_SIZE 4096
 
-
-/* Host coalescing constants */
-#define HC_IGU_BC_MODE 0
-#define HC_IGU_NBC_MODE 1
-/* Host coalescing constants. E1 includes E1H as well */
-
 /* Number of indices per slow-path SB */
 #define HC_SP_SB_MAX_INDICES 16
 
@@ -418,30 +354,17 @@
 #define HC_SB_MAX_INDICES_E2 8
 
 #define HC_SB_MAX_SB_E1X 32
-#define HC_SB_MAX_SB_E2 136
+#define HC_SB_MAX_SB_E2	136
 
 #define HC_SP_SB_ID 0xde
 
-#define HC_REGULAR_SEGMENT 0
-#define HC_DEFAULT_SEGMENT 1
 #define HC_SB_MAX_SM 2
 
 #define HC_SB_MAX_DYNAMIC_INDICES 4
-#define HC_FUNCTION_DISABLED 0xff
-/* used by the driver to get the SB offset */
-#define USTORM_ID 0
-#define CSTORM_ID 1
-#define XSTORM_ID 2
-#define TSTORM_ID 3
-#define ATTENTION_ID 4
 
 /* max number of slow path commands per port */
 #define MAX_RAMRODS_PER_PORT 8
 
-/* values for RX ETH CQE type field */
-#define RX_ETH_CQE_TYPE_ETH_FASTPATH 0
-#define RX_ETH_CQE_TYPE_ETH_RAMROD 1
-
 
 /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
 
@@ -451,7 +374,7 @@
 
 #define XSEMI_CLK1_RESUL_CHIP (1e-3)
 
-#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
+#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
 
 /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
 
@@ -460,72 +383,28 @@
 
 #define FW_LOG_LIST_SIZE 50
 
-#define NUM_OF_PROTOCOLS 4
 #define NUM_OF_SAFC_BITS 16
 #define MAX_COS_NUMBER 4
-
-#define FAIRNESS_COS_WRR_MODE 0
-#define FAIRNESS_COS_ETS_MODE 1
-
-
-/* Priority Flow Control (PFC) */
+#define MAX_TRAFFIC_TYPES 8
 #define MAX_PFC_PRIORITIES 8
-#define MAX_PFC_TRAFFIC_TYPES 8
-
-/* Available Traffic Types for Link Layer Flow Control */
-#define LLFC_TRAFFIC_TYPE_NW 0
-#define LLFC_TRAFFIC_TYPE_FCOE 1
-#define LLFC_TRAFFIC_TYPE_ISCSI 2
-	/***************** START OF E2 INTEGRATION \
-	CODE***************************************/
-#define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3
-	/***************** END OF E2 INTEGRATION \
-	CODE***************************************/
-#define LLFC_TRAFFIC_TYPE_MAX 4
 
 	/* used by array traffic_type_to_priority[] to mark traffic type \
 	that is not mapped to priority*/
 #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
 
-#define LLFC_MODE_NONE 0
-#define LLFC_MODE_PFC 1
-#define LLFC_MODE_SAFC 2
-
-#define DCB_DISABLED 0
-#define DCB_ENABLED 1
 
-#define UNKNOWN_ADDRESS 0
-#define UNICAST_ADDRESS 1
-#define MULTICAST_ADDRESS 2
-#define BROADCAST_ADDRESS 3
+#define C_ERES_PER_PAGE \
+	(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
 
-#define SINGLE_FUNCTION 0
-#define MULTI_FUNCTION_SD 1
-#define MULTI_FUNCTION_SI 2
+#define STATS_QUERY_CMD_COUNT 16
 
-#define IP_V4 0
-#define IP_V6 1
+#define NIV_LIST_TABLE_SIZE 4096
 
+#define INVALID_VNIC_ID	0xFF
 
-#define C_ERES_PER_PAGE \
-	(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
-#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
 
-#define EVENT_RING_OPCODE_VF_PF_CHANNEL 0
-#define EVENT_RING_OPCODE_FUNCTION_START 1
-#define EVENT_RING_OPCODE_FUNCTION_STOP 2
-#define EVENT_RING_OPCODE_CFC_DEL 3
-#define EVENT_RING_OPCODE_CFC_DEL_WB 4
-#define EVENT_RING_OPCODE_SET_MAC 5
-#define EVENT_RING_OPCODE_STAT_QUERY 6
-#define EVENT_RING_OPCODE_STOP_TRAFFIC 7
-#define EVENT_RING_OPCODE_START_TRAFFIC 8
-#define EVENT_RING_OPCODE_FORWARD_SETUP 9
-
-#define VF_PF_CHANNEL_STATE_READY 0
-#define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1
-
-#define VF_PF_CHANNEL_STATE_MAX_NUMBER 2
+#define UNDEF_IRO 0x80000000
 
 
 #endif /* BNX2X_FW_DEFS_H */
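
All the renumbered macros above share one addressing scheme: an IRO entry is a firmware-supplied descriptor, and an offset is computed as the entry's base plus each index times the matching stride. A sketch of the layout — the field names below are an assumption for illustration; the authoritative struct ships with the firmware headers (bnx2x.h):

	/* assumed shape of one IRO descriptor */
	struct iro {
		u32 base;	/* location of the first instance in STORM RAM */
		u16 m1;		/* stride for the first index (e.g. pfId) */
		u16 m2;		/* stride for the second index, if any */
		u16 m3;		/* stride for the third index, if any */
		u16 size;	/* size of one instance */
	};

	/* so CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) resolves to
	 * IRO[308].base + pfId * IRO[308].m1 + iscsiEqId * IRO[308].m2
	 */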

File diff suppressed because it is too large
+ 1024 - 718
drivers/net/bnx2x/bnx2x_hsi.h


+ 224 - 92
drivers/net/bnx2x/bnx2x_init.h

@@ -15,98 +15,34 @@
 #ifndef BNX2X_INIT_H
 #define BNX2X_INIT_H
 
-/* RAM0 size in bytes */
-#define STORM_INTMEM_SIZE_E1		0x5800
-#define STORM_INTMEM_SIZE_E1H		0x10000
-#define STORM_INTMEM_SIZE(bp) ((CHIP_IS_E1(bp) ? STORM_INTMEM_SIZE_E1 : \
-						    STORM_INTMEM_SIZE_E1H) / 4)
-
-
 /* Init operation types and structures */
-/* Common for both E1 and E1H */
-#define OP_RD			0x1 /* read single register */
-#define OP_WR			0x2 /* write single register */
-#define OP_IW			0x3 /* write single register using mailbox */
-#define OP_SW			0x4 /* copy a string to the device */
-#define OP_SI			0x5 /* copy a string using mailbox */
-#define OP_ZR			0x6 /* clear memory */
-#define OP_ZP			0x7 /* unzip then copy with DMAE */
-#define OP_WR_64		0x8 /* write 64 bit pattern */
-#define OP_WB			0x9 /* copy a string using DMAE */
-
-/* FPGA and EMUL specific operations */
-#define OP_WR_EMUL		0xa /* write single register on Emulation */
-#define OP_WR_FPGA		0xb /* write single register on FPGA */
-#define OP_WR_ASIC		0xc /* write single register on ASIC */
-
-/* Init stages */
-/* Never reorder stages !!! */
-#define COMMON_STAGE		0
-#define PORT0_STAGE		1
-#define PORT1_STAGE		2
-#define FUNC0_STAGE		3
-#define FUNC1_STAGE		4
-#define FUNC2_STAGE		5
-#define FUNC3_STAGE		6
-#define FUNC4_STAGE		7
-#define FUNC5_STAGE		8
-#define FUNC6_STAGE		9
-#define FUNC7_STAGE		10
-#define STAGE_IDX_MAX		11
-
-#define STAGE_START		0
-#define STAGE_END		1
-
-
-/* Indices of blocks */
-#define PRS_BLOCK		0
-#define SRCH_BLOCK		1
-#define TSDM_BLOCK		2
-#define TCM_BLOCK		3
-#define BRB1_BLOCK		4
-#define TSEM_BLOCK		5
-#define PXPCS_BLOCK		6
-#define EMAC0_BLOCK		7
-#define EMAC1_BLOCK		8
-#define DBU_BLOCK		9
-#define MISC_BLOCK		10
-#define DBG_BLOCK		11
-#define NIG_BLOCK		12
-#define MCP_BLOCK		13
-#define UPB_BLOCK		14
-#define CSDM_BLOCK		15
-#define USDM_BLOCK		16
-#define CCM_BLOCK		17
-#define UCM_BLOCK		18
-#define USEM_BLOCK		19
-#define CSEM_BLOCK		20
-#define XPB_BLOCK		21
-#define DQ_BLOCK		22
-#define TIMERS_BLOCK		23
-#define XSDM_BLOCK		24
-#define QM_BLOCK		25
-#define PBF_BLOCK		26
-#define XCM_BLOCK		27
-#define XSEM_BLOCK		28
-#define CDU_BLOCK		29
-#define DMAE_BLOCK		30
-#define PXP_BLOCK		31
-#define CFC_BLOCK		32
-#define HC_BLOCK		33
-#define PXP2_BLOCK		34
-#define MISC_AEU_BLOCK		35
-#define PGLUE_B_BLOCK		36
-#define IGU_BLOCK		37
-#define ATC_BLOCK		38
-#define QM_4PORT_BLOCK		39
-#define XSEM_4PORT_BLOCK		40
+enum {
+	OP_RD = 0x1,	/* read a single register */
+	OP_WR,		/* write a single register */
+	OP_SW,		/* copy a string to the device */
+	OP_ZR,		/* clear memory */
+	OP_ZP,		/* unzip then copy with DMAE */
+	OP_WR_64,	/* write 64 bit pattern */
+	OP_WB,		/* copy a string using DMAE */
+	OP_WB_ZR,	/* clear a string using DMAE or indirect write */
+	/* skip the following ops if none of the init modes match */
+	OP_IF_MODE_OR,
+	/* skip the following ops unless all of the init modes match */
+	OP_IF_MODE_AND,
+	OP_MAX
+};
 
+enum {
+	STAGE_START,
+	STAGE_END,
+};
 
 /* Returns the index of start or end of a specific block stage in ops array*/
 #define BLOCK_OPS_IDX(block, stage, end) \
-			(2*(((block)*STAGE_IDX_MAX) + (stage)) + (end))
+	(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
 
 
+/* structs for the various opcodes */
 struct raw_op {
 	u32 op:8;
 	u32 offset:24;
@@ -116,7 +52,7 @@ struct raw_op {
 struct op_read {
 	u32 op:8;
 	u32 offset:24;
-	u32 pad;
+	u32 val;
 };
 
 struct op_write {
@@ -125,15 +61,15 @@ struct op_write {
 	u32 val;
 };
 
-struct op_string_write {
+struct op_arr_write {
 	u32 op:8;
 	u32 offset:24;
-#ifdef __LITTLE_ENDIAN
-	u16 data_off;
-	u16 data_len;
-#else /* __BIG_ENDIAN */
+#ifdef __BIG_ENDIAN
 	u16 data_len;
 	u16 data_off;
+#else /* __LITTLE_ENDIAN */
+	u16 data_off;
+	u16 data_len;
 #endif
 };
 
@@ -143,14 +79,210 @@ struct op_zero {
 	u32 len;
 };
 
+struct op_if_mode {
+	u32 op:8;
+	u32 cmd_offset:24;
+	u32 mode_bit_map;
+};
+
+
 union init_op {
 	struct op_read		read;
 	struct op_write		write;
-	struct op_string_write	str_wr;
+	struct op_arr_write	arr_wr;
 	struct op_zero		zero;
 	struct raw_op		raw;
+	struct op_if_mode	if_mode;
+};
+
+
+/* Init Phases */
+enum {
+	PHASE_COMMON,
+	PHASE_PORT0,
+	PHASE_PORT1,
+	PHASE_PF0,
+	PHASE_PF1,
+	PHASE_PF2,
+	PHASE_PF3,
+	PHASE_PF4,
+	PHASE_PF5,
+	PHASE_PF6,
+	PHASE_PF7,
+	NUM_OF_INIT_PHASES
 };
 
+/* Init Modes */
+enum {
+	MODE_ASIC                      = 0x00000001,
+	MODE_FPGA                      = 0x00000002,
+	MODE_EMUL                      = 0x00000004,
+	MODE_E2                        = 0x00000008,
+	MODE_E3                        = 0x00000010,
+	MODE_PORT2                     = 0x00000020,
+	MODE_PORT4                     = 0x00000040,
+	MODE_SF                        = 0x00000080,
+	MODE_MF                        = 0x00000100,
+	MODE_MF_SD                     = 0x00000200,
+	MODE_MF_SI                     = 0x00000400,
+	MODE_MF_NIV                    = 0x00000800,
+	MODE_E3_A0                     = 0x00001000,
+	MODE_E3_B0                     = 0x00002000,
+	MODE_COS_BC                    = 0x00004000,
+	MODE_COS3                      = 0x00008000,
+	MODE_COS6                      = 0x00010000,
+	MODE_LITTLE_ENDIAN             = 0x00020000,
+	MODE_BIG_ENDIAN                = 0x00040000,
+};
+
+/* Init Blocks */
+enum {
+	BLOCK_ATC,
+	BLOCK_BRB1,
+	BLOCK_CCM,
+	BLOCK_CDU,
+	BLOCK_CFC,
+	BLOCK_CSDM,
+	BLOCK_CSEM,
+	BLOCK_DBG,
+	BLOCK_DMAE,
+	BLOCK_DORQ,
+	BLOCK_HC,
+	BLOCK_IGU,
+	BLOCK_MISC,
+	BLOCK_NIG,
+	BLOCK_PBF,
+	BLOCK_PGLUE_B,
+	BLOCK_PRS,
+	BLOCK_PXP2,
+	BLOCK_PXP,
+	BLOCK_QM,
+	BLOCK_SRC,
+	BLOCK_TCM,
+	BLOCK_TM,
+	BLOCK_TSDM,
+	BLOCK_TSEM,
+	BLOCK_UCM,
+	BLOCK_UPB,
+	BLOCK_USDM,
+	BLOCK_USEM,
+	BLOCK_XCM,
+	BLOCK_XPB,
+	BLOCK_XSDM,
+	BLOCK_XSEM,
+	BLOCK_MISC_AEU,
+	NUM_OF_INIT_BLOCKS
+};
+
+/* QM queue numbers */
+#define BNX2X_ETH_Q		0
+#define BNX2X_TOE_Q		3
+#define BNX2X_TOE_ACK_Q		6
+#define BNX2X_ISCSI_Q		9
+#define BNX2X_ISCSI_ACK_Q	8
+#define BNX2X_FCOE_Q		10
+
+/* Vnics per mode */
+#define BNX2X_PORT2_MODE_NUM_VNICS 4
+#define BNX2X_PORT4_MODE_NUM_VNICS 2
+
+/* COS offset for port1 in E3 B0 4port mode */
+#define BNX2X_E3B0_PORT1_COS_OFFSET 3
+
+/* QM Register addresses */
+#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
+	(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
+#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
+	(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
+#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
+	(QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
+
+/* extracts the QM queue number for the specified port and vnic */
+#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
+	((((port) << 1) | (vnic)) * 16 + (q_num))
+
+
+/* Maps the specified queue to the specified COS */
+static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
+{
+	/* find current COS mapping */
+	u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
+
+	/* check if queue->COS mapping has changed */
+	if (curr_cos != new_cos) {
+		u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
+		u32 reg_addr, reg_bit_map, vnic;
+
+		/* update parameters for 4port mode */
+		if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
+			num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
+			if (BP_PORT(bp)) {
+				curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+				new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+			}
+		}
+
+		/* change queue mapping for each VNIC */
+		for (vnic = 0; vnic < num_vnics; vnic++) {
+			u32 pf_q_num =
+				BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
+			u32 q_bit_map = 1 << (pf_q_num & 0x1f);
+
+			/* overwrite queue->VOQ mapping */
+			REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
+
+			/* clear queue bit from current COS bit map */
+			reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
+			reg_bit_map = REG_RD(bp, reg_addr);
+			REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
+
+			/* set queue bit in new COS bit map */
+			reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
+			reg_bit_map = REG_RD(bp, reg_addr);
+			REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
+
+			/* set/clear queue bit in command-queue bit map
+			 * (E2/E3A0 only, valid COS values are 0/1)
+			 */
+			if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
+				reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
+				reg_bit_map = REG_RD(bp, reg_addr);
+				q_bit_map = 1 << (2 * (pf_q_num & 0xf));
+				reg_bit_map = new_cos ?
+					      (reg_bit_map | q_bit_map) :
+					      (reg_bit_map & (~q_bit_map));
+				REG_WR(bp, reg_addr, reg_bit_map);
+			}
+		}
+	}
+}
+
+/* Configures the QM according to the specified per-traffic-type COSes */
+static inline void bnx2x_dcb_config_qm(struct bnx2x *bp,
+				       struct priority_cos *traffic_cos)
+{
+	bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
+			traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
+	bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
+			traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+	if (INIT_MODE_FLAGS(bp) & MODE_COS_BC) {
+		/* required only in backward compatible COS mode */
+		bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+		bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+		bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+		bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+	}
+}
+
+
 #define INITOP_SET		0	/* set the HW directly */
 #define INITOP_CLEAR		1	/* clear the HW directly */
 #define INITOP_INIT		2	/* set the init-value array */
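
The two OP_IF_MODE_* opcodes are what let one init-values blob serve every chip, port count and MF mode: each carries a mode bit-map and a cmd_offset that tells the interpreter how many of the following ops to skip when the mode test fails. A hypothetical fragment of a generated ops array — the opcodes and mode flags come from the enums above, while the register offsets and values are invented for illustration:

	static const struct raw_op ops[] = {
		/* skip the next 2 ops unless both MODE_E2 and MODE_PORT4 are set */
		{OP_IF_MODE_AND, 2, MODE_E2 | MODE_PORT4},
		{OP_WR, 0x104120, 0x0},		/* E2 4-port mode only */
		{OP_WR, 0x1701d0, 0x1},		/* E2 4-port mode only */
		{OP_WR, 0x104020, 0xff},	/* always executed */
	};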

+ 120 - 74
drivers/net/bnx2x/bnx2x_init_ops.h

@@ -15,13 +15,39 @@
 #ifndef BNX2X_INIT_OPS_H
 #define BNX2X_INIT_OPS_H
 
+
+#ifndef BP_ILT
+#define BP_ILT(bp)	NULL
+#endif
+
+#ifndef BP_FUNC
+#define BP_FUNC(bp)	0
+#endif
+
+#ifndef BP_PORT
+#define BP_PORT(bp)	0
+#endif
+
+#ifndef BNX2X_ILT_FREE
+#define BNX2X_ILT_FREE(x, y, sz)
+#endif
+
+#ifndef BNX2X_ILT_ZALLOC
+#define BNX2X_ILT_ZALLOC(x, y, sz)
+#endif
+
+#ifndef ILOG2
+#define ILOG2(x)	x
+#endif
+
 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
-static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-				      u32 addr, u32 len);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
+				      dma_addr_t phys_addr, u32 addr,
+				      u32 len);
 
-static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
-			      u32 len)
+static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
+			      const u32 *data, u32 len)
 {
 	u32 i;
 
@@ -29,24 +55,32 @@ static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
 		REG_WR(bp, addr + i*4, data[i]);
 }
 
-static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
-			      u32 len)
+static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
+			      const u32 *data, u32 len)
 {
 	u32 i;
 
 	for (i = 0; i < len; i++)
-		REG_WR_IND(bp, addr + i*4, data[i]);
+		bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
 }
 
-static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
+static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
+				u8 wb)
 {
 	if (bp->dmae_ready)
 		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+	else if (wb)
+		/*
+		 * Wide bus registers with no dmae need to be written
+		 * using indirect write.
+		 */
+		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
 	else
 		bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
 }
 
-static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
+			    u32 len, u8 wb)
 {
 	u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
 	u32 buf_len32 = buf_len/4;
@@ -57,12 +91,20 @@ static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
 	for (i = 0; i < len; i += buf_len32) {
 		u32 cur_len = min(buf_len32, len - i);
 
-		bnx2x_write_big_buf(bp, addr + i*4, cur_len);
+		bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
 	}
 }
 
-static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
-			     u32 len64)
+static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+{
+	if (bp->dmae_ready)
+		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+	else
+		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
+static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
+			     const u32 *data, u32 len64)
 {
 	u32 buf_len32 = FW_BUF_SIZE/4;
 	u32 len = len64*2;
@@ -82,7 +124,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
 	for (i = 0; i < len; i += buf_len32) {
 		u32 cur_len = min(buf_len32, len - i);
 
-		bnx2x_write_big_buf(bp, addr + i*4, cur_len);
+		bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
 	}
 }
 
@@ -100,7 +142,8 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
 #define IF_IS_PRAM_ADDR(base, addr) \
 			if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
 
-static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
+static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
+				const u8 *data)
 {
 	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
 		data = INIT_TSEM_INT_TABLE_DATA(bp);
@@ -129,31 +172,17 @@ static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
 	return data;
 }
 
-static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
+			     const u32 *data, u32 len)
 {
 	if (bp->dmae_ready)
-		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+		VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
 	else
-		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
-}
-
-static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
-			     u32 len)
-{
-	const u32 *old_data = data;
-
-	data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
-
-	if (bp->dmae_ready) {
-		if (old_data != data)
-			VIRT_WR_DMAE_LEN(bp, data, addr, len, 1);
-		else
-			VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
-	} else
 		bnx2x_init_ind_wr(bp, addr, data, len);
 }
 
-static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
+static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
+			u32 val_hi)
 {
 	u32 wb_write[2];
 
@@ -161,8 +190,8 @@ static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
 	wb_write[1] = val_hi;
 	REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
 }
-
-static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
+static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
+			     u32 blob_off)
 {
 	const u8 *data = NULL;
 	int rc;
@@ -186,39 +215,33 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
 static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
 {
 	u16 op_start =
-		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)];
+		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+						     STAGE_START)];
 	u16 op_end =
-		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
+		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+						     STAGE_END)];
 	union init_op *op;
-	int hw_wr;
-	u32 i, op_type, addr, len;
+	u32 op_idx, op_type, addr, len;
 	const u32 *data, *data_base;
 
 	/* If empty block */
 	if (op_start == op_end)
 		return;
 
-	if (CHIP_REV_IS_FPGA(bp))
-		hw_wr = OP_WR_FPGA;
-	else if (CHIP_REV_IS_EMUL(bp))
-		hw_wr = OP_WR_EMUL;
-	else
-		hw_wr = OP_WR_ASIC;
-
 	data_base = INIT_DATA(bp);
 
-	for (i = op_start; i < op_end; i++) {
-
-		op = (union init_op *)&(INIT_OPS(bp)[i]);
+	for (op_idx = op_start; op_idx < op_end; op_idx++) {
 
-		op_type = op->str_wr.op;
-		addr = op->str_wr.offset;
-		len = op->str_wr.data_len;
-		data = data_base + op->str_wr.data_off;
-
-		/* HW/EMUL specific */
-		if ((op_type > OP_WB) && (op_type == hw_wr))
-			op_type = OP_WR;
+		op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
+		/* Get generic data */
+		op_type = op->raw.op;
+		addr = op->raw.offset;
+		/* Get the data used by OP_SW, OP_WB, OP_ZP and OP_WR_64
+		 * (we assume that op_arr_write and op_write have the
+		 * same structure).
+		 */
+		len = op->arr_wr.data_len;
+		data = data_base + op->arr_wr.data_off;
 
 		switch (op_type) {
 		case OP_RD:
@@ -233,21 +256,39 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
 		case OP_WB:
 			bnx2x_init_wr_wb(bp, addr, data, len);
 			break;
-		case OP_SI:
-			bnx2x_init_ind_wr(bp, addr, data, len);
-			break;
 		case OP_ZR:
-			bnx2x_init_fill(bp, addr, 0, op->zero.len);
+			bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
+			break;
+		case OP_WB_ZR:
+			bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
 			break;
 		case OP_ZP:
 			bnx2x_init_wr_zp(bp, addr, len,
-					 op->str_wr.data_off);
+					 op->arr_wr.data_off);
 			break;
 		case OP_WR_64:
 			bnx2x_init_wr_64(bp, addr, data, len);
 			break;
+		case OP_IF_MODE_AND:
+			/* if any of the flags doesn't match, skip the
+			 * conditional block.
+			 */
+			if ((INIT_MODE_FLAGS(bp) &
+				op->if_mode.mode_bit_map) !=
+				op->if_mode.mode_bit_map)
+				op_idx += op->if_mode.cmd_offset;
+			break;
+		case OP_IF_MODE_OR:
+			/* if all the flags don't match, skip the conditional
+			 * block.
+			 */
+			if ((INIT_MODE_FLAGS(bp) &
+				op->if_mode.mode_bit_map) == 0)
+				op_idx += op->if_mode.cmd_offset;
+			break;
 		default:
-			/* happens whenever an op is of a diff HW */
+			/* Should never get here! */
+
 			break;
 		}
 	}
@@ -417,7 +458,8 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
 		PXP2_REG_RQ_BW_WR_UBOUND30}
 };
 
-static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
+static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
+			       int w_order)
 {
 	u32 val, i;
 
@@ -491,19 +533,21 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
 	if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
 		REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
 
-	if (CHIP_IS_E2(bp))
+	if (CHIP_IS_E3(bp))
+		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
+	else if (CHIP_IS_E2(bp))
 		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
 	else
 		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
 
-	if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1(bp)) {
 		/*    MPS      w_order     optimal TH      presently TH
 		 *    128         0             0               2
 		 *    256         1             1               3
 		 *    >=512       2             2               3
 		 */
 		/* DMAE is special */
-		if (CHIP_IS_E2(bp)) {
+		if (!CHIP_IS_E1H(bp)) {
 			/* E2 can use optimal TH */
 			val = w_order;
 			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
@@ -557,8 +601,8 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
 #define ILT_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
 #define ILT_RANGE(f, l)		(((l) << 10) | f)
 
-static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
-				 u32 size, u8 memop)
+static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
+				 struct ilt_line *line, u32 size, u8 memop)
 {
 	if (memop == ILT_MEMOP_FREE) {
 		BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
@@ -572,7 +616,8 @@ static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
 }
 
 
-static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
+static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
+				   u8 memop)
 {
 	int i, rc;
 	struct bnx2x_ilt *ilt = BP_ILT(bp);
@@ -617,8 +662,8 @@ static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
 	bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
 }
 
-static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
-				   int idx, u8 initop)
+static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
+				   struct bnx2x_ilt *ilt, int idx, u8 initop)
 {
 	dma_addr_t	null_mapping;
 	int abs_idx = ilt->start_line + idx;
@@ -733,7 +778,7 @@ static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
 }
 
 static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
-					    u32 psz_reg, u8 initop)
+				      u32 psz_reg, u8 initop)
 {
 	struct bnx2x_ilt *ilt = BP_ILT(bp);
 	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
@@ -848,7 +893,8 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
 
 	/* Initialize T2 */
 	for (i = 0; i < src_cid_count-1; i++)
-		t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent));
+		t2[i].next = (u64)(t2_mapping +
+			     (i+1)*sizeof(struct src_ent));
 
 	/* tell the searcher where the T2 table is */
 	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
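
For orientation, bnx2x_init_block() is keyed by a (block, phase) pair: BLOCK_OPS_IDX() maps the pair to a slot in a firmware-supplied offsets array holding the [start, end) op range to execute. A plausible caller-side sequence, using only enumerators defined earlier in this patch — the common phase first, then the per-port phase of whichever port the PF occupies:

	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
	bnx2x_init_block(bp, BLOCK_PXP2,
			 BP_PORT(bp) ? PHASE_PORT1 : PHASE_PORT0);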

+ 54 - 52
drivers/net/bnx2x/bnx2x_link.c

@@ -25,6 +25,8 @@
 #include <linux/mutex.h>
 
 #include "bnx2x.h"
+#include "bnx2x_cmn.h"
+
 
 /********************************************************/
 #define ETH_HLEN			14
@@ -874,6 +876,54 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
 	}
 }
 
+/******************************************************************************
+* Description:
+*  This function is needed because the NIG RX_COSx_PRIORITY_MASK
+*  registers are not at contiguous addresses, so COS0's address plus
+*  an offset is not usable; each COS entry maps to its own register.
+******************************************************************************/
+int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, u8 cos_entry,
+				   u32 priority_mask, u8 port)
+{
+	u32 nig_reg_rx_priority_mask_add = 0;
+
+	switch (cos_entry) {
+	case 0:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS0_PRIORITY_MASK;
+		break;
+	case 1:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS1_PRIORITY_MASK;
+		break;
+	case 2:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS2_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS2_PRIORITY_MASK;
+		break;
+	case 3:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
+		break;
+	case 4:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
+		break;
+	case 5:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
+		break;
+	default:
+		/* invalid COS entry -- don't write to a bogus address */
+		return -EINVAL;
+	}
+	}
+
+	REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
+
+	return 0;
+}
+
 static void bnx2x_update_pfc_nig(struct link_params *params,
 		struct link_vars *vars,
 		struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -958,15 +1008,12 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val);
 
 	if (nig_params) {
+		u8 i;
+
 		pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
 
-		REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK :
-		       NIG_REG_P0_RX_COS0_PRIORITY_MASK,
-		       nig_params->rx_cos0_priority_mask);
-
-		REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK :
-		       NIG_REG_P0_RX_COS1_PRIORITY_MASK,
-		       nig_params->rx_cos1_priority_mask);
+		for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
+			bnx2x_pfc_nig_rx_priority_mask(bp, i,
+				nig_params->rx_cos_priority_mask[i], port);
 
 		REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
 		       NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
@@ -1824,26 +1871,6 @@ void bnx2x_link_status_update(struct link_params *params,
 				vars->line_speed = SPEED_10000;
 				break;
 
-			case LINK_12GTFD:
-				vars->line_speed = SPEED_12000;
-				break;
-
-			case LINK_12_5GTFD:
-				vars->line_speed = SPEED_12500;
-				break;
-
-			case LINK_13GTFD:
-				vars->line_speed = SPEED_13000;
-				break;
-
-			case LINK_15GTFD:
-				vars->line_speed = SPEED_15000;
-				break;
-
-			case LINK_16GTFD:
-				vars->line_speed = SPEED_16000;
-				break;
-
 			default:
 				break;
 		}
@@ -2667,31 +2694,6 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
 			vars->link_status |= LINK_10GTFD;
 			break;
 
-		case GP_STATUS_12G_HIG:
-			new_line_speed = SPEED_12000;
-			vars->link_status |= LINK_12GTFD;
-			break;
-
-		case GP_STATUS_12_5G:
-			new_line_speed = SPEED_12500;
-			vars->link_status |= LINK_12_5GTFD;
-			break;
-
-		case GP_STATUS_13G:
-			new_line_speed = SPEED_13000;
-			vars->link_status |= LINK_13GTFD;
-			break;
-
-		case GP_STATUS_15G:
-			new_line_speed = SPEED_15000;
-			vars->link_status |= LINK_15GTFD;
-			break;
-
-		case GP_STATUS_16G:
-			new_line_speed = SPEED_16000;
-			vars->link_status |= LINK_16GTFD;
-			break;
-
 		default:
 			DP(NETIF_MSG_LINK,
 				  "link speed unsupported gp_status 0x%x\n",

+ 19 - 2
drivers/net/bnx2x/bnx2x_link.h

@@ -81,6 +81,7 @@
 #define PFC_BRB_FULL_LB_XOFF_THRESHOLD				170
 #define PFC_BRB_FULL_LB_XON_THRESHOLD				250
 
+#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
 /***********************************************************/
 /*                         Structs                         */
 /***********************************************************/
@@ -262,6 +263,8 @@ struct link_vars {
 #define MAC_TYPE_NONE		0
 #define MAC_TYPE_EMAC		1
 #define MAC_TYPE_BMAC		2
+#define MAC_TYPE_UMAC		3
+#define MAC_TYPE_XMAC		4
 
 	u8 phy_link_up; /* internal phy link indication */
 	u8 link_up;
@@ -363,6 +366,20 @@ int bnx2x_phy_probe(struct link_params *params);
 u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
 			     u32 shmem2_base, u8 port);
 
+/* DCBX structs */
+
+/* Number of maximum COS per chip */
+#define DCBX_E2E3_MAX_NUM_COS		(2)
+#define DCBX_E3B0_MAX_NUM_COS_PORT0	(6)
+#define DCBX_E3B0_MAX_NUM_COS_PORT1	(3)
+#define DCBX_E3B0_MAX_NUM_COS		( \
+			MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
+			    DCBX_E3B0_MAX_NUM_COS_PORT1))
+
+#define DCBX_MAX_NUM_COS			( \
+			MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
+			    DCBX_E2E3_MAX_NUM_COS))
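For reference, the MAXVAL() arithmetic above resolves to six COS entries; a sketch of the expansion (the BUILD_BUG_ON is illustrative, not part of the patch):

	/* DCBX_E3B0_MAX_NUM_COS = MAXVAL(6, 3) = 6   (port 0 dominates)
	 * DCBX_MAX_NUM_COS      = MAXVAL(6, 2) = 6   (E3 B0 dominates E2/E3)
	 * so rx_cos_priority_mask[DCBX_MAX_NUM_COS] below holds one mask per
	 * COS even in the largest case (E3 B0, port 0).
	 */
	BUILD_BUG_ON(DCBX_MAX_NUM_COS != 6);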
+
 /* PFC port configuration params */
 struct bnx2x_nig_brb_pfc_port_params {
 	/* NIG */
@@ -370,8 +387,8 @@ struct bnx2x_nig_brb_pfc_port_params {
 	u32 llfc_out_en;
 	u32 llfc_enable;
 	u32 pkt_priority_to_cos;
-	u32 rx_cos0_priority_mask;
-	u32 rx_cos1_priority_mask;
+	u8 num_of_rx_cos_priority_mask;
+	u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
 	u32 llfc_high_priority_classes;
 	u32 llfc_low_priority_classes;
 	/* BRB */

File diff suppressed because it is too large
+ 636 - 141
drivers/net/bnx2x/bnx2x_main.c


+ 166 - 28
drivers/net/bnx2x/bnx2x_reg.h

@@ -422,6 +422,7 @@
 #define CFC_REG_NUM_LCIDS_ALLOC 				 0x104020
 /* [R 9] Number of Arriving LCIDs in Link List Block */
 #define CFC_REG_NUM_LCIDS_ARRIVING				 0x104004
+#define CFC_REG_NUM_LCIDS_INSIDE_PF				 0x104120
 /* [R 9] Number of Leaving LCIDs in Link List Block */
 #define CFC_REG_NUM_LCIDS_LEAVING				 0x104018
 #define CFC_REG_WEAK_ENABLE_PF					 0x104124
@@ -783,6 +784,7 @@
 /* [RW 3] The number of simultaneous outstanding requests to Context Fetch
    Interface. */
 #define DORQ_REG_OUTST_REQ					 0x17003c
+#define DORQ_REG_PF_USAGE_CNT					 0x1701d0
 #define DORQ_REG_REGN						 0x170038
 /* [R 4] Current value of response A counter credit. Initial credit is
    configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
@@ -1645,6 +1647,17 @@
 #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN	 (0x1<<4)
 #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST	 (0x1<<2)
 #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN	 (0x1<<3)
+/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
+ * not it is the recipient of the message on the MDIO interface. The value
+ * is compared to the value on ctrl_md_devad. Drives output
+ * misc_xgxs0_phy_addr. Global register. */
+#define MISC_REG_WC0_CTRL_PHY_ADDR				 0xa9cc
+/* [RW 32] 1 [47] Packet Size = 64. Writes to this register write bits 31:0.
+ * Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_RX_STAT_GR64_LO				 0x200
+/* [RW 32] 1 [00] Tx Good Packet Count. Writes to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_TX_STAT_GTXPOK_LO				 0
 #define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN			 (0x1<<0)
 #define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN			 (0x1<<0)
 #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT	 (0x1<<0)
@@ -1838,6 +1851,10 @@
 #define NIG_REG_LLH1_FUNC_MEM					 0x161c0
 #define NIG_REG_LLH1_FUNC_MEM_ENABLE				 0x16160
 #define NIG_REG_LLH1_FUNC_MEM_SIZE				 16
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ * sending it to the BRB or calculating WoL on it. This bit controls port 1
+ * only. The legacy llh_multi_function_mode bit controls port 0. */
+#define NIG_REG_LLH1_MF_MODE					 0x18614
 /* [RW 8] init credit counter for port1 in LLH */
 #define NIG_REG_LLH1_XCM_INIT_CREDIT				 0x10564
 #define NIG_REG_LLH1_XCM_MASK					 0x10134
@@ -1889,6 +1906,26 @@
  * than one bit may be set; allowing multiple priorities to be mapped to one
  * COS. */
 #define NIG_REG_P0_RX_COS1_PRIORITY_MASK			 0x1805c
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK			 0x186b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
+ * priority is mapped to COS 3 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK			 0x186b4
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
+ * priority is mapped to COS 4 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK			 0x186b8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
+ * priority is mapped to COS 5 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK			 0x186bc
 /* [RW 15] Specify which of the credit registers the client is to be mapped
  * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
  * clients that are not subject to WFQ credit blocking - their
@@ -1926,6 +1963,9 @@
  * for management at priority 0; debug traffic at priorities 1 and 2; COS0
  * traffic at priority 3; and COS1 traffic at priority 4. */
 #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT			 0x180e4
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P1_HDRS_AFTER_BASIC				 0x1818c
 #define NIG_REG_P1_LLH_FUNC_MEM2				 0x184c0
 #define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE			 0x18460
 /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
@@ -1944,6 +1984,11 @@
  * than one bit may be set; allowing multiple priorities to be mapped to one
  * COS. */
 #define NIG_REG_P1_RX_COS1_PRIORITY_MASK			 0x181b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK			 0x186f8
 /* [RW 1] Pause enable for port0. This register may get 1 only when
    ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
    port */
@@ -2033,6 +2078,15 @@
 #define PBF_REG_COS1_UPPER_BOUND				 0x15c060
 /* [RW 31] The weight of COS1 in the ETS command arbiter. */
 #define PBF_REG_COS1_WEIGHT					 0x15c058
+/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_LB_Q					 0x140338
+/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q0					 0x14033c
+/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q1					 0x140340
 /* [RW 1] Disable processing further tasks from port 0 (after ending the
    current task in process). */
 #define PBF_REG_DISABLE_NEW_TASK_PROC_P0			 0x14005c
@@ -2050,14 +2104,25 @@
 /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
  * Ethernet header. */
 #define PBF_REG_HDRS_AFTER_BASIC				 0x15c0a8
-/* [RW 1] Indicates which COS is conncted to the highest priority in the
- * command arbiter. */
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PBF_REG_HDRS_AFTER_TAG_0				 0x15c0b8
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
+ * priority in the command arbiter. */
 #define PBF_REG_HIGH_PRIORITY_COS_NUM				 0x15c04c
 #define PBF_REG_IF_ENABLE_REG					 0x140044
 /* [RW 1] Init bit. When set the initial credits are copied to the credit
    registers (except the port credits). Should be set and then reset after
    the configuration of the block has ended. */
 #define PBF_REG_INIT						 0x140000
+/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_LB_Q					 0x15c248
+/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q0					 0x15c230
+/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q1					 0x15c234
 /* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
    copied to the credit register. Should be set and then reset after the
    configuration of the port has ended. */
@@ -2070,6 +2135,15 @@
    copied to the credit register. Should be set and then reset after the
    configuration of the port has ended. */
 #define PBF_REG_INIT_P4 					 0x14000c
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * the LB queue. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q			 0x140354
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * queue 0. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0			 0x140358
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * queue 1. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1			 0x14035c
 /* [RW 1] Enable for mac interface 0. */
 #define PBF_REG_MAC_IF0_ENABLE					 0x140030
 /* [RW 1] Enable for mac interface 1. */
@@ -2090,24 +2164,49 @@
 /* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
    lines. */
 #define PBF_REG_P0_INIT_CRD					 0x1400d0
-/* [RW 1] Indication that pause is enabled for port 0. */
-#define PBF_REG_P0_PAUSE_ENABLE 				 0x140014
-/* [R 8] Number of tasks in port 0 task queue. */
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * port 0. Reset upon init. */
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT			 0x140308
+/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
+#define PBF_REG_P0_PAUSE_ENABLE					 0x140014
+/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
 #define PBF_REG_P0_TASK_CNT					 0x140204
-/* [R 11] Current credit for port 1 in the tx port buffers in 16 byte lines. */
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 0. Reset upon init. */
+#define PBF_REG_P0_TQ_LINES_FREED_CNT				 0x1402f0
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */
+#define PBF_REG_P0_TQ_OCCUPANCY					 0x1402fc
+/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
 #define PBF_REG_P1_CREDIT					 0x140208
-/* [RW 11] Initial credit for port 1 in the tx port buffers in 16 byte
-   lines. */
+/* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
 #define PBF_REG_P1_INIT_CRD					 0x1400d4
-/* [R 8] Number of tasks in port 1 task queue. */
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * port 1. Reset upon init. */
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT			 0x14030c
+/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
 #define PBF_REG_P1_TASK_CNT					 0x14020c
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 1. Reset upon init. */
+#define PBF_REG_P1_TQ_LINES_FREED_CNT				 0x1402f4
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */
+#define PBF_REG_P1_TQ_OCCUPANCY					 0x140300
 /* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
 #define PBF_REG_P4_CREDIT					 0x140210
 /* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
    lines. */
 #define PBF_REG_P4_INIT_CRD					 0x1400e0
-/* [R 8] Number of tasks in port 4 task queue. */
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * port 4. Reset upon init. */
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT			 0x140310
+/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
 #define PBF_REG_P4_TASK_CNT					 0x140214
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 4. Reset upon init. */
+#define PBF_REG_P4_TQ_LINES_FREED_CNT				 0x1402f8
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */
+#define PBF_REG_P4_TQ_OCCUPANCY					 0x140304
 /* [RW 5] Interrupt mask register #0 read/write */
 #define PBF_REG_PBF_INT_MASK					 0x1401d4
 /* [R 5] Interrupt register #0 read */
@@ -2116,6 +2215,27 @@
 #define PBF_REG_PBF_PRTY_MASK					 0x1401e4
 /* [RC 20] Parity register #0 read clear */
 #define PBF_REG_PBF_PRTY_STS_CLR				 0x1401dc
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PBF_REG_TAG_ETHERTYPE_0					 0x15c090
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PBF_REG_TAG_LEN_0					 0x15c09c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
+ * queue. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q				 0x14038c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
+ * queue 0. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0				 0x140390
+/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
+ * Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1				 0x140394
+/* [R 13] Number of 8 bytes lines occupied in the task queue of the LB
+ * queue. */
+#define PBF_REG_TQ_OCCUPANCY_LB_Q				 0x1403a8
+/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */
+#define PBF_REG_TQ_OCCUPANCY_Q0					 0x1403ac
+/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
+#define PBF_REG_TQ_OCCUPANCY_Q1					 0x1403b0
 #define PB_REG_CONTROL						 0
 /* [RW 2] Interrupt mask register #0 read/write */
 #define PB_REG_PB_INT_MASK					 0x28
@@ -2445,10 +2565,24 @@
 /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
  * Ethernet header. */
 #define PRS_REG_HDRS_AFTER_BASIC				 0x40238
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header for port 0 packets. */
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0				 0x40270
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1				 0x40290
+/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PRS_REG_HDRS_AFTER_TAG_0				 0x40248
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
+ * port 0 packets */
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0				 0x40280
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1				 0x402a0
 /* [RW 4] The increment value to send in the CFC load request message */
 #define PRS_REG_INC_VALUE					 0x40048
 /* [RW 6] Bit-map indicating which headers must appear in the packet */
 #define PRS_REG_MUST_HAVE_HDRS					 0x40254
+/* [RW 6] Bit-map indicating which headers must appear in the packet for
+ * port 0 packets */
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0				 0x4028c
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1				 0x402ac
 #define PRS_REG_NIC_MODE					 0x40138
 /* [RW 8] The 8-bit event ID for cases where there is no match on the
    connection. Used in packet start message to TCM. */
@@ -2497,6 +2631,11 @@
 #define PRS_REG_SERIAL_NUM_STATUS_MSB				 0x40158
 /* [R 4] debug only: SRC current credit. Transaction based. */
 #define PRS_REG_SRC_CURRENT_CREDIT				 0x4016c
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PRS_REG_TAG_ETHERTYPE_0					 0x401d4
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PRS_REG_TAG_LEN_0					 0x4022c
 /* [R 8] debug only: TCM current credit. Cycle based. */
 #define PRS_REG_TCM_CURRENT_CREDIT				 0x40160
 /* [R 8] debug only: TSDM current credit. Transaction based. */
@@ -3081,6 +3220,7 @@
 #define QM_REG_BYTECREDITAFULLTHR				 0x168094
 /* [RW 4] The initial credit for interface */
 #define QM_REG_CMINITCRD_0					 0x1680cc
+#define QM_REG_BYTECRDCMDQ_0					 0x16e6e8
 #define QM_REG_CMINITCRD_1					 0x1680d0
 #define QM_REG_CMINITCRD_2					 0x1680d4
 #define QM_REG_CMINITCRD_3					 0x1680d8
@@ -3171,7 +3311,10 @@
 /* [RW 2] The PCI attributes field used in the PCI request. */
 #define QM_REG_PCIREQAT 					 0x168054
 #define QM_REG_PF_EN						 0x16e70c
-/* [R 16] The byte credit of port 0 */
+/* [R 24] The number of tasks stored in the QM for the PF. only even
+ * functions are valid in E2 (odd I registers will be hard wired to 0) */
+#define QM_REG_PF_USG_CNT_0					 0x16e040
+/* [R 16] NOT USED */
 #define QM_REG_PORT0BYTECRD					 0x168300
 /* [R 16] The byte credit of port 1 */
 #define QM_REG_PORT1BYTECRD					 0x168304
@@ -3783,6 +3926,8 @@
 #define TM_REG_LIN0_LOGIC_ADDR					 0x164240
 /* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
 #define TM_REG_LIN0_MAX_ACTIVE_CID				 0x164048
+/* [ST 16] Linear0 Number of scans counter. */
+#define TM_REG_LIN0_NUM_SCANS					 0x1640a0
 /* [WB 64] Linear0 phy address. */
 #define TM_REG_LIN0_PHY_ADDR					 0x164270
 /* [RW 1] Linear0 physical address valid. */
@@ -3790,6 +3935,7 @@
 #define TM_REG_LIN0_SCAN_ON					 0x1640d0
 /* [RW 24] Linear0 array scan timeout. */
 #define TM_REG_LIN0_SCAN_TIME					 0x16403c
+#define TM_REG_LIN0_VNIC_UC					 0x164128
 /* [RW 32] Linear1 logic address. */
 #define TM_REG_LIN1_LOGIC_ADDR					 0x164250
 /* [WB 64] Linear1 phy address. */
@@ -4845,8 +4991,10 @@
 #define XSDM_REG_NUM_OF_Q8_CMD					 0x166264
 /* [ST 32] The number of commands received in queue 9 */
 #define XSDM_REG_NUM_OF_Q9_CMD					 0x166268
-/* [RW 13] The start address in the internal RAM for queue counters */
-#define XSDM_REG_Q_COUNTER_START_ADDR				 0x166010
+/* [W 17] Generate an operation after completion; bit-16 is
+ * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
+ * bits 4:0 are the T124Param[4:0] */
+#define XSDM_REG_OPERATION_GEN					 0x1664c4
 /* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
 #define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY			 0x166548
 /* [R 1] parser fifo empty in sdm_sync block */
@@ -5129,6 +5277,8 @@
 #define MISC_REGISTERS_RESET_REG_1_RST_PXPV			 (0x1<<27)
 #define MISC_REGISTERS_RESET_REG_1_SET				 0x584
 #define MISC_REGISTERS_RESET_REG_2_CLEAR			 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0			 (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1			 (0x1<<25)
 #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0			 (0x1<<0)
 #define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE		 (0x1<<14)
 #define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE		 (0x1<<15)
@@ -5161,6 +5311,7 @@
 #define MISC_REGISTERS_SPIO_OUTPUT_HIGH 			 1
 #define MISC_REGISTERS_SPIO_OUTPUT_LOW				 0
 #define MISC_REGISTERS_SPIO_SET_POS				 8
+#define HW_LOCK_DRV_FLAGS					 10
 #define HW_LOCK_MAX_RESOURCE_VALUE				 31
 #define HW_LOCK_RESOURCE_GPIO					 1
 #define HW_LOCK_RESOURCE_MDIO					 0
@@ -5168,7 +5319,6 @@
 #define HW_LOCK_RESOURCE_RESERVED_08				 8
 #define HW_LOCK_RESOURCE_SPIO					 2
 #define HW_LOCK_RESOURCE_UNDI					 5
-#define PRS_FLAG_OVERETH_IPV4					 1
 #define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT		      (0x1<<4)
 #define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR		      (0x1<<5)
 #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR		      (1<<18)
@@ -5320,6 +5470,8 @@
 #define GRCBASE_PXP2		0x120000
 #define GRCBASE_PBF		0x140000
 #define GRCBASE_XPB		0x161000
+#define GRCBASE_MSTAT0	    0x162000
+#define GRCBASE_MSTAT1	    0x162800
 #define GRCBASE_TIMERS		0x164000
 #define GRCBASE_XSDM		0x166000
 #define GRCBASE_QM		0x168000
@@ -6243,11 +6395,6 @@ Theotherbitsarereservedandshouldbezero*/
 #define IGU_ADDR_MSI_ADDR_HI	0x0212
 #define IGU_ADDR_MSI_DATA		0x0213
 
-#define IGU_INT_ENABLE			0
-#define IGU_INT_DISABLE 		1
-#define IGU_INT_NOP				2
-#define IGU_INT_NOP2			3
-
 #define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup  0
 #define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup  1
 #define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup  2
@@ -6318,15 +6465,6 @@ Theotherbitsarereservedandshouldbezero*/
 #define IGU_BC_BASE_DSB_PROD   128
 #define IGU_NORM_BASE_DSB_PROD 136
 
-#define IGU_CTRL_CMD_TYPE_WR\
-	1
-#define IGU_CTRL_CMD_TYPE_RD\
-	0
-
-#define IGU_SEG_ACCESS_NORM   0
-#define IGU_SEG_ACCESS_DEF    1
-#define IGU_SEG_ACCESS_ATTN   2
-
 	/* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
 	[5:2] = 0; [1:0] = PF number) */
 #define IGU_FID_ENCODE_IS_PF	    (0x1<<6)
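The new per-queue PBF credit and occupancy registers above lend themselves to a small debug dump; a hedged sketch using the driver's REG_RD() accessor (the helper name is hypothetical):

	/* Sample the E3 B0 per-queue PBF counters for queue 0 */
	static void example_dump_pbf_q0(struct bnx2x *bp)
	{
		u32 crd = REG_RD(bp, PBF_REG_CREDIT_Q0);	/* [R 11] */
		u32 occ = REG_RD(bp, PBF_REG_TQ_OCCUPANCY_Q0);	/* [R 13] */
		u32 freed = REG_RD(bp, PBF_REG_INTERNAL_CRD_FREED_CNT_Q0);

		DP(BNX2X_MSG_SP, "PBF Q0: credit %u occupancy %u freed %u\n",
		   crd, occ, freed);
	}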

+ 5131 - 617
drivers/net/bnx2x/bnx2x_sp.c

@@ -1,3 +1,21 @@
+/* bnx2x_sp.c: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
 #include <linux/version.h>
 #include <linux/module.h>
 #include <linux/crc32.h>
@@ -8,812 +26,5308 @@
 #include "bnx2x_cmn.h"
 #include "bnx2x_sp.h"
 
+#define BNX2X_MAX_EMUL_MULTI		16
+
+/**** Exe Queue interfaces ****/
 
 /**
- * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
+ * bnx2x_exe_queue_init - init the Exe Queue object
  *
- * @bp:		driver handle
- * @set:	set or clear an entry (1 or 0)
- * @mac:	pointer to a buffer containing a MAC
- * @cl_bit_vec:	bit vector of clients to register a MAC for
- * @cam_offset:	offset in a CAM to use
- * @is_bcast:	is the set MAC a broadcast address (for E1 only)
+ * @o:		pointer to the object
+ * @exe_len:	length
+ * @owner:	pointer to the owner
+ * @validate:	validate function pointer
+ * @optimize:	optimize function pointer
+ * @exec:	execute function pointer
+ * @get:	get function pointer
  */
-void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
-			    u32 cl_bit_vec, u8 cam_offset,
-			    u8 is_bcast)
+static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
+					struct bnx2x_exe_queue_obj *o,
+					int exe_len,
+					union bnx2x_qable_obj *owner,
+					exe_q_validate validate,
+					exe_q_optimize optimize,
+					exe_q_execute exec,
+					exe_q_get get)
 {
-	struct mac_configuration_cmd *config =
-		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
-	int ramrod_flags = WAIT_RAMROD_COMMON;
-
-	bp->set_mac_pending = 1;
-
-	config->hdr.length = 1;
-	config->hdr.offset = cam_offset;
-	config->hdr.client_id = 0xff;
-	/* Mark the single MAC configuration ramrod as opposed to a
-	 * UC/MC list configuration).
-	 */
-	config->hdr.echo = 1;
-
-	/* primary MAC */
-	config->config_table[0].msb_mac_addr =
-					swab16(*(u16 *)&mac[0]);
-	config->config_table[0].middle_mac_addr =
-					swab16(*(u16 *)&mac[2]);
-	config->config_table[0].lsb_mac_addr =
-					swab16(*(u16 *)&mac[4]);
-	config->config_table[0].clients_bit_vector =
-					cpu_to_le32(cl_bit_vec);
-	config->config_table[0].vlan_id = 0;
-	config->config_table[0].pf_id = BP_FUNC(bp);
-	if (set)
-		SET_FLAG(config->config_table[0].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_SET);
-	else
-		SET_FLAG(config->config_table[0].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_INVALIDATE);
+	memset(o, 0, sizeof(*o));
 
-	if (is_bcast)
-		SET_FLAG(config->config_table[0].flags,
-			MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
+	INIT_LIST_HEAD(&o->exe_queue);
+	INIT_LIST_HEAD(&o->pending_comp);
 
-	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
-	   (set ? "setting" : "clearing"),
-	   config->config_table[0].msb_mac_addr,
-	   config->config_table[0].middle_mac_addr,
-	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
+	spin_lock_init(&o->lock);
 
-	mb();
+	o->exe_chunk_len = exe_len;
+	o->owner         = owner;
 
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
-		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
+	/* Owner specific callbacks */
+	o->validate      = validate;
+	o->optimize      = optimize;
+	o->execute       = exec;
+	o->get           = get;
 
-	/* Wait for a completion */
-	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
+	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
+			 "length of %d\n", exe_len);
 }
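As a usage sketch, an owner object wires its own verbs into the queue at init time; the callback names below are illustrative placeholders for the per-object implementations defined later in this file:

	bnx2x_exe_queue_init(bp, &mac_obj->exe_queue,
			     1 /* one command per execution chunk */,
			     (union bnx2x_qable_obj *)mac_obj,
			     bnx2x_validate_vlan_mac,	/* validate */
			     bnx2x_optimize_vlan_mac,	/* optimize */
			     bnx2x_execute_vlan_mac,	/* exec */
			     bnx2x_exeq_get_mac);	/* get */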
 
+static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
+					     struct bnx2x_exeq_elem *elem)
+{
+	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
+	kfree(elem);
+}
 
-static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
+static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
 {
-	return CHIP_REV_IS_SLOW(bp) ?
-		(BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
-		(BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
+	struct bnx2x_exeq_elem *elem;
+	int cnt = 0;
+
+	spin_lock_bh(&o->lock);
+
+	list_for_each_entry(elem, &o->exe_queue, link)
+		cnt++;
+
+	spin_unlock_bh(&o->lock);
+
+	return cnt;
 }
 
-/* set mc list, do not wait as wait implies sleep and
- * set_rx_mode can be invoked from non-sleepable context.
+/**
+ * bnx2x_exe_queue_add - add a new element to the execution queue
+ *
+ * @bp:		driver handle
+ * @o:		queue
+ * @cmd:	new command to add
+ * @restore:	true - do not optimize the command
  *
- * Instead we use the same ramrod data buffer each time we need
- * to configure a list of addresses, and use the fact that the
- * list of MACs is changed in an incremental way and that the
- * function is called under the netif_addr_lock. A temporary
- * inconsistent CAM configuration (possible in case of a very fast
- * sequence of add/del/add on the host side) will shortly be
- * restored by the handler of the last ramrod.
+ * If the element is optimized out or is illegal, it is freed.
  */
-int bnx2x_set_e1_mc_list(struct bnx2x *bp)
+static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
+				      struct bnx2x_exe_queue_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      bool restore)
 {
-	int i = 0, old;
-	struct net_device *dev = bp->dev;
-	u8 offset = bnx2x_e1_cam_mc_offset(bp);
-	struct netdev_hw_addr *ha;
-	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
-	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
+	int rc;
 
-	if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
-		return -EINVAL;
+	spin_lock_bh(&o->lock);
 
-	netdev_for_each_mc_addr(ha, dev) {
-		/* copy mac */
-		config_cmd->config_table[i].msb_mac_addr =
-			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
-		config_cmd->config_table[i].middle_mac_addr =
-			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
-		config_cmd->config_table[i].lsb_mac_addr =
-			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
-
-		config_cmd->config_table[i].vlan_id = 0;
-		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
-		config_cmd->config_table[i].clients_bit_vector =
-			cpu_to_le32(1 << BP_L_ID(bp));
-
-		SET_FLAG(config_cmd->config_table[i].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_SET);
-
-		DP(NETIF_MSG_IFUP,
-		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
-		   config_cmd->config_table[i].msb_mac_addr,
-		   config_cmd->config_table[i].middle_mac_addr,
-		   config_cmd->config_table[i].lsb_mac_addr);
-		i++;
-	}
-	old = config_cmd->hdr.length;
-	if (old > i) {
-		for (; i < old; i++) {
-			if (CAM_IS_INVALID(config_cmd->
-					   config_table[i])) {
-				/* already invalidated */
-				break;
-			}
-			/* invalidate */
-			SET_FLAG(config_cmd->config_table[i].flags,
-				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-				T_ETH_MAC_COMMAND_INVALIDATE);
+	if (!restore) {
+		/* Try to cancel (optimize away) this element */
+		rc = o->optimize(bp, o->owner, elem);
+		if (rc)
+			goto free_and_exit;
+
+		/* Check if this request is ok */
+		rc = o->validate(bp, o->owner, elem);
+		if (rc) {
+			BNX2X_ERR("Preamble failed: %d\n", rc);
+			goto free_and_exit;
 		}
 	}
 
-	wmb();
-
-	config_cmd->hdr.length = i;
-	config_cmd->hdr.offset = offset;
-	config_cmd->hdr.client_id = 0xff;
-	/* Mark that this ramrod doesn't use bp->set_mac_pending for
-	 * synchronization.
-	 */
-	config_cmd->hdr.echo = 0;
-
-	mb();
-
-	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
-}
+	/* If so, add it to the execution queue */
+	list_add_tail(&elem->link, &o->exe_queue);
 
-void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
-{
-	int i;
-	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
-	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
-	int ramrod_flags = WAIT_RAMROD_COMMON;
-	u8 offset = bnx2x_e1_cam_mc_offset(bp);
+	spin_unlock_bh(&o->lock);
 
-	for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
-		SET_FLAG(config_cmd->config_table[i].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_INVALIDATE);
+	return 0;
 
-	wmb();
+free_and_exit:
+	bnx2x_exe_queue_free_elem(bp, elem);
 
-	config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
-	config_cmd->hdr.offset = offset;
-	config_cmd->hdr.client_id = 0xff;
-	/* We'll wait for a completion this time... */
-	config_cmd->hdr.echo = 1;
+	spin_unlock_bh(&o->lock);
 
-	bp->set_mac_pending = 1;
+	return rc;
 
-	mb();
+}
 
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+static inline void __bnx2x_exe_queue_reset_pending(
+	struct bnx2x *bp,
+	struct bnx2x_exe_queue_obj *o)
+{
+	struct bnx2x_exeq_elem *elem;
 
-	/* Wait for a completion */
-	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
-				ramrod_flags);
+	while (!list_empty(&o->pending_comp)) {
+		elem = list_first_entry(&o->pending_comp,
+					struct bnx2x_exeq_elem, link);
 
+		list_del(&elem->link);
+		bnx2x_exe_queue_free_elem(bp, elem);
+	}
 }
 
-/* Accept one or more multicasts */
-int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
+static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
+						 struct bnx2x_exe_queue_obj *o)
 {
-	struct net_device *dev = bp->dev;
-	struct netdev_hw_addr *ha;
-	u32 mc_filter[MC_HASH_SIZE];
-	u32 crc, bit, regidx;
-	int i;
 
-	memset(mc_filter, 0, 4 * MC_HASH_SIZE);
+	spin_lock_bh(&o->lock);
 
-	netdev_for_each_mc_addr(ha, dev) {
-		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
-		   bnx2x_mc_addr(ha));
-
-		crc = crc32c_le(0, bnx2x_mc_addr(ha),
-				ETH_ALEN);
-		bit = (crc >> 24) & 0xff;
-		regidx = bit >> 5;
-		bit &= 0x1f;
-		mc_filter[regidx] |= (1 << bit);
-	}
+	__bnx2x_exe_queue_reset_pending(bp, o);
 
-	for (i = 0; i < MC_HASH_SIZE; i++)
-		REG_WR(bp, MC_HASH_OFFSET(bp, i),
-		       mc_filter[i]);
+	spin_unlock_bh(&o->lock);
 
-	return 0;
 }
 
-void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
+/**
+ * bnx2x_exe_queue_step - execute one execution chunk atomically
+ *
+ * @bp:			driver handle
+ * @o:			queue
+ * @ramrod_flags:	flags
+ *
+ * (Atomicity is ensured using the exe_queue->lock).
+ */
+static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
+				       struct bnx2x_exe_queue_obj *o,
+				       unsigned long *ramrod_flags)
 {
-	int i;
+	struct bnx2x_exeq_elem *elem, spacer;
+	int cur_len = 0, rc;
 
-	for (i = 0; i < MC_HASH_SIZE; i++)
-		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
-}
+	memset(&spacer, 0, sizeof(spacer));
 
-/* must be called under rtnl_lock */
-void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
-{
-	u32 mask = (1 << cl_id);
+	spin_lock_bh(&o->lock);
 
-	/* initial seeting is BNX2X_ACCEPT_NONE */
-	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
-	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
-	u8 unmatched_unicast = 0;
+	/*
+	 * Next step should not be performed until the current is finished,
+	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
+	 * properly clear object internals without sending any command to the FW
+	 * which also implies there won't be any completion to clear the
+	 * 'pending' list.
+	 */
+	if (!list_empty(&o->pending_comp)) {
+		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
+					 "resetting pending_comp\n");
+			__bnx2x_exe_queue_reset_pending(bp, o);
+		} else {
+			spin_unlock_bh(&o->lock);
+			return 1;
+		}
+	}
 
-	if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
-		unmatched_unicast = 1;
+	/*
+	 * Run through the pending commands list and create a next
+	 * execution chunk.
+	 */
+	while (!list_empty(&o->exe_queue)) {
+		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
+					link);
+		WARN_ON(!elem->cmd_len);
 
-	if (filters & BNX2X_PROMISCUOUS_MODE) {
-		/* promiscious - accept all, drop none */
-		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
-		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
-		if (IS_MF_SI(bp)) {
+		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
+			cur_len += elem->cmd_len;
 			/*
-			 * SI mode defines to accept in promiscuos mode
-			 * only unmatched packets
+			 * Prevent from both lists being empty when moving an
+			 * element. This will allow the call of
+			 * bnx2x_exe_queue_empty() without locking.
 			 */
-			unmatched_unicast = 1;
-			accp_all_ucast = 0;
-		}
+			list_add_tail(&spacer.link, &o->pending_comp);
+			mb();
+			list_del(&elem->link);
+			list_add_tail(&elem->link, &o->pending_comp);
+			list_del(&spacer.link);
+		} else
+			break;
 	}
-	if (filters & BNX2X_ACCEPT_UNICAST) {
-		/* accept matched ucast */
-		drop_all_ucast = 0;
-	}
-	if (filters & BNX2X_ACCEPT_MULTICAST)
-		/* accept matched mcast */
-		drop_all_mcast = 0;
 
-	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
-		/* accept all mcast */
-		drop_all_ucast = 0;
-		accp_all_ucast = 1;
-	}
-	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
-		/* accept all mcast */
-		drop_all_mcast = 0;
-		accp_all_mcast = 1;
-	}
-	if (filters & BNX2X_ACCEPT_BROADCAST) {
-		/* accept (all) bcast */
-		drop_all_bcast = 0;
-		accp_all_bcast = 1;
+	/* Sanity check */
+	if (!cur_len) {
+		spin_unlock_bh(&o->lock);
+		return 0;
 	}
 
-	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
-		bp->mac_filters.ucast_drop_all | mask :
-		bp->mac_filters.ucast_drop_all & ~mask;
+	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
+	if (rc < 0)
+		/*
+		 *  In case of an error return the commands back to the queue
+		 *  and reset the pending_comp.
+		 */
+		list_splice_init(&o->pending_comp, &o->exe_queue);
+	else if (!rc)
+		/*
+		 * A return value of zero means there are no outstanding
+		 * pending completions and we may dismiss the pending list.
+		 */
+		__bnx2x_exe_queue_reset_pending(bp, o);
 
-	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
-		bp->mac_filters.mcast_drop_all | mask :
-		bp->mac_filters.mcast_drop_all & ~mask;
+	spin_unlock_bh(&o->lock);
+	return rc;
+}
 
-	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
-		bp->mac_filters.bcast_drop_all | mask :
-		bp->mac_filters.bcast_drop_all & ~mask;
+static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
+{
+	bool empty = list_empty(&o->exe_queue);
 
-	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
-		bp->mac_filters.ucast_accept_all | mask :
-		bp->mac_filters.ucast_accept_all & ~mask;
+	/* Don't reorder!!! */
+	mb();
 
-	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
-		bp->mac_filters.mcast_accept_all | mask :
-		bp->mac_filters.mcast_accept_all & ~mask;
+	return empty && list_empty(&o->pending_comp);
+}
 
-	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
-		bp->mac_filters.bcast_accept_all | mask :
-		bp->mac_filters.bcast_accept_all & ~mask;
+static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
+	struct bnx2x *bp)
+{
+	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
+	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
+}
 
-	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
-		bp->mac_filters.unmatched_unicast | mask :
-		bp->mac_filters.unmatched_unicast & ~mask;
+/************************ raw_obj functions ***********************************/
+static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
+{
+	return !!test_bit(o->state, o->pstate);
 }
 
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
 {
-	int mode = bp->rx_mode;
-	int port = BP_PORT(bp);
-	u16 cl_id;
-	u32 def_q_filters = 0;
+	smp_mb__before_clear_bit();
+	clear_bit(o->state, o->pstate);
+	smp_mb__after_clear_bit();
+}
 
-	/* All but management unicast packets should pass to the host as well */
-	u32 llh_mask =
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
-		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
+static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
+{
+	smp_mb__before_clear_bit();
+	set_bit(o->state, o->pstate);
+	smp_mb__after_clear_bit();
+}
 
-	switch (mode) {
-	case BNX2X_RX_MODE_NONE: /* no Rx */
-		def_q_filters = BNX2X_ACCEPT_NONE;
-#ifdef BCM_CNIC
-		if (!NO_FCOE(bp)) {
-			cl_id = bnx2x_fcoe(bp, cl_id);
-			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
-		}
-#endif
-		break;
+/**
+ * bnx2x_state_wait - wait until the given bit(state) is cleared
+ *
+ * @bp:		device handle
+ * @state:	state which is to be cleared
+ * @state_p:	state buffer
+ *
+ */
+static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
+				   unsigned long *pstate)
+{
+	/* can take a while if any port is running */
+	int cnt = 5000;
 
-	case BNX2X_RX_MODE_NORMAL:
-		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
-				BNX2X_ACCEPT_MULTICAST;
-#ifdef BCM_CNIC
-		if (!NO_FCOE(bp)) {
-			cl_id = bnx2x_fcoe(bp, cl_id);
-			bnx2x_rxq_set_mac_filters(bp, cl_id,
-						  BNX2X_ACCEPT_UNICAST |
-						  BNX2X_ACCEPT_MULTICAST);
-		}
-#endif
-		break;
 
-	case BNX2X_RX_MODE_ALLMULTI:
-		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
-				BNX2X_ACCEPT_ALL_MULTICAST;
-#ifdef BCM_CNIC
-		/*
-		 *  Prevent duplication of multicast packets by configuring FCoE
-		 *  L2 Client to receive only matched unicast frames.
-		 */
-		if (!NO_FCOE(bp)) {
-			cl_id = bnx2x_fcoe(bp, cl_id);
-			bnx2x_rxq_set_mac_filters(bp, cl_id,
-						  BNX2X_ACCEPT_UNICAST);
-		}
-#endif
-		break;
+	if (CHIP_REV_IS_EMUL(bp))
+		cnt *= 20;
 
-	case BNX2X_RX_MODE_PROMISC:
-		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
-#ifdef BCM_CNIC
-		/*
-		 *  Prevent packets duplication by configuring DROP_ALL for FCoE
-		 *  L2 Client.
-		 */
-		if (!NO_FCOE(bp)) {
-			cl_id = bnx2x_fcoe(bp, cl_id);
-			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
-		}
+	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
+
+	might_sleep();
+	while (cnt--) {
+		if (!test_bit(state, pstate)) {
+#ifdef BNX2X_STOP_ON_ERROR
+			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
 #endif
-		/* pass management unicast packets as well */
-		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
-		break;
+			return 0;
+		}
 
-	default:
-		BNX2X_ERR("BAD rx mode (%d)\n", mode);
-		break;
-	}
+		usleep_range(1000, 1000);
 
-	cl_id = BP_L_ID(bp);
-	bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
+		if (bp->panic)
+			return -EIO;
+	}
 
-	REG_WR(bp,
-	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
-		       NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
+	/* timeout! */
+	BNX2X_ERR("timeout waiting for state %d\n", state);
+#ifdef BNX2X_STOP_ON_ERROR
+	bnx2x_panic();
+#endif
 
-	DP(NETIF_MSG_IFUP, "rx mode %d\n"
-		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
-		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
-		"unmatched_ucast 0x%x\n", mode,
-		bp->mac_filters.ucast_drop_all,
-		bp->mac_filters.mcast_drop_all,
-		bp->mac_filters.bcast_drop_all,
-		bp->mac_filters.ucast_accept_all,
-		bp->mac_filters.mcast_accept_all,
-		bp->mac_filters.bcast_accept_all,
-		bp->mac_filters.unmatched_unicast
-	);
+	return -EBUSY;
+}
 
-	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
+static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
+{
+	return bnx2x_state_wait(bp, raw->state, raw->pstate);
 }
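Taken together, the raw-object helpers above implement a simple in-flight handshake: the submitter sets the state bit before posting a ramrod and the completion path clears it. A hedged sketch of the typical sequence (post_ramrod() is a hypothetical stand-in for the object's execute step):

	bnx2x_raw_set_pending(&o->raw);		/* mark command in flight */
	rc = post_ramrod(bp, o);		/* hypothetical submit */
	if (!rc && test_bit(RAMROD_COMP_WAIT, ramrod_flags))
		rc = bnx2x_raw_wait(bp, &o->raw);	/* poll until cleared */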
 
-/* RSS configuration */
-static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
-				       u32 addr, dma_addr_t mapping)
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/* credit handling callbacks */
+static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
 {
-	REG_WR(bp,  addr, U64_LO(mapping));
-	REG_WR(bp,  addr + 4, U64_HI(mapping));
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	WARN_ON(!mp);
+
+	return mp->get_entry(mp, offset);
 }
 
-static inline void __storm_fill(struct bnx2x *bp,
-				       u32 addr, size_t size, u32 val)
+static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
 {
-	int i;
-	for (i = 0; i < size/4; i++)
-		REG_WR(bp,  addr + (i * 4), val);
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	WARN_ON(!mp);
+
+	return mp->get(mp, 1);
 }
 
-static inline void storm_memset_ustats_zero(struct bnx2x *bp,
-					    u8 port, u16 stat_id)
+static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
 {
-	size_t size = sizeof(struct ustorm_per_client_stats);
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
 
-	u32 addr = BAR_USTRORM_INTMEM +
-			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+	WARN_ON(!vp);
 
-	__storm_fill(bp, addr, size, 0);
+	return vp->get_entry(vp, offset);
 }
 
-static inline void storm_memset_tstats_zero(struct bnx2x *bp,
-					    u8 port, u16 stat_id)
+static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
 {
-	size_t size = sizeof(struct tstorm_per_client_stats);
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
 
-	u32 addr = BAR_TSTRORM_INTMEM +
-			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+	WARN_ON(!vp);
 
-	__storm_fill(bp, addr, size, 0);
+	return vp->get(vp, 1);
 }
 
-static inline void storm_memset_xstats_zero(struct bnx2x *bp,
-					    u8 port, u16 stat_id)
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
 {
-	size_t size = sizeof(struct xstorm_per_client_stats);
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->get(mp, 1))
+		return false;
 
-	u32 addr = BAR_XSTRORM_INTMEM +
-			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
+	if (!vp->get(vp, 1)) {
+		mp->put(mp, 1);
+		return false;
+	}
 
-	__storm_fill(bp, addr, size, 0);
+	return true;
 }
 
+static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	return mp->put_entry(mp, offset);
+}
 
-static inline void storm_memset_spq_addr(struct bnx2x *bp,
-					 dma_addr_t mapping, u16 abs_fid)
+static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
 {
-	u32 addr = XSEM_REG_FAST_MEMORY +
-			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
 
-	__storm_memset_dma_mapping(bp, addr, mapping);
+	return mp->put(mp, 1);
 }
 
-static inline void storm_memset_xstats_flags(struct bnx2x *bp,
-				struct stats_indication_flags *flags,
-				u16 abs_fid)
+static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
 {
-	size_t size = sizeof(struct stats_indication_flags);
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	return vp->put_entry(vp, offset);
+}
 
-	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
+static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
 
-	__storm_memset_struct(bp, addr, size, (u32 *)flags);
+	return vp->put(vp, 1);
 }
 
-static inline void storm_memset_tstats_flags(struct bnx2x *bp,
-				struct stats_indication_flags *flags,
-				u16 abs_fid)
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
 {
-	size_t size = sizeof(struct stats_indication_flags);
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->put(mp, 1))
+		return false;
 
-	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
+	if (!vp->put(vp, 1)) {
+		mp->get(mp, 1);
+		return false;
+	}
 
-	__storm_memset_struct(bp, addr, size, (u32 *)flags);
+	return true;
 }
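Note the rollback in the combined VLAN+MAC callbacks: a failure to take the second (VLAN) credit releases the MAC credit already taken, so the pair is charged atomically from the caller's point of view. A sketch of the intended use, assuming the object exposes these callbacks through get_credit/put_credit members:

	if (!o->get_credit(o))	/* takes one MAC and one VLAN credit, or none */
		return -ENOMEM;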
 
-static inline void storm_memset_ustats_flags(struct bnx2x *bp,
-				struct stats_indication_flags *flags,
-				u16 abs_fid)
+/* check_add() callbacks */
+static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
+			       union bnx2x_classification_ramrod_data *data)
 {
-	size_t size = sizeof(struct stats_indication_flags);
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	if (!is_valid_ether_addr(data->mac.mac))
+		return -EINVAL;
 
-	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
+	/* Check if a requested MAC already exists */
+	list_for_each_entry(pos, &o->head, link)
+		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+			return -EEXIST;
 
-	__storm_memset_struct(bp, addr, size, (u32 *)flags);
+	return 0;
 }
 
-static inline void storm_memset_cstats_flags(struct bnx2x *bp,
-				struct stats_indication_flags *flags,
-				u16 abs_fid)
+static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
+				union bnx2x_classification_ramrod_data *data)
 {
-	size_t size = sizeof(struct stats_indication_flags);
+	struct bnx2x_vlan_mac_registry_elem *pos;
 
-	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
+	list_for_each_entry(pos, &o->head, link)
+		if (data->vlan.vlan == pos->u.vlan.vlan)
+			return -EEXIST;
 
-	__storm_memset_struct(bp, addr, size, (u32 *)flags);
+	return 0;
 }
 
-static inline void storm_memset_xstats_addr(struct bnx2x *bp,
-					   dma_addr_t mapping, u16 abs_fid)
+static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
+				   union bnx2x_classification_ramrod_data *data)
 {
-	u32 addr = BAR_XSTRORM_INTMEM +
-		XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)))
+			return -EEXIST;
 
-	__storm_memset_dma_mapping(bp, addr, mapping);
+	return 0;
 }
 
-static inline void storm_memset_tstats_addr(struct bnx2x *bp,
-					   dma_addr_t mapping, u16 abs_fid)
+
+/* check_del() callbacks */
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
+			    union bnx2x_classification_ramrod_data *data)
 {
-	u32 addr = BAR_TSTRORM_INTMEM +
-		TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+			return pos;
 
-	__storm_memset_dma_mapping(bp, addr, mapping);
+	return NULL;
 }
 
-static inline void storm_memset_ustats_addr(struct bnx2x *bp,
-					   dma_addr_t mapping, u16 abs_fid)
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
+			     union bnx2x_classification_ramrod_data *data)
 {
-	u32 addr = BAR_USTRORM_INTMEM +
-		USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+	struct bnx2x_vlan_mac_registry_elem *pos;
 
-	__storm_memset_dma_mapping(bp, addr, mapping);
+	list_for_each_entry(pos, &o->head, link)
+		if (data->vlan.vlan == pos->u.vlan.vlan)
+			return pos;
+
+	return NULL;
 }
 
-static inline void storm_memset_cstats_addr(struct bnx2x *bp,
-					   dma_addr_t mapping, u16 abs_fid)
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
+				 union bnx2x_classification_ramrod_data *data)
 {
-	u32 addr = BAR_CSTRORM_INTMEM +
-		CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)))
+			return pos;
 
-	__storm_memset_dma_mapping(bp, addr, mapping);
+	return NULL;
 }
 
-static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
-					 u16 pf_id)
+/* check_move() callback */
+static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
+			     struct bnx2x_vlan_mac_obj *dst_o,
+			     union bnx2x_classification_ramrod_data *data)
 {
-	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
-		pf_id);
-	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
-		pf_id);
-	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
-		pf_id);
-	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
-		pf_id);
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	int rc;
+
+	/* Check if we can delete the requested configuration from the first
+	 * object.
+	 */
+	pos = src_o->check_del(src_o, data);
+
+	/* Check if the configuration can be added */
+	rc = dst_o->check_add(dst_o, data);
+
+	/* If this classification cannot be added (it is already set)
+	 * or cannot be deleted - return an error.
+	 */
+	if (rc || !pos)
+		return false;
+
+	return true;
 }
 
-static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
-					u8 enable)
+static bool bnx2x_check_move_always_err(
+	struct bnx2x_vlan_mac_obj *src_o,
+	struct bnx2x_vlan_mac_obj *dst_o,
+	union bnx2x_classification_ramrod_data *data)
 {
-	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
-		enable);
-	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
-		enable);
-	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
-		enable);
-	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
-		enable);
+	return false;
 }
 
-static inline void storm_memset_func_cfg(struct bnx2x *bp,
-				struct tstorm_eth_function_common_config *tcfg,
-				u16 abs_fid)
+
+static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
 {
-	size_t size = sizeof(struct tstorm_eth_function_common_config);
+	struct bnx2x_raw_obj *raw = &o->raw;
+	u8 rx_tx_flag = 0;
 
-	u32 addr = BAR_TSTRORM_INTMEM +
-			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
 
-	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
+
+	return rx_tx_flag;
 }
 
-void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+/* LLH CAM line allocations */
+enum {
+	LLH_CAM_ISCSI_ETH_LINE = 0,
+	LLH_CAM_ETH_LINE,
+	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+				 bool add, unsigned char *dev_addr, int index)
 {
-	struct tstorm_eth_function_common_config tcfg = {0};
-	u16 rss_flgs;
+	u32 wb_data[2];
+	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
+			 NIG_REG_LLH0_FUNC_MEM;
+
+	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+		return;
+
+	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
+			 (add ? "ADD" : "DELETE"), index);
+
+	if (add) {
+		/* LLH_FUNC_MEM is a u64 WB register */
+		reg_offset += 8*index;
 
-	/* tpa */
-	if (p->func_flgs & FUNC_FLG_TPA)
-		tcfg.config_flags |=
-		TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
+		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+			      (dev_addr[4] <<  8) |  dev_addr[5]);
+		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
 
-	/* set rss flags */
-	rss_flgs = (p->rss->mode <<
-		TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
+		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
+	}
 
-	if (p->rss->cap & RSS_IPV4_CAP)
-		rss_flgs |= RSS_IPV4_CAP_MASK;
-	if (p->rss->cap & RSS_IPV4_TCP_CAP)
-		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
-	if (p->rss->cap & RSS_IPV6_CAP)
-		rss_flgs |= RSS_IPV6_CAP_MASK;
-	if (p->rss->cap & RSS_IPV6_TCP_CAP)
-		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
+	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
+}
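As a worked example of the byte layout above, a MAC address of aa:bb:cc:dd:ee:ff produces the following write-buffer contents:

	/* dev_addr[] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff } */
	wb_data[0] = 0xccddeeff;	/* dev_addr[2..5] */
	wb_data[1] = 0x0000aabb;	/* dev_addr[0..1] */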
 
-	tcfg.config_flags |= rss_flgs;
-	tcfg.rss_result_mask = p->rss->result_mask;
+/**
+ * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
+ *
+ * @bp:		device handle
+ * @o:		queue for which we want to configure this rule
+ * @add:	if true the command is an ADD command, DEL otherwise
+ * @opcode:	CLASSIFY_RULE_OPCODE_XXX
+ * @hdr:	pointer to a header to setup
+ *
+ */
+static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
+	struct eth_classify_cmd_header *hdr)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
 
-	storm_memset_func_cfg(bp, &tcfg, p->func_id);
+	hdr->client_id = raw->cl_id;
+	hdr->func_id = raw->func_id;
 
-	/* Enable the function in the FW */
-	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
-	storm_memset_func_en(bp, p->func_id, 1);
+	/* Rx and/or Tx (internal switching) configuration? */
+	hdr->cmd_general_data |=
+		bnx2x_vlan_mac_get_rx_tx_flag(o);
 
-	/* statistics */
-	if (p->func_flgs & FUNC_FLG_STATS) {
-		struct stats_indication_flags stats_flags = {0};
-		stats_flags.collect_eth = 1;
+	if (add)
+		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
 
-		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
-		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
+	hdr->cmd_general_data |=
+		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
+}
 
-		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
-		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
+ *
+ * @cid:	connection id
+ * @type:	BNX2X_FILTER_XXX_PENDING
+ * @hdr:	pointer to a header to setup
+ * @rule_cnt:	number of rules in the ramrod data buffer
+ *
+ * Currently we always configure one rule and set the echo field to
+ * contain a CID and an opcode type.
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
+				struct eth_classify_header *hdr, int rule_cnt)
+{
+	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
+	hdr->rule_cnt = (u8)rule_cnt;
+}
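On the completion path the echo field is unpacked the same way it is packed here; a hedged sketch (the event-ring source of the echo value is illustrative):

	u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
	u32 cid  = echo & BNX2X_SWCID_MASK;
	int type = echo >> BNX2X_SWCID_SHIFT;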
 
-		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
-		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
 
-		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
-		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
+/* hw_config() callbacks */
+static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
+				 struct bnx2x_vlan_mac_obj *o,
+				 struct bnx2x_exeq_elem *elem, int rule_idx,
+				 int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
+	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
+
+	/*
+	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
+	 * relevant. In addition, current implementation is tuned for a
+	 * single ETH MAC.
+	 *
+	 * When multiple unicast ETH MACs PF configuration in switch
+	 * independent mode is required (NetQ, multiple netdev MACs,
+	 * etc.), consider better utilisation of 8 per function MAC
+	 * entries in the LLH register. There is also
+	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
+	 * total number of CAM entries to 16.
+	 *
+	 * Currently we won't configure NIG for MACs other than a primary ETH
+	 * MAC and iSCSI L2 MAC.
+	 *
+	 * If this MAC is moving from one Queue to another, no need to change
+	 * NIG configuration.
+	 */
+	if (cmd != BNX2X_VLAN_MAC_MOVE) {
+		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
+			bnx2x_set_mac_in_nig(bp, add, mac,
+					     LLH_CAM_ISCSI_ETH_LINE);
+		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
+			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
 	}
 
-	/* spq */
-	if (p->func_flgs & FUNC_FLG_SPQ) {
-		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
-		REG_WR(bp, XSEM_REG_FAST_MEMORY +
-		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Setup a command header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
+				      &rule_entry->mac.header);
+
+	DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
+			 "Queue %d\n", (add ? "add" : "delete"),
+			 BNX2X_MAC_PRN_LIST(mac), raw->cl_id);
+
+	/* Set a MAC itself */
+	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+			      &rule_entry->mac.mac_mid,
+			      &rule_entry->mac.mac_lsb, mac);
+
+	/* MOVE: Add a rule that will add this MAC to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_MAC,
+					      &rule_entry->mac.header);
+
+		/* Set a MAC itself */
+		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+				      &rule_entry->mac.mac_mid,
+				      &rule_entry->mac.mac_lsb, mac);
 	}
+
+	/* Set the ramrod data header */
+	/* TODO: take this to a higher level in order to prevent
+	 * writing it multiple times */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
 }
 
-static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
-				    struct bnx2x_client_init_params *params,
-				    u8 activate,
-				    struct client_init_ramrod_data *data)
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object
+ * @type:	BNX2X_FILTER_XXX_PENDING
+ * @cam_offset:	offset in the CAM memory
+ * @hdr:	pointer to the header to set up
+ *
+ * E1/E1H
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
+	struct mac_configuration_hdr *hdr)
 {
-	/* Clear the buffer */
-	memset(data, 0, sizeof(*data));
-
-	/* general */
-	data->general.client_id = params->rxq_params.cl_id;
-	data->general.statistics_counter_id = params->rxq_params.stat_id;
-	data->general.statistics_en_flg =
-		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
-	data->general.is_fcoe_flg =
-		(params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
-	data->general.activate_flg = activate;
-	data->general.sp_client_id = params->rxq_params.spcl_id;
+	struct bnx2x_raw_obj *r = &o->raw;
 
-	/* Rx data */
-	data->rx.tpa_en_flg =
-		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
-	data->rx.vmqueue_mode_en_flg = 0;
-	data->rx.cache_line_alignment_log_size =
-		params->rxq_params.cache_line_log;
-	data->rx.enable_dynamic_hc =
-		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
-	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
-	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
-	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
+	hdr->length = 1;
+	hdr->offset = (u8)cam_offset;
+	hdr->client_id = 0xff;
+	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
+}
 
-	/* We don't set drop flags */
-	data->rx.drop_ip_cs_err_flg = 0;
-	data->rx.drop_tcp_cs_err_flg = 0;
-	data->rx.drop_ttl0_flg = 0;
-	data->rx.drop_udp_cs_err_flg = 0;
+static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
+	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	u32 cl_bit_vec = (1 << r->cl_id);
+
+	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
+	cfg_entry->pf_id = r->func_id;
+	cfg_entry->vlan_id = cpu_to_le16(vlan_id);
+
+	if (add) {
+		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_SET);
+		SET_FLAG(cfg_entry->flags,
+			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
+
+		/* Set the MAC in the ramrod data */
+		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
+				      &cfg_entry->middle_mac_addr,
+				      &cfg_entry->lsb_mac_addr, mac);
+	} else
+		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_INVALIDATE);
+}
 
-	data->rx.inner_vlan_removal_enable_flg =
-		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
-	data->rx.outer_vlan_removal_enable_flg =
-		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
-	data->rx.status_block_id = params->rxq_params.fw_sb_id;
-	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
-	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
-	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
-	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
-	data->rx.bd_page_base.lo =
-		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
-	data->rx.bd_page_base.hi =
-		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
-	data->rx.sge_page_base.lo =
-		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
-	data->rx.sge_page_base.hi =
-		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
-	data->rx.cqe_page_base.lo =
-		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
-	data->rx.cqe_page_base.hi =
-		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
-	data->rx.is_leading_rss =
-		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
-	data->rx.is_approx_mcast = data->rx.is_leading_rss;
+static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
+	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
+{
+	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
+	struct bnx2x_raw_obj *raw = &o->raw;
 
-	/* Tx data */
-	data->tx.enforce_security_flg = 0; /* VF specific */
-	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
-	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
-	data->tx.mtu = 0; /* VF specific */
-	data->tx.tx_bd_page_base.lo =
-		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
-	data->tx.tx_bd_page_base.hi =
-		cpu_to_le32(U64_HI(params->txq_params.dscr_map));
+	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
+					 &config->hdr);
+	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
+					 cfg_entry);
 
-	/* flow control data */
-	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
-	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
-	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
-	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
-	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
-	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
-	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
-
-	data->fc.safc_group_num = params->txq_params.cos;
-	data->fc.safc_group_en_flg =
-		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
-	data->fc.traffic_type =
-		(params->ramrod_params.flags & CLIENT_IS_FCOE) ?
-		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+	DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
+			 (add ? "setting" : "clearing"),
+			 BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
 }
 
-
-int bnx2x_setup_fw_client(struct bnx2x *bp,
-			  struct bnx2x_client_init_params *params,
-			  u8 activate,
-			  struct client_init_ramrod_data *data,
-			  dma_addr_t data_mapping)
+/**
+ * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @elem:	bnx2x_exeq_elem
+ * @rule_idx:	rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  struct bnx2x_exeq_elem *elem, int rule_idx,
+				  int cam_offset)
 {
-	u16 hc_usec;
-	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
-	int ramrod_flags = 0, rc;
-
-	/* HC and context validation values */
-	hc_usec = params->txq_params.hc_rate ?
-		1000000 / params->txq_params.hc_rate : 0;
-	bnx2x_update_coalesce_sb_index(bp,
-			params->txq_params.fw_sb_id,
-			params->txq_params.sb_cq_index,
-			!(params->txq_params.flags & QUEUE_FLG_HC),
-			hc_usec);
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/*
+	 * 57710 and 57711 do not support the MOVE command,
+	 * so it's either ADD or DEL
+	 */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
 
-	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
 
-	hc_usec = params->rxq_params.hc_rate ?
-		1000000 / params->rxq_params.hc_rate : 0;
-	bnx2x_update_coalesce_sb_index(bp,
-			params->rxq_params.fw_sb_id,
-			params->rxq_params.sb_cq_index,
-			!(params->rxq_params.flags & QUEUE_FLG_HC),
-			hc_usec);
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
+				     ETH_VLAN_FILTER_ANY_VLAN, config);
+}
 
-	bnx2x_set_ctx_validation(params->rxq_params.cxt,
-				 params->rxq_params.cid);
+static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  struct bnx2x_exeq_elem *elem, int rule_idx,
+				  int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	int cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
+				      &rule_entry->vlan.header);
+
+	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
+			 vlan);
+
+	/* Set the VLAN itself */
+	rule_entry->vlan.vlan = cpu_to_le16(vlan);
+
+	/* MOVE: Add a rule that will add this VLAN to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_VLAN,
+					      &rule_entry->vlan.header);
+
+		/* Set the VLAN itself */
+		rule_entry->vlan.vlan = cpu_to_le16(vlan);
+	}
 
-	/* zero stats */
-	if (params->txq_params.flags & QUEUE_FLG_STATS)
-		storm_memset_xstats_zero(bp, BP_PORT(bp),
-					 params->txq_params.stat_id);
+	/* Set the ramrod data header */
+	/* TODO: take this to a higher level in order to prevent
+	 * writing it multiple times */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
 
-	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
-		storm_memset_ustats_zero(bp, BP_PORT(bp),
-					 params->rxq_params.stat_id);
-		storm_memset_tstats_zero(bp, BP_PORT(bp),
-					 params->rxq_params.stat_id);
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+				      struct bnx2x_vlan_mac_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	int cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+				      &rule_entry->pair.header);
+
+	/* Set the VLAN and MAC themselves */
+	rule_entry->pair.vlan = cpu_to_le16(vlan);
+	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+			      &rule_entry->pair.mac_mid,
+			      &rule_entry->pair.mac_lsb, mac);
+
+	/* MOVE: Add a rule adding this VLAN-MAC pair to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_PAIR,
+					      &rule_entry->pair.header);
+
+		/* Set the VLAN and MAC themselves */
+		rule_entry->pair.vlan = cpu_to_le16(vlan);
+		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+				      &rule_entry->pair.mac_mid,
+				      &rule_entry->pair.mac_lsb, mac);
 	}
 
-	/* Fill the ramrod data */
-	bnx2x_fill_cl_init_data(bp, params, activate, data);
+	/* Set the ramrod data header */
+	/* TODO: take this to a higher level in order to prevent
+	 * writing it multiple times */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
 
-	/* SETUP ramrod.
-	 *
-	 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
-	 * barrier except from mmiowb() is needed to impose a
-	 * proper ordering of memory operations.
+/**
+ * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @elem:	bnx2x_exeq_elem
+ * @rule_idx:	rule_idx
+ * @cam_offset:	cam_offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+				       struct bnx2x_vlan_mac_obj *o,
+				       struct bnx2x_exeq_elem *elem,
+				       int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/*
+	 * 57710 and 57711 do not support the MOVE command,
+	 * so it's either ADD or DEL
 	 */
-	mmiowb();
-
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
 
-	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
-		      U64_HI(data_mapping), U64_LO(data_mapping), 0);
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
 
-	/* Wait for completion */
-	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
-				 params->ramrod_params.index,
-				 params->ramrod_params.pstate,
-				 ramrod_flags);
-	return rc;
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+				     ETH_VLAN_FILTER_CLASSIFY, config);
 }
 
-void bnx2x_push_indir_table(struct bnx2x *bp)
-{
-	int func = BP_FUNC(bp);
+#define list_next_entry(pos, member) \
+	list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * bnx2x_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
+ *
+ * @bp:		device handle
+ * @p:		command parameters
+ * @ppos:	pointer to the cookie
+ *
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the list of
+ * previously configured elements.
+ *
+ * Of the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is
+ * taken into account.
+ *
+ * The cookie should be given back in the next call to make the function
+ * handle the next element. If *ppos is set to NULL the iterator is restarted.
+ * If the returned *ppos == NULL, the last element has been handled.
+ *
+ */
+static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
+			   struct bnx2x_vlan_mac_ramrod_params *p,
+			   struct bnx2x_vlan_mac_registry_elem **ppos)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+
+	/* If list is empty - there is nothing to do here */
+	if (list_empty(&o->head)) {
+		*ppos = NULL;
+		return 0;
+	}
+
+	/* make a step... */
+	if (*ppos == NULL)
+		*ppos = list_first_entry(&o->head,
+					 struct bnx2x_vlan_mac_registry_elem,
+					 link);
+	else
+		*ppos = list_next_entry(*ppos, link);
+
+	pos = *ppos;
+
+	/* If it's the last step - return NULL */
+	if (list_is_last(&pos->link, &o->head))
+		*ppos = NULL;
+
+	/* Prepare a 'user_req' */
+	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
+
+	/* Set the command */
+	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
+
+	/* Set vlan_mac_flags */
+	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
+
+	/* Set a restore bit */
+	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+	return bnx2x_config_vlan_mac(bp, p);
+}
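
A sketch of how a caller might drive this cookie-based iterator (the mac_obj setup is assumed; error handling beyond the return codes is elided):

	/* Hypothetical caller: re-add every previously configured element. */
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;	/* NULL restarts */
	struct bnx2x_vlan_mac_ramrod_params p = { .vlan_mac_obj = mac_obj };
	int rc;

	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);	/* wait per element */

	do {
		rc = mac_obj->restore(bp, &p, &pos);
		if (rc < 0)
			return rc;
	} while (pos);		/* comes back NULL after the last element */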
+
+/*
+ * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
+ * pointer to an element matching the given criteria, or NULL if no such
+ * element has been found.
+ */
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
+
+	/* Check the commands pending for execution */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
+
+	/* Check the commands pending for execution */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_mac_ramrod_data *data =
+		&elem->cmd_data.vlan_mac.u.vlan_mac;
+
+	/* Check the commands pending for execution */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		bnx2x_qable_obj
+ * @elem:	bnx2x_exeq_elem
+ *
+ * Checks that the requested configuration can be added. If yes and if
+ * requested, consumes a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ *
+ */
+static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
+					      union bnx2x_qable_obj *qo,
+					      struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	int rc;
+
+	/* Check the registry */
+	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
+	if (rc) {
+		DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
+				 "current registry state\n");
+		return rc;
+	}
+
+	/*
+	 * Check if there is a pending ADD command for this
+	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
+	 */
+	if (exeq->get(exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
+		return -EEXIST;
+	}
+
+	/*
+	 * TODO: Check the pending MOVE from other objects where this
+	 * object is a destination object.
+	 */
+
+	/* Consume the credit if not requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    o->get_credit(o)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		qable object to check
+ * @elem:	element that needs to be deleted
+ *
+ * Checks that the requested configuration can be deleted. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
+					      union bnx2x_qable_obj *qo,
+					      struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_exeq_elem query_elem;
+
+	/* If this classification can not be deleted (doesn't exist)
+	 * - return -EEXIST.
+	 */
+	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+	if (!pos) {
+		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
+				 "current registry state\n");
+		return -EEXIST;
+	}
+
+	/*
+	 * Check if there are pending DEL or MOVE commands for this
+	 * MAC/VLAN/VLAN-MAC. Return an error if so.
+	 */
+	memcpy(&query_elem, elem, sizeof(query_elem));
+
+	/* Check for MOVE commands */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
+	if (exeq->get(exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending MOVE command already\n");
+		return -EINVAL;
+	}
+
+	/* Check for DEL commands */
+	if (exeq->get(exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
+		return -EEXIST;
+	}
+
+	/* Return the credit to the credit pool if not requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    o->put_credit(o))) {
+		BNX2X_ERR("Failed to return a credit\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		qable object to check (source)
+ * @elem:	element that needs to be moved
+ *
+ * Checks that the requested configuration can be moved. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
+					       union bnx2x_qable_obj *qo,
+					       struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
+	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
+	struct bnx2x_exeq_elem query_elem;
+	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
+	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
+
+	/*
+	 * Check if we can perform this operation based on the current registry
+	 * state.
+	 */
+	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
+		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
+				 "current registry state\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check if there is an already pending DEL or MOVE command for the
+	 * source object or ADD command for a destination object. Return an
+	 * error if so.
+	 */
+	memcpy(&query_elem, elem, sizeof(query_elem));
+
+	/* Check DEL on source */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+	if (src_exeq->get(src_exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending DEL command on the source "
+			  "queue already\n");
+		return -EINVAL;
+	}
+
+	/* Check MOVE on source */
+	if (src_exeq->get(src_exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
+		return -EEXIST;
+	}
+
+	/* Check ADD on destination */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+	if (dest_exeq->get(dest_exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending ADD command on the "
+			  "destination queue already\n");
+		return -EINVAL;
+	}
+
+	/* Consume the credit if not requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    dest_o->get_credit(dest_o)))
+		return -EINVAL;
+
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    src_o->put_credit(src_o))) {
+		/* return the credit taken from dest... */
+		dest_o->put_credit(dest_o);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
+				   union bnx2x_qable_obj *qo,
+				   struct bnx2x_exeq_elem *elem)
+{
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
+	case BNX2X_VLAN_MAC_DEL:
+		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
+	case BNX2X_VLAN_MAC_MOVE:
+		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * bnx2x_wait_vlan_mac - passively wait up to 5 seconds until all work completes
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ *
+ */
+static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
+			       struct bnx2x_vlan_mac_obj *o)
+{
+	int cnt = 5000, rc;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	while (cnt--) {
+		/* Wait for the current command to complete */
+		rc = raw->wait_comp(bp, raw);
+		if (rc)
+			return rc;
+
+		/* Wait until there are no pending commands */
+		if (!bnx2x_exe_queue_empty(exeq))
+			usleep_range(1000, 1000);
+		else
+			return 0;
+	}
+
+	return -EBUSY;
+}
+
+/**
+ * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @cqe:		completion element
+ * @ramrod_flags:	if RAMROD_CONT is set, the next bulk of pending
+ *			commands is executed
+ *
+ */
+static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
+				   struct bnx2x_vlan_mac_obj *o,
+				   union event_ring_elem *cqe,
+				   unsigned long *ramrod_flags)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc;
+
+	/* Reset pending list */
+	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+
+	/* Clear pending */
+	r->clear_pending(r);
+
+	/* If ramrod failed this is most likely a SW bug */
+	if (cqe->message.error)
+		return -EINVAL;
+
+	/* Run the next bulk of pending commands if requested */
+	if (test_bit(RAMROD_CONT, ramrod_flags)) {
+		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+		if (rc < 0)
+			return rc;
+	}
+
+	/* If there is more work to do return PENDING */
+	if (!bnx2x_exe_queue_empty(&o->exe_queue))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
+ *
+ * @bp:		device handle
+ * @qo:	bnx2x_qable_obj
+ * @elem:	bnx2x_exeq_elem
+ */
+static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
+				   union bnx2x_qable_obj *qo,
+				   struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem query, *pos;
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+
+	memcpy(&query, elem, sizeof(query));
+
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+		break;
+	case BNX2X_VLAN_MAC_DEL:
+		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+		break;
+	default:
+		/* Don't handle anything other than ADD or DEL */
+		return 0;
+	}
+
+	/* If we found the appropriate element - delete it */
+	pos = exeq->get(exeq, &query);
+	if (pos) {
+
+		/* Return the credit of the optimized command */
+		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
+			if ((query.cmd_data.vlan_mac.cmd ==
+			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
+				BNX2X_ERR("Failed to return the credit for the "
+					  "optimized ADD command\n");
+				return -EINVAL;
+			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
+				BNX2X_ERR("Failed to recover the credit from "
+					  "the optimized DEL command\n");
+				return -EINVAL;
+			}
+		}
+
+		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
+			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+			   "ADD" : "DEL");
+
+		list_del(&pos->link);
+		bnx2x_exe_queue_free_elem(bp, pos);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
+ *
+ * @bp:	  device handle
+ * @o:		vlan_mac object
+ * @elem:	execution queue element
+ * @restore:	true if this is a RESTORE flow
+ * @re:		output parameter for the prepared registry element
+ *
+ * Prepares a registry element according to the current command request.
+ */
+static inline int bnx2x_vlan_mac_get_registry_elem(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o,
+	struct bnx2x_exeq_elem *elem,
+	bool restore,
+	struct bnx2x_vlan_mac_registry_elem **re)
+{
+	int cmd = elem->cmd_data.vlan_mac.cmd;
+	struct bnx2x_vlan_mac_registry_elem *reg_elem;
+
+	/* Allocate a new registry element if needed. */
+	if (!restore &&
+	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
+		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
+		if (!reg_elem)
+			return -ENOMEM;
+
+		/* Get a new CAM offset */
+		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
+			/*
+			 * This should never happen, because we have checked
+			 * the CAM availability in the 'validate' step.
+			 */
+			WARN_ON(1);
+			kfree(reg_elem);
+			return -EINVAL;
+		}
+
+		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
+
+		/* Set the VLAN-MAC data */
+		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
+			  sizeof(reg_elem->u));
+
+		/* Copy the flags (needed for DEL and RESTORE flows) */
+		reg_elem->vlan_mac_flags =
+			elem->cmd_data.vlan_mac.vlan_mac_flags;
+	} else /* DEL, RESTORE */
+		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+	*re = reg_elem;
+	return 0;
+}
+
+/**
+ * bnx2x_execute_vlan_mac - execute vlan mac command
+ *
+ * @bp:			device handle
+ * @qo:			qable object
+ * @exe_chunk:		chunk of commands to execute
+ * @ramrod_flags:	execution flags
+ *
+ * go and send a ramrod!
+ */
+static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
+				  union bnx2x_qable_obj *qo,
+				  struct list_head *exe_chunk,
+				  unsigned long *ramrod_flags)
+{
+	struct bnx2x_exeq_elem *elem;
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc, idx = 0;
+	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
+	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
+	struct bnx2x_vlan_mac_registry_elem *reg_elem;
+	int cmd;
+
+	/*
+	 * If DRIVER_ONLY execution is requested, clean up the registry
+	 * and exit. Otherwise send a ramrod to the FW.
+	 */
+	if (!drv_only) {
+		WARN_ON(r->check_pending(r));
+
+		/* Set pending */
+		r->set_pending(r);
+
+		/* Fill the ramrod data */
+		list_for_each_entry(elem, exe_chunk, link) {
+			cmd = elem->cmd_data.vlan_mac.cmd;
+			/*
+			 * For the MOVE command we will add to the target
+			 * object, so change the object used for the CAM search.
+			 */
+			if (cmd == BNX2X_VLAN_MAC_MOVE)
+				cam_obj = elem->cmd_data.vlan_mac.target_obj;
+			else
+				cam_obj = o;
+
+			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
+							      elem, restore,
+							      &reg_elem);
+			if (rc)
+				goto error_exit;
+
+			WARN_ON(!reg_elem);
+
+			/* Push a new entry into the registry */
+			if (!restore &&
+			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
+			    (cmd == BNX2X_VLAN_MAC_MOVE)))
+				list_add(&reg_elem->link, &cam_obj->head);
+
+			/* Configure a single command in a ramrod data buffer */
+			o->set_one_rule(bp, o, elem, idx,
+					reg_elem->cam_offset);
+
+			/* MOVE command consumes 2 entries in the ramrod data */
+			if (cmd == BNX2X_VLAN_MAC_MOVE)
+				idx += 2;
+			else
+				idx++;
+		}
+
+		/* Commit the data writes towards the memory */
+		mb();
+
+		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
+				   U64_HI(r->rdata_mapping),
+				   U64_LO(r->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			goto error_exit;
+	}
+
+	/* Now, when we are done with the ramrod - clean up the registry */
+	list_for_each_entry(elem, exe_chunk, link) {
+		cmd = elem->cmd_data.vlan_mac.cmd;
+		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
+		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
+			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+			WARN_ON(!reg_elem);
+
+			o->put_cam_offset(o, reg_elem->cam_offset);
+			list_del(&reg_elem->link);
+			kfree(reg_elem);
+		}
+	}
+
+	if (!drv_only)
+		return 1;
+	else
+		return 0;
+
+error_exit:
+	r->clear_pending(r);
+
+	/* Cleanup a registry in case of a failure */
+	list_for_each_entry(elem, exe_chunk, link) {
+		cmd = elem->cmd_data.vlan_mac.cmd;
+
+		if (cmd == BNX2X_VLAN_MAC_MOVE)
+			cam_obj = elem->cmd_data.vlan_mac.target_obj;
+		else
+			cam_obj = o;
+
+		/* Delete all the entries newly added above */
+		if (!restore &&
+		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
+		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
+			reg_elem = o->check_del(cam_obj,
+						&elem->cmd_data.vlan_mac.u);
+			if (reg_elem) {
+				list_del(&reg_elem->link);
+				kfree(reg_elem);
+			}
+		}
+	}
+
+	return rc;
+}
+
+static inline int bnx2x_vlan_mac_push_new_cmd(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_ramrod_params *p)
+{
+	struct bnx2x_exeq_elem *elem;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+	/* Allocate the execution queue element */
+	elem = bnx2x_exe_queue_alloc_elem(bp);
+	if (!elem)
+		return -ENOMEM;
+
+	/* Set the command 'length' */
+	switch (p->user_req.cmd) {
+	case BNX2X_VLAN_MAC_MOVE:
+		elem->cmd_len = 2;
+		break;
+	default:
+		elem->cmd_len = 1;
+	}
+
+	/* Fill the object specific info */
+	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
+
+	/* Try to add a new command to the pending list */
+	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
+}
+
+/**
+ * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
+ *
+ * @bp:	  device handle
+ * @p:	  command parameters
+ *
+ */
+int bnx2x_config_vlan_mac(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_ramrod_params *p)
+{
+	int rc = 0;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+	unsigned long *ramrod_flags = &p->ramrod_flags;
+	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	/*
+	 * Add new elements to the execution list for commands that require it.
+	 */
+	if (!cont) {
+		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
+		if (rc)
+			return rc;
+	}
+
+	/*
+	 * If nothing more will be executed in this iteration, we want to
+	 * return PENDING if there are pending commands.
+	 */
+	if (!bnx2x_exe_queue_empty(&o->exe_queue))
+		rc = 1;
+
+	/* Execute commands if required */
+	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
+	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
+		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+		if (rc < 0)
+			return rc;
+	}
+
+	/*
+	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
+	 * then the user wants to wait until the last command is done.
+	 */
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+		/*
+		 * Wait for at most the current exe_queue length iterations
+		 * plus one (for the currently pending command).
+		 */
+		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
+
+		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
+		       max_iterations--) {
+
+			/* Wait for the current command to complete */
+			rc = raw->wait_comp(bp, raw);
+			if (rc)
+				return rc;
+
+			/* Make a next step */
+			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
+						  ramrod_flags);
+			if (rc < 0)
+				return rc;
+		}
+
+		return 0;
+	}
+
+	return rc;
+}
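
A hedged usage sketch for this entry point: synchronously adding one unicast MAC (where the MAC object comes from and the address source are assumptions):

	/* Hypothetical: ADD a unicast MAC, block until the ramrod completes. */
	struct bnx2x_vlan_mac_ramrod_params p;
	int rc;

	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = mac_obj;			/* assumed initialized */
	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	memcpy(p.user_req.u.mac.mac, bp->dev->dev_addr, ETH_ALEN);
	__set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);	/* synchronous */

	rc = bnx2x_config_vlan_mac(bp, &p);
	if (rc < 0)
		BNX2X_ERR("Failed to configure the MAC\n");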
+
+
+
+/**
+ * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ * @vlan_mac_flags:	vlan_mac_flags spec of the elements to delete
+ * @ramrod_flags:	execution flags to be used for this deletion
+ *
+ * Returns 0 if the last operation has completed successfully and there are
+ * no more elements left, a positive value if the last operation has
+ * completed successfully and there are more previously configured elements,
+ * and a negative value if the current operation has failed.
+ */
+static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  unsigned long *vlan_mac_flags,
+				  unsigned long *ramrod_flags)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+	int rc = 0;
+	struct bnx2x_vlan_mac_ramrod_params p;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+
+	/* Clear pending commands first */
+
+	spin_lock_bh(&exeq->lock);
+
+	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
+		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
+		    *vlan_mac_flags) {
+			list_del(&exeq_pos->link);
+			/* Free the element, otherwise it would be leaked */
+			bnx2x_exe_queue_free_elem(bp, exeq_pos);
+		}
+	}
+
+	spin_unlock_bh(&exeq->lock);
+
+	/* Prepare a command request */
+	memset(&p, 0, sizeof(p));
+	p.vlan_mac_obj = o;
+	p.ramrod_flags = *ramrod_flags;
+	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+	/*
+	 * Add all but the last VLAN-MAC to the execution queue without
+	 * actually executing anything.
+	 */
+	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
+	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
+
+	list_for_each_entry(pos, &o->head, link) {
+		if (pos->vlan_mac_flags == *vlan_mac_flags) {
+			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
+			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
+			rc = bnx2x_config_vlan_mac(bp, &p);
+			if (rc < 0) {
+				BNX2X_ERR("Failed to add a new DEL command\n");
+				return rc;
+			}
+		}
+	}
+
+	p.ramrod_flags = *ramrod_flags;
+	__set_bit(RAMROD_CONT, &p.ramrod_flags);
+
+	return bnx2x_config_vlan_mac(bp, &p);
+}
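
A sketch of the intended use of this verb once it is wired up as o->delete_all below, e.g. flushing every ETH MAC the driver has configured (object name assumed):

	/* Hypothetical: drop all driver-configured ETH MACs in one shot. */
	unsigned long vlan_mac_flags = 0, ramrod_flags = 0;
	int rc;

	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);	/* match ETH MACs only */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);	/* block until done */

	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
	if (rc < 0)
		BNX2X_ERR("Failed to delete MACs\n");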
+
+static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
+	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
+	unsigned long *pstate, bnx2x_obj_type type)
+{
+	raw->func_id = func_id;
+	raw->cid = cid;
+	raw->cl_id = cl_id;
+	raw->rdata = rdata;
+	raw->rdata_mapping = rdata_mapping;
+	raw->state = state;
+	raw->pstate = pstate;
+	raw->obj_type = type;
+	raw->check_pending = bnx2x_raw_check_pending;
+	raw->clear_pending = bnx2x_raw_clear_pending;
+	raw->set_pending = bnx2x_raw_set_pending;
+	raw->wait_comp = bnx2x_raw_wait;
+}
+
+static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
+	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
+	int state, unsigned long *pstate, bnx2x_obj_type type,
+	struct bnx2x_credit_pool_obj *macs_pool,
+	struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	INIT_LIST_HEAD(&o->head);
+
+	o->macs_pool = macs_pool;
+	o->vlans_pool = vlans_pool;
+
+	o->delete_all = bnx2x_vlan_mac_del_all;
+	o->restore = bnx2x_vlan_mac_restore;
+	o->complete = bnx2x_complete_vlan_mac;
+	o->wait = bnx2x_wait_vlan_mac;
+
+	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
+			   state, pstate, type);
+}
+
+
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_obj *mac_obj,
+			u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			dma_addr_t rdata_mapping, int state,
+			unsigned long *pstate, bnx2x_obj_type type,
+			struct bnx2x_credit_pool_obj *macs_pool)
+{
+	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
+
+	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, NULL);
+
+	/* CAM credit pool handling */
+	mac_obj->get_credit = bnx2x_get_credit_mac;
+	mac_obj->put_credit = bnx2x_put_credit_mac;
+	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1x(bp)) {
+		mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
+		mac_obj->check_del         = bnx2x_check_mac_del;
+		mac_obj->check_add         = bnx2x_check_mac_add;
+		mac_obj->check_move        = bnx2x_check_move_always_err;
+		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_mac);
+	} else {
+		mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
+		mac_obj->check_del         = bnx2x_check_mac_del;
+		mac_obj->check_add         = bnx2x_check_mac_add;
+		mac_obj->check_move        = bnx2x_check_move;
+		mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_mac);
+	}
+}
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *vlan_obj,
+			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			 dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, bnx2x_obj_type type,
+			 struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type, NULL,
+				   vlans_pool);
+
+	vlan_obj->get_credit = bnx2x_get_credit_vlan;
+	vlan_obj->put_credit = bnx2x_put_credit_vlan;
+	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
+	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
+
+	if (CHIP_IS_E1x(bp)) {
+		BNX2X_ERR("Do not support chips other than E2 and newer\n");
+		BUG();
+	} else {
+		vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
+		vlan_obj->check_del         = bnx2x_check_vlan_del;
+		vlan_obj->check_add         = bnx2x_check_vlan_add;
+		vlan_obj->check_move        = bnx2x_check_move;
+		vlan_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan);
+	}
+}
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj =
+		(union bnx2x_qable_obj *)vlan_mac_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, vlans_pool);
+
+	/* CAM pool handling */
+	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+	/*
+	 * CAM offset is relevant for 57710 and 57711 chips only which have a
+	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
+	 * will be taken from MACs' pool object only.
+	 */
+	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1(bp)) {
+		BNX2X_ERR("Do not support chips other than E2\n");
+		BUG();
+	} else if (CHIP_IS_E1H(bp)) {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
+		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	} else {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move;
+		vlan_mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue,
+				     CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	}
+
+}
+
+/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+static inline void __storm_memset_mac_filters(struct bnx2x *bp,
+			struct tstorm_eth_mac_filter_config *mac_filters,
+			u16 pf_id)
+{
+	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+}
+
+static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
+				 struct bnx2x_rx_mode_ramrod_params *p)
+{
+	/* update the bp MAC filter structure  */
+	u32 mask = (1 << p->cl_id);
+
+	struct tstorm_eth_mac_filter_config *mac_filters =
+		(struct tstorm_eth_mac_filter_config *)p->rdata;
+
+	/* initial setting is drop-all */
+	u8 drop_all_ucast = 1, drop_all_mcast = 1;
+	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+	u8 unmatched_unicast = 0;
+
+	/*
+	 * In e1x we only take the Rx accept flags into account since Tx
+	 * switching isn't enabled.
+	 */
+	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
+		/* accept matched ucast */
+		drop_all_ucast = 0;
+
+	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
+		/* accept matched mcast */
+		drop_all_mcast = 0;
+
+	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
+		/* accept all ucast */
+		drop_all_ucast = 0;
+		accp_all_ucast = 1;
+	}
+	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
+		/* accept all mcast */
+		drop_all_mcast = 0;
+		accp_all_mcast = 1;
+	}
+	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
+		/* accept (all) bcast */
+		accp_all_bcast = 1;
+	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
+		/* accept unmatched unicasts */
+		unmatched_unicast = 1;
+
+	mac_filters->ucast_drop_all = drop_all_ucast ?
+		mac_filters->ucast_drop_all | mask :
+		mac_filters->ucast_drop_all & ~mask;
+
+	mac_filters->mcast_drop_all = drop_all_mcast ?
+		mac_filters->mcast_drop_all | mask :
+		mac_filters->mcast_drop_all & ~mask;
+
+	mac_filters->ucast_accept_all = accp_all_ucast ?
+		mac_filters->ucast_accept_all | mask :
+		mac_filters->ucast_accept_all & ~mask;
+
+	mac_filters->mcast_accept_all = accp_all_mcast ?
+		mac_filters->mcast_accept_all | mask :
+		mac_filters->mcast_accept_all & ~mask;
+
+	mac_filters->bcast_accept_all = accp_all_bcast ?
+		mac_filters->bcast_accept_all | mask :
+		mac_filters->bcast_accept_all & ~mask;
+
+	mac_filters->unmatched_unicast = unmatched_unicast ?
+		mac_filters->unmatched_unicast | mask :
+		mac_filters->unmatched_unicast & ~mask;
+
+	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
+			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+			 mac_filters->ucast_drop_all,
+			 mac_filters->mcast_drop_all,
+			 mac_filters->ucast_accept_all,
+			 mac_filters->mcast_accept_all,
+			 mac_filters->bcast_accept_all);
+
+	/* Write the MAC filter structure */
+	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
+
+	/* The operation is completed */
+	clear_bit(p->state, p->pstate);
+	smp_mb__after_clear_bit();
+
+	return 0;
+}
+
+/* Setup ramrod data */
+static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
+				struct eth_classify_header *hdr,
+				u8 rule_cnt)
+{
+	hdr->echo = cid;
+	hdr->rule_cnt = rule_cnt;
+}
+
+static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
+				unsigned long accept_flags,
+				struct eth_filter_rules_cmd *cmd,
+				bool clear_accept_all)
+{
+	u16 state;
+
+	/* start with 'drop-all' */
+	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
+		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+	if (accept_flags) {
+		if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
+			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+
+		if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
+			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+		if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
+			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+		}
+
+		if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
+			state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+		}
+		if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
+			state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+		if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
+			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+		}
+		if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
+			state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+	}
+
+	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
+	if (clear_accept_all) {
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+	}
+
+	cmd->state = cpu_to_le16(state);
+}
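
To make the bit manipulation above concrete, one input/output pair for this helper, sketched:

	/* Sketch: matched unicast plus broadcast, no multicast. */
	struct eth_filter_rules_cmd cmd = { 0 };
	unsigned long accept = 0;

	__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);

	bnx2x_rx_mode_set_cmd_state_e2(bp, accept, &cmd, false);
	/*
	 * cmd.state now has UCAST_DROP_ALL cleared (matched ucast passes),
	 * MCAST_DROP_ALL still set and BCAST_ACCEPT_ALL set.
	 */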
+
+static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
+				struct bnx2x_rx_mode_ramrod_params *p)
+{
+	struct eth_filter_rules_ramrod_data *data = p->rdata;
+	int rc;
+	u8 rule_idx = 0;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	/* Setup ramrod data */
+
+	/* Tx (internal switching) */
+	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+			ETH_FILTER_RULES_CMD_TX_CMD;
+
+		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+			&(data->rules[rule_idx++]), false);
+	}
+
+	/* Rx */
+	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+			ETH_FILTER_RULES_CMD_RX_CMD;
+
+		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+			&(data->rules[rule_idx++]), false);
+	}
+
+
+	/*
+	 * If an FCoE Queue configuration has been requested, configure the Rx
+	 * and internal switching modes for this queue in separate rules.
+	 *
+	 * The FCoE queue should never be set to ACCEPT_ALL packets of any
+	 * sort: MCAST_ALL, UCAST_ALL, BCAST_ALL or UNMATCHED.
+	 */
+	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
+		/*  Tx (internal switching) */
+		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+			data->rules[rule_idx].func_id = p->func_id;
+
+			data->rules[rule_idx].cmd_general_data =
+						ETH_FILTER_RULES_CMD_TX_CMD;
+
+			bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+						     &(data->rules[rule_idx++]),
+						       true);
+		}
+
+		/* Rx */
+		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+			data->rules[rule_idx].func_id = p->func_id;
+
+			data->rules[rule_idx].cmd_general_data =
+						ETH_FILTER_RULES_CMD_RX_CMD;
+
+			bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+						     &(data->rules[rule_idx++]),
+						       true);
+		}
+	}
+
+	/*
+	 * Set the ramrod header (most importantly - number of rules to
+	 * configure).
+	 */
+	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
+
+	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
+			 "tx_accept_flags 0x%lx\n",
+			 data->header.rule_cnt, p->rx_accept_flags,
+			 p->tx_accept_flags);
+
+	/* Commit writes towards the memory before sending a ramrod */
+	mb();
+
+	/* Send a ramrod */
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
+			   U64_HI(p->rdata_mapping),
+			   U64_LO(p->rdata_mapping),
+			   ETH_CONNECTION_TYPE);
+	if (rc)
+		return rc;
+
+	/* Ramrod completion is pending */
+	return 1;
+}
+
+static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
+				      struct bnx2x_rx_mode_ramrod_params *p)
+{
+	return bnx2x_state_wait(bp, p->state, p->pstate);
+}
+
+static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
+				    struct bnx2x_rx_mode_ramrod_params *p)
+{
+	/* Do nothing */
+	return 0;
+}
+
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p)
+{
+	int rc;
+
+	/* Configure the new classification in the chip */
+	rc = p->rx_mode_obj->config_rx_mode(bp, p);
+	if (rc < 0)
+		return rc;
+
+	/* Wait for a ramrod completion if was requested */
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+		rc = p->rx_mode_obj->wait_comp(bp, p);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+			    struct bnx2x_rx_mode_obj *o)
+{
+	if (CHIP_IS_E1x(bp)) {
+		o->wait_comp      = bnx2x_empty_rx_mode_wait;
+		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+	} else {
+		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
+		o->config_rx_mode = bnx2x_set_rx_mode_e2;
+	}
+}
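
A hedged sketch of driving the object from a caller: request "accept all unicast" on the Rx path and wait for completion (the rx_mode_obj location and the cid/cl_id/rdata plumbing are assumptions and are elided):

	/* Hypothetical: make a client's Rx path unicast-promiscuous. */
	struct bnx2x_rx_mode_ramrod_params p;
	int rc;

	memset(&p, 0, sizeof(p));
	p.rx_mode_obj = &bp->rx_mode_obj;		/* assumed field */
	/* cl_id/cid/func_id/rdata setup omitted for brevity */
	__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &p.rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &p.rx_accept_flags);
	__set_bit(RAMROD_RX, &p.ramrod_flags);
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);	/* synchronous */

	rc = bnx2x_config_rx_mode(bp, &p);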
+
+/********************* Multicast verbs: SET, CLEAR ****************************/
+static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
+{
+	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+}
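
The approximate-match registry below works on 256 bins rather than on exact MACs: every multicast MAC is hashed down to bin (crc32c(mac) >> 24) & 0xff, so distinct MACs may share a bin and the filter can over-accept. A standalone sketch of the idea (with a trivial stand-in hash, since the CRC32C implementation itself is beside the point):

#include <stdio.h>

/* Stand-in hash for the demo only; the driver uses crc32c_le(). */
static unsigned int demo_hash(const unsigned char *mac)
{
	unsigned int h = 0;
	int i;

	for (i = 0; i < 6; i++)
		h = h * 31 + mac[i];
	return h;
}

int main(void)
{
	unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned char bin = (demo_hash(mac) >> 24) & 0xff;	/* 0..255 */

	/* All MACs hashing to this bin are accepted once the bin is set. */
	printf("bin %u\n", bin);
	return 0;
}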
+
+struct bnx2x_mcast_mac_elem {
+	struct list_head link;
+	u8 mac[ETH_ALEN];
+	u8 pad[2]; /* For a natural alignment of the following buffer */
+};
+
+struct bnx2x_pending_mcast_cmd {
+	struct list_head link;
+	int type; /* BNX2X_MCAST_CMD_X */
+	union {
+		struct list_head macs_head;
+		u32 macs_num; /* Needed for DEL command */
+		int next_bin; /* Needed for RESTORE flow with approx. match */
+	} data;
+
+	bool done; /* set to true when the command has been handled. In
+		    * practice only used in the 57712 handling, where one
+		    * pending command may be handled by a few operations. Since
+		    * on other chips every operation is completed in a single
+		    * ramrod, there is no need to utilize this field there.
+		    */
+};
+
+static int bnx2x_mcast_wait(struct bnx2x *bp,
+			    struct bnx2x_mcast_obj *o)
+{
+	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
+			o->raw.wait_comp(bp, &o->raw))
+		return -EBUSY;
+
+	return 0;
+}
+
+static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
+				   struct bnx2x_mcast_obj *o,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   int cmd)
+{
+	int total_sz;
+	struct bnx2x_pending_mcast_cmd *new_cmd;
+	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
+	struct bnx2x_mcast_list_elem *pos;
+	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
+			     p->mcast_list_len : 0);
+
+	/* If the command is empty ("handle pending commands only"), break */
+	if (!p->mcast_list_len)
+		return 0;
+
+	total_sz = sizeof(*new_cmd) +
+		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
+
+	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
+	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
+
+	if (!new_cmd)
+		return -ENOMEM;
+
+	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
+			 "macs_list_len=%d\n", cmd, macs_list_len);
+
+	INIT_LIST_HEAD(&new_cmd->data.macs_head);
+
+	new_cmd->type = cmd;
+	new_cmd->done = false;
+
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		cur_mac = (struct bnx2x_mcast_mac_elem *)
+			  ((u8 *)new_cmd + sizeof(*new_cmd));
+
+		/* Push the MACs of the current command into the pending
+		 * command's MACs list: FIFO
+		 */
+		list_for_each_entry(pos, &p->mcast_list, link) {
+			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
+			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
+			cur_mac++;
+		}
+
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		new_cmd->data.macs_num = p->mcast_list_len;
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		new_cmd->data.next_bin = 0;
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* Push the new pending command to the tail of the pending list: FIFO */
+	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
+
+	o->set_sched(o);
+
+	return 1;
+}
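
Worth noting in the ADD path above: the MAC elements live in the same kzalloc() block as the command header, laid out immediately after it, and are then threaded onto macs_head. A standalone sketch of that single-allocation pattern:

#include <stdlib.h>

struct demo_elem { struct demo_elem *next; unsigned char mac[6]; };
struct demo_cmd  { struct demo_elem *head; };

/* One allocation holds both the command and all of its elements. */
static struct demo_cmd *demo_alloc_cmd(int n)
{
	struct demo_cmd *c;
	struct demo_elem *e;
	int i;

	c = calloc(1, sizeof(*c) + n * sizeof(struct demo_elem));
	if (!c)
		return NULL;

	e = (struct demo_elem *)(c + 1);	/* elements follow the header */
	for (i = 0; i < n; i++, e++) {
		e->next = c->head;		/* thread onto the list */
		c->head = e;
	}
	return c;				/* a single free() releases all */
}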
+
+/**
+ * bnx2x_mcast_get_next_bin - get the next set bin (index)
+ *
+ * @o:		multicast object
+ * @last:	index to start looking from (inclusive)
+ *
+ * returns the next found (set) bin or a negative value if none is found.
+ */
+static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
+{
+	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
+
+	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
+		if (o->registry.aprox_match.vec[i])
+			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
+				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
+				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
+						       vec, cur_bit)) {
+					return cur_bit;
+				}
+			}
+		inner_start = 0;
+	}
+
+	/* None found */
+	return -1;
+}
+
+/**
+ * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
+ *
+ * @o:		multicast object
+ *
+ * returns the index of the found bin or -1 if none is found
+ */
+static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
+{
+	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
+
+	if (cur_bit >= 0)
+		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
+
+	return cur_bit;
+}
+
+static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	u8 rx_tx_flag = 0;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
+
+	return rx_tx_flag;
+}
+
+static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
+					struct bnx2x_mcast_obj *o, int idx,
+					union bnx2x_mcast_config_data *cfg_data,
+					int cmd)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
+	u8 func_id = r->func_id;
+	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
+	int bin;
+
+	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
+
+	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
+
+	/* Get a bin and update the bins' vector */
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
+		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		/* If there were no more bins to clear
+		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
+		 * clear any (0xff) bin.
+		 * See bnx2x_mcast_validate_e2() for explanation when it may
+		 * happen.
+		 */
+		bin = bnx2x_mcast_clear_first_bin(o);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		bin = cfg_data->bin;
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return;
+	}
+
+	DP(BNX2X_MSG_SP, "%s bin %d\n",
+			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
+			 "Setting"  : "Clearing"), bin);
+
+	data->rules[idx].bin_id    = (u8)bin;
+	data->rules[idx].func_id   = func_id;
+	data->rules[idx].engine_id = o->engine_id;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ * @start_bin:	index in the registry to start from (including)
+ * @rdata_idx:	index in the ramrod data to start from
+ *
+ * returns last handled bin index or -1 if all bins have been handled
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e2(
+	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
+	int *rdata_idx)
+{
+	int cur_bin, cnt = *rdata_idx;
+	union bnx2x_mcast_config_data cfg_data = {0};
+
+	/* go through the registry and configure the bins from it */
+	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
+	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
+
+		cfg_data.bin = (u8)cur_bin;
+		o->set_one_rule(bp, o, cnt, &cfg_data,
+				BNX2X_MCAST_CMD_RESTORE);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
+
+		/* Break if we reached the maximum number
+		 * of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*rdata_idx = cnt;
+
+	return cur_bin;
+}
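+
+/* Continuation sketch (illustrative; assumes max_cmd_len == 16 as set for
+ * the 57712 object below): restoring a registry with 40 set bins takes
+ * three ramrods, each pass resuming from the bin after the last one
+ * handled in the previous pass:
+ *
+ *	pass 1: start_bin = 0,  fills 16 rules, returns e.g. bin 20
+ *	pass 2: start_bin = 21, fills 16 rules, returns e.g. bin 45
+ *	pass 3: start_bin = 46, fills the last 8 rules, returns -1 (done)
+ */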
+
+static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+	int cnt = *line_idx;
+	union bnx2x_mcast_config_data cfg_data = {0};
+
+	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
+				 link) {
+
+		cfg_data.mac = &pmac_pos->mac[0];
+		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+				 " mcast MAC\n",
+				 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
+
+		list_del(&pmac_pos->link);
+
+		/* Break if we reached the maximum number
+		 * of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*line_idx = cnt;
+
+	/* if no more MACs to configure - we are done */
+	if (list_empty(&cmd_pos->data.macs_head))
+		cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	int cnt = *line_idx;
+
+	while (cmd_pos->data.macs_num) {
+		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
+
+		cnt++;
+
+		cmd_pos->data.macs_num--;
+
+		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
+				 cmd_pos->data.macs_num, cnt);
+
+		/* Break if we reached the maximum
+		 * number of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*line_idx = cnt;
+
+	/* If we cleared all bins - we are done */
+	if (!cmd_pos->data.macs_num)
+		cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
+						line_idx);
+
+	if (cmd_pos->data.next_bin < 0)
+		/* If o->set_restore returned -1 we are done */
+		cmd_pos->done = true;
+	else
+		/* Start from the next bin next time */
+		cmd_pos->data.next_bin++;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
+	int cnt = 0;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
+				 link) {
+		switch (cmd_pos->type) {
+		case BNX2X_MCAST_CMD_ADD:
+			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
+			break;
+
+		case BNX2X_MCAST_CMD_DEL:
+			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
+			break;
+
+		case BNX2X_MCAST_CMD_RESTORE:
+			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
+							   &cnt);
+			break;
+
+		default:
+			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+			return -EINVAL;
+		}
+
+		/* If the command has been completed - remove it from the list
+		 * and free the memory
+		 */
+		if (cmd_pos->done) {
+			list_del(&cmd_pos->link);
+			kfree(cmd_pos);
+		}
+
+		/* Break if we reached the maximum number of rules */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	return cnt;
+}
+
+static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	int *line_idx)
+{
+	struct bnx2x_mcast_list_elem *mlist_pos;
+	union bnx2x_mcast_config_data cfg_data = {0};
+	int cnt = *line_idx;
+
+	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+		cfg_data.mac = mlist_pos->mac;
+		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+				 " mcast MAC\n",
+				 BNX2X_MAC_PRN_LIST(mlist_pos->mac));
+	}
+
+	*line_idx = cnt;
+}
+
+static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	int *line_idx)
+{
+	int cnt = *line_idx, i;
+
+	for (i = 0; i < p->mcast_list_len; i++) {
+		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
+				 p->mcast_list_len - i - 1);
+	}
+
+	*line_idx = cnt;
+}
+
+/**
+ * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
+ *
+ * @bp:		device handle
+ * @p:		ramrod parameters
+ * @cmd:	command to handle
+ * @start_cnt:	first line in the ramrod data that may be used
+ *
+ * This function is called only if there is enough room for the current
+ * command in the ramrod data.
+ * Returns number of lines filled in the ramrod data in total.
+ */
+static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
+			struct bnx2x_mcast_ramrod_params *p, int cmd,
+			int start_cnt)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int cnt = start_cnt;
+
+	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		o->hdl_restore(bp, o, 0, &cnt);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* The current command has been handled */
+	p->mcast_list_len = 0;
+
+	return cnt;
+}
+
+static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int reg_sz = o->get_registry_size(o);
+
+	switch (cmd) {
+	/* DEL command deletes all currently configured MACs */
+	case BNX2X_MCAST_CMD_DEL:
+		o->set_registry_size(o, 0);
+		/* Don't break */
+
+	/* RESTORE command will restore the entire multicast configuration */
+	case BNX2X_MCAST_CMD_RESTORE:
+		/* Here we set the approximate amount of work to do, which in
+		 * fact may be less: some MACs in postponed ADD
+		 * command(s) scheduled before this command may fall into
+		 * the same bin, so the actual number of bins set in the
+		 * registry may be smaller than we estimated here. See
+		 * bnx2x_mcast_set_one_rule_e2() for further details.
+		 */
+		p->mcast_list_len = reg_sz;
+		break;
+
+	case BNX2X_MCAST_CMD_ADD:
+	case BNX2X_MCAST_CMD_CONT:
+		/* Here we assume that all new MACs will fall into new bins.
+		 * However we will correct the real registry size after we
+		 * handle all pending commands.
+		 */
+		o->set_registry_size(o, reg_sz + p->mcast_list_len);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* Increase the total number of MACs pending to be configured */
+	o->total_pending_num += p->mcast_list_len;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
+				      struct bnx2x_mcast_ramrod_params *p,
+				      int old_num_bins)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	o->set_registry_size(o, old_num_bins);
+	o->total_pending_num -= p->mcast_list_len;
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e2 - set the ramrod data header values
+ *
+ * @bp:		device handle
+ * @p:		ramrod parameters
+ * @len:	number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
+					struct bnx2x_mcast_ramrod_params *p,
+					u8 len)
+{
+	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
+
+	data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
+			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+	data->header.rule_cnt = len;
+}
+
+/**
+ * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ *
+ * Recalculate the actual number of set bins in the registry using Brian
+ * Kernighan's algorithm: its execution time is proportional to the number
+ * of set bins.
+ *
+ * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
+ */
+static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
+						  struct bnx2x_mcast_obj *o)
+{
+	int i, cnt = 0;
+	u64 elem;
+
+	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
+		elem = o->registry.aprox_match.vec[i];
+		for (; elem; cnt++)
+			elem &= elem - 1;
+	}
+
+	o->set_registry_size(o, cnt);
+
+	return 0;
+}
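+
+/* Worked example (illustrative only): for elem == 0b101100 the inner loop
+ * runs exactly three times - once per set bit:
+ *
+ *	0b101100 & 0b101011 = 0b101000	(cnt = 1)
+ *	0b101000 & 0b100111 = 0b100000	(cnt = 2)
+ *	0b100000 & 0b011111 = 0b000000	(cnt = 3, loop ends)
+ */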
+
+static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p,
+				int cmd)
+{
+	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
+	int cnt = 0, rc;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
+
+	/* If there are no more pending commands - clear SCHEDULED state */
+	if (list_empty(&o->pending_cmds_head))
+		o->clear_sched(o);
+
+	/* The below may be true iff there was enough room in ramrod
+	 * data for all pending commands and for the current
+	 * command. Otherwise the current command would have been added
+	 * to the pending commands and p->mcast_list_len would have been
+	 * zeroed.
+	 */
+	if (p->mcast_list_len > 0)
+		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
+
+	/* We've pulled out some MACs - update the total number of
+	 * outstanding.
+	 */
+	o->total_pending_num -= cnt;
+
+	/* send a ramrod */
+	WARN_ON(o->total_pending_num < 0);
+	WARN_ON(cnt > o->max_cmd_len);
+
+	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
+
+	/* Update a registry size if there are no more pending operations.
+	 *
+	 * We don't want to change the value of the registry size if there are
+	 * pending operations because we want it to always be equal to the
+	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
+	 * set bins after the last requested operation in order to properly
+	 * evaluate the size of the next DEL/RESTORE operation.
+	 *
+	 * Note that we update the registry itself during command(s) handling
+	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
+	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
+	 * with a limited amount of update commands (per MAC/bin) and we don't
+	 * know in this scope what the actual state of bins configuration is
+	 * going to be after this ramrod.
+	 */
+	if (!o->total_pending_num)
+		bnx2x_mcast_refresh_registry_e2(bp, o);
+
+	/* Commit writes towards the memory before sending a ramrod */
+	mb();
+
+	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
+	 * RAMROD_PENDING status immediately.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		raw->clear_pending(raw);
+		return 0;
+	} else {
+		/* Send a ramrod */
+		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+				   raw->cid, U64_HI(raw->rdata_mapping),
+				   U64_LO(raw->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			return rc;
+
+		/* Ramrod completion is pending */
+		return 1;
+	}
+}
+
+static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
+				    struct bnx2x_mcast_ramrod_params *p,
+				    int cmd)
+{
+	/* Mark that there is work to do */
+	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+		p->mcast_list_len = 1;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
+				       struct bnx2x_mcast_ramrod_params *p,
+				       int old_num_bins)
+{
+	/* Do nothing */
+}
+
+#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
+do { \
+	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
+} while (0)
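+
+/* Expansion example (illustrative only): for bit == 37 the macro sets
+ * bit 5 of the second 32-bit word of the filter:
+ *
+ *	(filter)[37 >> 5] |= 1 << (37 & 0x1f);	which is  filter[1] |= 1 << 5
+ */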
+
+static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
+					   struct bnx2x_mcast_obj *o,
+					   struct bnx2x_mcast_ramrod_params *p,
+					   u32 *mc_filter)
+{
+	struct bnx2x_mcast_list_elem *mlist_pos;
+	int bit;
+
+	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
+		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+
+		DP(BNX2X_MSG_SP, "About to configure "
+				 BNX2X_MAC_FMT" mcast MAC, bin %d\n",
+				 BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
+
+		/* bookkeeping... */
+		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
+				  bit);
+	}
+}
+
+static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	u32 *mc_filter)
+{
+	int bit;
+
+	for (bit = bnx2x_mcast_get_next_bin(o, 0);
+	     bit >= 0;
+	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
+		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
+	}
+}
+
+/* On 57711 we write the multicast MACs' approximate match
+ * table directly into the TSTORM's internal RAM, so we don't
+ * need any tricks to make it work.
+ */
+static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
+				 struct bnx2x_mcast_ramrod_params *p,
+				 int cmd)
+{
+	int i;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	/* If CLEAR_ONLY has been requested we just clear the registry
+	 * and the pending bit; otherwise configure the new filter first.
+	 */
+	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		u32 mc_filter[MC_HASH_SIZE] = {0};
+
+		/* Set the multicast filter bits before writing it into
+		 * the internal memory.
+		 */
+		switch (cmd) {
+		case BNX2X_MCAST_CMD_ADD:
+			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
+			break;
+
+		case BNX2X_MCAST_CMD_DEL:
+			DP(BNX2X_MSG_SP, "Invalidating multicast "
+					 "MACs configuration\n");
+
+			/* clear the registry */
+			memset(o->registry.aprox_match.vec, 0,
+			       sizeof(o->registry.aprox_match.vec));
+			break;
+
+		case BNX2X_MCAST_CMD_RESTORE:
+			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
+			break;
+
+		default:
+			BNX2X_ERR("Unknown command: %d\n", cmd);
+			return -EINVAL;
+		}
+
+		/* Set the mcast filter in the internal memory */
+		for (i = 0; i < MC_HASH_SIZE; i++)
+			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
+	} else
+		/* clear the registry */
+		memset(o->registry.aprox_match.vec, 0,
+		       sizeof(o->registry.aprox_match.vec));
+
+	/* We are done */
+	r->clear_pending(r);
+
+	return 0;
+}
+
+static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int reg_sz = o->get_registry_size(o);
+
+	switch (cmd) {
+	/* DEL command deletes all currently configured MACs */
+	case BNX2X_MCAST_CMD_DEL:
+		o->set_registry_size(o, 0);
+		/* Don't break */
+
+	/* RESTORE command will restore the entire multicast configuration */
+	case BNX2X_MCAST_CMD_RESTORE:
+		p->mcast_list_len = reg_sz;
+		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+				 cmd, p->mcast_list_len);
+		break;
+
+	case BNX2X_MCAST_CMD_ADD:
+	case BNX2X_MCAST_CMD_CONT:
+		/* Multicast MACs on 57710 are configured as unicast MACs and
+		 * there is only a limited number of CAM entries for that
+		 * matter.
+		 */
+		if (p->mcast_list_len > o->max_cmd_len) {
+			BNX2X_ERR("Can't configure more than %d multicast MACs "
+				  "on 57710\n", o->max_cmd_len);
+			return -EINVAL;
+		}
+		/* Every configured MAC is cleared if the DEL command is
+		 * called. Only the last ADD command is relevant, since
+		 * every ADD command overrides the previous configuration.
+		 */
+		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+		if (p->mcast_list_len > 0)
+			o->set_registry_size(o, p->mcast_list_len);
+
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* We want to ensure that commands are executed one by one for 57710.
+	 * Therefore each non-empty command will consume o->max_cmd_len.
+	 */
+	if (p->mcast_list_len)
+		o->total_pending_num += o->max_cmd_len;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
+				      struct bnx2x_mcast_ramrod_params *p,
+				      int old_num_macs)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	o->set_registry_size(o, old_num_macs);
+
+	/* If the current command hasn't been handled yet, getting
+	 * here means it's meant to be dropped and we have to
+	 * update the number of outstanding MACs accordingly.
+	 */
+	if (p->mcast_list_len)
+		o->total_pending_num -= o->max_cmd_len;
+}
+
+static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
+					struct bnx2x_mcast_obj *o, int idx,
+					union bnx2x_mcast_config_data *cfg_data,
+					int cmd)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(r->rdata);
+
+	/* copy mac */
+	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
+		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
+				      &data->config_table[idx].middle_mac_addr,
+				      &data->config_table[idx].lsb_mac_addr,
+				      cfg_data->mac);
+
+		data->config_table[idx].vlan_id = 0;
+		data->config_table[idx].pf_id = r->func_id;
+		data->config_table[idx].clients_bit_vector =
+			cpu_to_le32(1 << r->cl_id);
+
+		SET_FLAG(data->config_table[idx].flags,
+			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_SET);
+	}
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
+ *
+ * @bp:		device handle
+ * @p:		ramrod parameters
+ * @len:	number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
+					struct bnx2x_mcast_ramrod_params *p,
+					u8 len)
+{
+	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(r->rdata);
+
+	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
+		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
+		     BNX2X_MAX_MULTICAST*(1 + r->func_id));
+
+	data->hdr.offset = offset;
+	data->hdr.client_id = 0xff;
+	data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
+			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+	data->hdr.length = len;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ * @start_idx:	index in the registry to start from
+ * @rdata_idx:	index in the ramrod data to start from
+ *
+ * The restore command for 57710 is, like all other commands, always a
+ * standalone command: start_idx and rdata_idx will always be 0. This
+ * function always succeeds.
+ * returns -1 to comply with the 57712 variant.
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e1(
+	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
+	int *rdata_idx)
+{
+	struct bnx2x_mcast_mac_elem *elem;
+	int i = 0;
+	union bnx2x_mcast_config_data cfg_data = {0};
+
+	/* go through the registry and configure the MACs from it. */
+	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
+		cfg_data.mac = &elem->mac[0];
+		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
+
+		i++;
+
+		DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+				 " mcast MAC\n",
+				 BNX2X_MAC_PRN_LIST(cfg_data.mac));
+	}
+
+	*rdata_idx = i;
+
+	return -1;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e1(
+	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_pending_mcast_cmd *cmd_pos;
+	struct bnx2x_mcast_mac_elem *pmac_pos;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	union bnx2x_mcast_config_data cfg_data = {0};
+	int cnt = 0;
+
+	/* If nothing to be done - return */
+	if (list_empty(&o->pending_cmds_head))
+		return 0;
+
+	/* Handle the first command */
+	cmd_pos = list_first_entry(&o->pending_cmds_head,
+				   struct bnx2x_pending_mcast_cmd, link);
+
+	switch (cmd_pos->type) {
+	case BNX2X_MCAST_CMD_ADD:
+		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
+			cfg_data.mac = &pmac_pos->mac[0];
+			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+			cnt++;
+
+			DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+					 " mcast MAC\n",
+					 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
+		}
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		cnt = cmd_pos->data.macs_num;
+		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		o->hdl_restore(bp, o, 0, &cnt);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+		return -EINVAL;
+	}
+
+	list_del(&cmd_pos->link);
+	kfree(cmd_pos);
+
+	return cnt;
+}
+
+/**
+ * bnx2x_get_fw_mac_addr - revert bnx2x_set_fw_mac_addr().
+ *
+ * @fw_hi:	upper 16 bits of the MAC in FW format
+ * @fw_mid:	middle 16 bits of the MAC in FW format
+ * @fw_lo:	lower 16 bits of the MAC in FW format
+ * @mac:	buffer to reconstruct the MAC address into
+ */
+static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+					 __le16 *fw_lo, u8 *mac)
+{
+	mac[1] = ((u8 *)fw_hi)[0];
+	mac[0] = ((u8 *)fw_hi)[1];
+	mac[3] = ((u8 *)fw_mid)[0];
+	mac[2] = ((u8 *)fw_mid)[1];
+	mac[5] = ((u8 *)fw_lo)[0];
+	mac[4] = ((u8 *)fw_lo)[1];
+}
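+
+/* Byte-order example (illustrative only): for mac == 00:10:18:aa:bb:cc
+ * the FW keeps each pair of MAC bytes in a 16-bit word with the first
+ * byte in the upper half, so logically
+ *
+ *	fw_hi == 0x0010, fw_mid == 0x18aa, fw_lo == 0xbbcc
+ *
+ * and this helper reassembles mac[0]..mac[5] from those words.
+ */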
+
+/**
+ * bnx2x_mcast_refresh_registry_e1 - update the exact match mcast registry
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ *
+ * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
+ * and update the registry correspondingly: if ADD - allocate memory and add
+ * the entries to the registry (list), if DELETE - clear the registry and free
+ * the memory.
+ */
+static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
+						  struct bnx2x_mcast_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct bnx2x_mcast_mac_elem *elem;
+	struct mac_configuration_cmd *data =
+			(struct mac_configuration_cmd *)(raw->rdata);
+
+	/* If first entry contains a SET bit - the command was ADD,
+	 * otherwise - DEL_ALL
+	 */
+	if (GET_FLAG(data->config_table[0].flags,
+			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
+		int i, len = data->hdr.length;
+
+		/* Break if it was a RESTORE command */
+		if (!list_empty(&o->registry.exact_match.macs))
+			return 0;
+
+		elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+		if (!elem) {
+			BNX2X_ERR("Failed to allocate registry memory\n");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < len; i++, elem++) {
+			bnx2x_get_fw_mac_addr(
+				&data->config_table[i].msb_mac_addr,
+				&data->config_table[i].middle_mac_addr,
+				&data->config_table[i].lsb_mac_addr,
+				elem->mac);
+			DP(BNX2X_MSG_SP, "Adding registry entry for ["
+					 BNX2X_MAC_FMT"]\n",
+				   BNX2X_MAC_PRN_LIST(elem->mac));
+			list_add_tail(&elem->link,
+				      &o->registry.exact_match.macs);
+		}
+	} else {
+		/* The registry entries were allocated as a single block
+		 * (see the ADD branch above), so freeing the first element
+		 * releases them all.
+		 */
+		elem = list_first_entry(&o->registry.exact_match.macs,
+					struct bnx2x_mcast_mac_elem, link);
+		DP(BNX2X_MSG_SP, "Deleting a registry\n");
+		kfree(elem);
+		INIT_LIST_HEAD(&o->registry.exact_match.macs);
+	}
+
+	return 0;
+}
+
+static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p,
+				int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	int cnt = 0, i, rc;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	/* First set all entries as invalid */
+	for (i = 0; i < o->max_cmd_len; i++)
+		SET_FLAG(data->config_table[i].flags,
+			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_INVALIDATE);
+
+	/* Handle pending commands first */
+	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
+
+	/* If there are no more pending commands - clear SCHEDULED state */
+	if (list_empty(&o->pending_cmds_head))
+		o->clear_sched(o);
+
+	/* The below may be true iff there were no pending commands */
+	if (!cnt)
+		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
+
+	/* For 57710 every command has o->max_cmd_len length to ensure that
+	 * commands are done one at a time.
+	 */
+	o->total_pending_num -= o->max_cmd_len;
+
+	/* send a ramrod */
+
+	WARN_ON(cnt > o->max_cmd_len);
+
+	/* Set ramrod header (in particular, a number of entries to update) */
+	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
+
+	/* Update the registry: we need the registry contents to always be
+	 * up to date in order to be able to execute a RESTORE opcode. Here
+	 * we use the fact that for 57710 we send one command at a time,
+	 * hence we may take the registry update out of the command handling
+	 * and do it in a simpler way here.
+	 */
+	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
+	if (rc)
+		return rc;
+
+	/* Commit writes towards the memory before sending a ramrod */
+	mb();
+
+	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
+	 * RAMROD_PENDING status immediately.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		raw->clear_pending(raw);
+		return 0;
+	} else {
+		/* Send a ramrod */
+		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
+				   U64_HI(raw->rdata_mapping),
+				   U64_LO(raw->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			return rc;
+
+		/* Ramrod completion is pending */
+		return 1;
+	}
+}
+
+static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
+{
+	return o->registry.exact_match.num_macs_set;
+}
+
+static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
+{
+	return o->registry.aprox_match.num_bins_set;
+}
+
+static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
+						int n)
+{
+	o->registry.exact_match.num_macs_set = n;
+}
+
+static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
+						int n)
+{
+	o->registry.aprox_match.num_bins_set = n;
+}
+
+int bnx2x_config_mcast(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p,
+		       int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc = 0, old_reg_size;
+
+	/* This is needed to recover number of currently configured mcast macs
+	 * in case of failure.
+	 */
+	old_reg_size = o->get_registry_size(o);
+
+	/* Do some calculations and checks */
+	rc = o->validate(bp, p, cmd);
+	if (rc)
+		return rc;
+
+	/* Return if there is no work to do */
+	if ((!p->mcast_list_len) && (!o->check_sched(o)))
+		return 0;
+
+	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
+			 "o->max_cmd_len=%d\n", o->total_pending_num,
+			 p->mcast_list_len, o->max_cmd_len);
+
+	/* Enqueue the current command to the pending list if we can't complete
+	 * it in the current iteration
+	 */
+	if (r->check_pending(r) ||
+	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
+		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
+		if (rc < 0)
+			goto error_exit1;
+
+		/* As long as the current command is in a command list we
+		 * don't need to handle it separately.
+		 */
+		p->mcast_list_len = 0;
+	}
+
+	if (!r->check_pending(r)) {
+
+		/* Set 'pending' state */
+		r->set_pending(r);
+
+		/* Configure the new classification in the chip */
+		rc = o->config_mcast(bp, p, cmd);
+		if (rc < 0)
+			goto error_exit2;
+
+		/* Wait for a ramrod completion if was requested */
+		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+			rc = o->wait_comp(bp, o);
+	}
+
+	return rc;
+
+error_exit2:
+	r->clear_pending(r);
+
+error_exit1:
+	o->revert(bp, p, old_reg_size);
+
+	return rc;
+}
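+
+/* Usage sketch (illustrative; assumes a bp->mcast_obj field as declared in
+ * bnx2x.h): a typical caller fills the ramrod parameters and requests a
+ * synchronous ADD:
+ *
+ *	struct bnx2x_mcast_ramrod_params rparam = {0};
+ *
+ *	rparam.mcast_obj = &bp->mcast_obj;
+ *	// ... fill rparam.mcast_list and rparam.mcast_list_len ...
+ *	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
+ *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
+ *
+ * A negative rc is an error, 0 means the request has completed and a
+ * positive value means a ramrod completion is still pending.
+ */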
+
+static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(o->sched_state, o->raw.pstate);
+	smp_mb__after_clear_bit();
+}
+
+static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
+{
+	smp_mb__before_clear_bit();
+	set_bit(o->sched_state, o->raw.pstate);
+	smp_mb__after_clear_bit();
+}
+
+static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
+{
+	return !!test_bit(o->sched_state, o->raw.pstate);
+}
+
+static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
+{
+	return o->raw.check_pending(&o->raw) || o->check_sched(o);
+}
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+			  struct bnx2x_mcast_obj *mcast_obj,
+			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+			  int state, unsigned long *pstate, bnx2x_obj_type type)
+{
+	memset(mcast_obj, 0, sizeof(*mcast_obj));
+
+	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
+			   rdata, rdata_mapping, state, pstate, type);
+
+	mcast_obj->engine_id = engine_id;
+
+	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
+
+	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
+	mcast_obj->check_sched = bnx2x_mcast_check_sched;
+	mcast_obj->set_sched = bnx2x_mcast_set_sched;
+	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
+
+	if (CHIP_IS_E1(bp)) {
+		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
+		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
+		mcast_obj->hdl_restore       =
+			bnx2x_mcast_handle_restore_cmd_e1;
+		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
+
+		if (CHIP_REV_IS_SLOW(bp))
+			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
+		else
+			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
+
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
+		mcast_obj->validate          = bnx2x_mcast_validate_e1;
+		mcast_obj->revert            = bnx2x_mcast_revert_e1;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_exact;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_exact;
+
+		/* 57710 is the only chip that uses the exact match for mcast
+		 * at the moment.
+		 */
+		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
+
+	} else if (CHIP_IS_E1H(bp)) {
+		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
+		mcast_obj->enqueue_cmd   = NULL;
+		mcast_obj->hdl_restore   = NULL;
+		mcast_obj->check_pending = bnx2x_mcast_check_pending;
+
+		/* 57711 doesn't send a ramrod, so it has unlimited credit
+		 * for one command.
+		 */
+		mcast_obj->max_cmd_len       = -1;
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = NULL;
+		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
+		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_aprox;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_aprox;
+	} else {
+		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
+		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
+		mcast_obj->hdl_restore       =
+			bnx2x_mcast_handle_restore_cmd_e2;
+		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
+		/* TODO: There should be a proper HSI define for this number!!!
+		 */
+		mcast_obj->max_cmd_len       = 16;
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
+		mcast_obj->validate          = bnx2x_mcast_validate_e2;
+		mcast_obj->revert            = bnx2x_mcast_revert_e2;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_aprox;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_aprox;
+	}
+}
+
+/*************************** Credit handling **********************************/
+
+/**
+ * __atomic_add_ifless - add if the result is less than a given value.
+ *
+ * @v:	pointer of type atomic_t
+ * @a:	the amount to add to v...
+ * @u:	...if (v + a) is less than u.
+ *
+ * returns true if (v + a) was less than u, and false otherwise.
+ */
+static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c + a >= u))
+			return false;
+
+		old = atomic_cmpxchg((v), c, c + a);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return true;
+}
+
+/**
+ * __atomic_dec_ifmoe - dec if the result is more than or equal to a given value.
+ *
+ * @v:	pointer of type atomic_t
+ * @a:	the amount to dec from v...
+ * @u:	...if (v - a) is more than or equal to u.
+ *
+ * returns true if (v - a) was more than or equal to u, and false
+ * otherwise.
+ */
+static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c - a < u))
+			return false;
+
+		old = atomic_cmpxchg((v), c, c - a);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return true;
+}
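+
+/* Semantics sketch (illustrative only): starting from a credit of 5,
+ *
+ *	__atomic_dec_ifmoe(&v, 3, 0)	- 5 - 3 >= 0 -> v = 2, returns true
+ *	__atomic_dec_ifmoe(&v, 3, 0)	- 2 - 3 <  0 -> v = 2, returns false
+ *	__atomic_add_ifless(&v, 3, 6)	- 2 + 3 <  6 -> v = 5, returns true
+ *
+ * i.e. get() below consumes credit only when enough is available and
+ * put() refuses to overfill the pool.
+ */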
+
+static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+	bool rc;
+
+	smp_mb();
+	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
+	smp_mb();
+
+	return rc;
+}
+
+static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+	bool rc;
+
+	smp_mb();
+
+	/* Don't allow a refill if credit + cnt > pool_sz */
+	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
+
+	smp_mb();
+
+	return rc;
+}
+
+static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
+{
+	int cur_credit;
+
+	smp_mb();
+	cur_credit = atomic_read(&o->credit);
+
+	return cur_credit;
+}
+
+static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
+					  int cnt)
+{
+	return true;
+}
+
+static bool bnx2x_credit_pool_get_entry(
+	struct bnx2x_credit_pool_obj *o,
+	int *offset)
+{
+	int idx, vec, i;
+
+	*offset = -1;
+
+	/* Find "internal cam-offset" then add to base for this object... */
+	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
+
+		/* Skip the current vector if there are no free entries in it */
+		if (!o->pool_mirror[vec])
+			continue;
+
+		/* If we've got here we are going to find a free entry */
+		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
+		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
+
+			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
+				/* Got one!! */
+				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
+				*offset = o->base_pool_offset + idx;
+				return true;
+			}
+	}
+
+	return false;
+}
+
+static bool bnx2x_credit_pool_put_entry(
+	struct bnx2x_credit_pool_obj *o,
+	int offset)
+{
+	if (offset < o->base_pool_offset)
+		return false;
+
+	offset -= o->base_pool_offset;
+
+	if (offset >= o->pool_sz)
+		return false;
+
+	/* Return the entry to the pool */
+	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
+
+	return true;
+}
+
+static bool bnx2x_credit_pool_put_entry_always_true(
+	struct bnx2x_credit_pool_obj *o,
+	int offset)
+{
+	return true;
+}
+
+static bool bnx2x_credit_pool_get_entry_always_true(
+	struct bnx2x_credit_pool_obj *o,
+	int *offset)
+{
+	*offset = -1;
+	return true;
+}
+
+/**
+ * bnx2x_init_credit_pool - initialize credit pool internals.
+ *
+ * @p:		credit pool object to initialize
+ * @base:	Base entry in the CAM to use.
+ * @credit:	pool size.
+ *
+ * If base is negative no CAM entries handling will be performed.
+ * If credit is negative pool operations will always succeed (unlimited pool).
+ *
+ */
+static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+					  int base, int credit)
+{
+	/* Zero the object first */
+	memset(p, 0, sizeof(*p));
+
+	/* Set the table to all 1s */
+	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
+
+	/* Init a pool as full */
+	atomic_set(&p->credit, credit);
+
+	/* The total pool size */
+	p->pool_sz = credit;
+
+	p->base_pool_offset = base;
+
+	/* Commit the change */
+	smp_mb();
+
+	p->check = bnx2x_credit_pool_check;
+
+	/* if pool credit is negative - disable the checks */
+	if (credit >= 0) {
+		p->put      = bnx2x_credit_pool_put;
+		p->get      = bnx2x_credit_pool_get;
+		p->put_entry = bnx2x_credit_pool_put_entry;
+		p->get_entry = bnx2x_credit_pool_get_entry;
+	} else {
+		p->put      = bnx2x_credit_pool_always_true;
+		p->get      = bnx2x_credit_pool_always_true;
+		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+	}
+
+	/* If base is negative - disable entries handling */
+	if (base < 0) {
+		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+	}
+}
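+
+/* Examples (illustrative only): the two "special" argument values disable
+ * parts of the machinery:
+ *
+ *	bnx2x_init_credit_pool(p, -1, sz);	// credit checked, no CAM entries
+ *	bnx2x_init_credit_pool(p, 0, -1);	// CAM base 0, unlimited credit
+ *	bnx2x_init_credit_pool(p, 0, 0);	// empty pool - every get() fails
+ */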
+
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+				struct bnx2x_credit_pool_obj *p, u8 func_id,
+				u8 func_num)
+{
+/* TODO: this will be defined in consts as well... */
+#define BNX2X_CAM_SIZE_EMUL 5
+
+	int cam_sz;
+
+	if (CHIP_IS_E1(bp)) {
+		/* In E1, multicast MACs are saved in the CAM */
+		if (!CHIP_REV_IS_SLOW(bp))
+			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
+		else
+			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
+
+		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+
+	} else if (CHIP_IS_E1H(bp)) {
+		/* CAM credit is equally divided among all active functions
+		 * on the PORT.
+		 */
+		if (func_num > 0) {
+			if (!CHIP_REV_IS_SLOW(bp))
+				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
+			else
+				cam_sz = BNX2X_CAM_SIZE_EMUL;
+			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+		} else {
+			/* this should never happen! Block MAC operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+		}
+
+	} else {
+
+		/*
+		 * CAM credit is equally divided among all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+			if (!CHIP_REV_IS_SLOW(bp))
+				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+			else
+				cam_sz = BNX2X_CAM_SIZE_EMUL;
+
+			/*
+			 * No need for CAM entries handling for 57712 and
+			 * newer.
+			 */
+			bnx2x_init_credit_pool(p, -1, cam_sz);
+		} else {
+			/* this should never happen! Block MAC operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+		}
+
+	}
+}
+
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+				 struct bnx2x_credit_pool_obj *p,
+				 u8 func_id,
+				 u8 func_num)
+{
+	if (CHIP_IS_E1x(bp)) {
+		/*
+		 * There is no VLAN credit in HW on 57710 and 57711;
+		 * only MAC / MAC-VLAN can be set
+		 */
+		bnx2x_init_credit_pool(p, 0, -1);
+	} else {
+		/*
+		 * CAM credit is equally divided among all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+			int credit = MAX_VLAN_CREDIT_E2 / func_num;
+			bnx2x_init_credit_pool(p, func_id * credit, credit);
+		} else
+			/* this should never happen! Block VLAN operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+	}
+}
+
+/****************** RSS Configuration ******************/
+/**
+ * bnx2x_debug_print_ind_table - prints the indirection table configuration.
+ *
+ * @bp:		driver handle
+ * @p:		pointer to rss configuration
+ *
+ * Prints the table when the NETIF_MSG_IFUP debug level is configured.
+ */
+static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
+					struct bnx2x_config_rss_params *p)
+{
 	int i;
 
-	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
-		return;
+	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
+	DP(BNX2X_MSG_SP, "0x0000: ");
+	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
+
+		/* Print 4 bytes in a line */
+		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
+		    (((i + 1) & 0x3) == 0)) {
+			DP_CONT(BNX2X_MSG_SP, "\n");
+			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
+		}
+	}
+
+	DP_CONT(BNX2X_MSG_SP, "\n");
+}
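+
+/* Sample output (illustrative) as it would appear in the log for an
+ * identity-like table:
+ *
+ *	Setting indirection table to:
+ *	0x0000: 0x00 0x01 0x02 0x03
+ *	0x0004: 0x04 0x05 0x06 0x07
+ *	...
+ */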
+
+/**
+ * bnx2x_setup_rss - configure RSS
+ *
+ * @bp:		device handle
+ * @p:		rss configuration
+ *
+ * Sends an RSS_UPDATE ramrod for this purpose.
+ */
+static int bnx2x_setup_rss(struct bnx2x *bp,
+			   struct bnx2x_config_rss_params *p)
+{
+	struct bnx2x_rss_config_obj *o = p->rss_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct eth_rss_update_ramrod_data *data =
+		(struct eth_rss_update_ramrod_data *)(r->rdata);
+	u8 rss_mode = 0;
+	int rc;
+
+	memset(data, 0, sizeof(*data));
+
+	DP(BNX2X_MSG_SP, "Configuring RSS\n");
+
+	/* Set an echo field */
+	data->echo = (r->cid & BNX2X_SWCID_MASK) |
+		     (r->state << BNX2X_SWCID_SHIFT);
+
+	/* RSS mode */
+	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_DISABLED;
+	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_REGULAR;
+	else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_VLAN_PRI;
+	else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_E1HOV_PRI;
+	else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_IP_DSCP;
+
+	data->rss_mode = rss_mode;
+
+	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
+
+	/* RSS capabilities */
+	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+
+	/* Hashing mask */
+	data->rss_result_mask = p->rss_result_mask;
+
+	/* RSS engine ID */
+	data->rss_engine_id = o->engine_id;
+
+	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
+
+	/* Indirection table */
+	memcpy(data->indirection_table, p->ind_table,
+		  T_ETH_INDIRECTION_TABLE_SIZE);
+
+	/* Remember the last configuration */
+	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+
+	/* Print the indirection table */
+	if (netif_msg_ifup(bp))
+		bnx2x_debug_print_ind_table(bp, p);
+
+	/* RSS keys */
+	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
+		memcpy(&data->rss_key[0], &p->rss_key[0],
+		       sizeof(data->rss_key));
+		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
+	}
+
+	/* Commit writes towards the memory before sending a ramrod */
+	mb();
+
+	/* Send a ramrod */
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
+			   U64_HI(r->rdata_mapping),
+			   U64_LO(r->rdata_mapping),
+			   ETH_CONNECTION_TYPE);
+
+	if (rc < 0)
+		return rc;
+
+	return 1;
+}
+
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+			     u8 *ind_table)
+{
+	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
+}
+
+int bnx2x_config_rss(struct bnx2x *bp,
+		     struct bnx2x_config_rss_params *p)
+{
+	int rc;
+	struct bnx2x_rss_config_obj *o = p->rss_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	/* Do nothing if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+		return 0;
+
+	r->set_pending(r);
+
+	rc = o->config_rss(bp, p);
+	if (rc < 0) {
+		r->clear_pending(r);
+		return rc;
+	}
+
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+		rc = r->wait_comp(bp, r);
+
+	return rc;
+}
+
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+			       struct bnx2x_rss_config_obj *rss_obj,
+			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+			       void *rdata, dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       bnx2x_obj_type type)
+{
+	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
+			   rdata_mapping, state, pstate, type);
+
+	rss_obj->engine_id  = engine_id;
+	rss_obj->config_rss = bnx2x_setup_rss;
+}
+
+/********************** Queue state object ***********************************/
+
+/**
+ * bnx2x_queue_state_change - perform Queue state change transition
+ *
+ * @bp:		device handle
+ * @params:	parameters to perform the transition
+ *
+ * returns 0 in case of a successfully completed transition, a negative error
+ * code in case of failure, and a positive value if there is a completion
+ * that is still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous commands).
+ *
+ */
+int bnx2x_queue_state_change(struct bnx2x *bp,
+			     struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	int rc, pending_bit;
+	unsigned long *pending = &o->pending;
+
+	/* Check that the requested transition is legal */
+	if (o->check_transition(bp, o, params))
+		return -EINVAL;
+
+	/* Set "pending" bit */
+	pending_bit = o->set_pending(o, params);
+
+	/* Don't send a command if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
+		o->complete_cmd(bp, o, pending_bit);
+	else {
+		/* Send a ramrod */
+		rc = o->send_cmd(bp, params);
+		if (rc) {
+			o->next_state = BNX2X_Q_STATE_MAX;
+			clear_bit(pending_bit, pending);
+			smp_mb__after_clear_bit();
+			return rc;
+		}
+
+		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+			rc = o->wait_comp(bp, o, pending_bit);
+			if (rc)
+				return rc;
+
+			return 0;
+		}
+	}
+
+	return !!test_bit(pending_bit, pending);
+}
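+
+/* Usage sketch (illustrative; assumes a q_obj field in the fastpath
+ * structure as declared in bnx2x.h): sending a synchronous HALT:
+ *
+ *	struct bnx2x_queue_state_params q_params = {0};
+ *
+ *	q_params.q_obj = &fp->q_obj;
+ *	q_params.cmd = BNX2X_Q_CMD_HALT;
+ *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ *	rc = bnx2x_queue_state_change(bp, &q_params);
+ */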
+
+static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
+				   struct bnx2x_queue_state_params *params)
+{
+	enum bnx2x_queue_cmd cmd = params->cmd, bit;
+
+	/* ACTIVATE and DEACTIVATE commands are implemented on top of
+	 * UPDATE command.
+	 */
+	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
+	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
+		bit = BNX2X_Q_CMD_UPDATE;
+	else
+		bit = cmd;
+
+	set_bit(bit, &obj->pending);
+	return bit;
+}
+
+static int bnx2x_queue_wait_comp(struct bnx2x *bp,
+				 struct bnx2x_queue_sp_obj *o,
+				 enum bnx2x_queue_cmd cmd)
+{
+	return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_queue_comp_cmd - complete the state change command.
+ *
+ * @bp:		device handle
+ * @o:		queue state object
+ * @cmd:	command to complete
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				enum bnx2x_queue_cmd cmd)
+{
+	unsigned long cur_pending = o->pending;
+
+	if (!test_and_clear_bit(cmd, &cur_pending)) {
+		BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
+			  "pending 0x%lx, next_state %d\n", cmd, o->cid,
+			  o->state, cur_pending, o->next_state);
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
+			 "setting state to %d\n", cmd, o->cid, o->next_state);
+
+	o->state = o->next_state;
+	o->next_state = BNX2X_Q_STATE_MAX;
+
+	/* It's important that o->state and o->next_state are
+	 * updated before o->pending.
+	 */
+	wmb();
+
+	clear_bit(cmd, &o->pending);
+	smp_mb__after_clear_bit();
+
+	return 0;
+}
+
+static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
+				struct bnx2x_queue_state_params *cmd_params,
+				struct client_init_ramrod_data *data)
+{
+	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
+
+	/* Rx data */
+
+	/* IPv6 TPA supported for E2 and above only */
+	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA, &params->flags) *
+				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
+}
+
+static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
+				struct bnx2x_queue_state_params *cmd_params,
+				struct client_init_ramrod_data *data)
+{
+	struct bnx2x_queue_sp_obj *o = cmd_params->q_obj;
+	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
+
+	/* general */
+	data->general.client_id = o->cl_id;
+
+	if (test_bit(BNX2X_Q_FLG_STATS, &params->flags)) {
+		data->general.statistics_counter_id =
+					params->gen_params.stat_id;
+		data->general.statistics_en_flg = 1;
+		data->general.statistics_zero_flg =
+			test_bit(BNX2X_Q_FLG_ZERO_STATS, &params->flags);
+	} else
+		data->general.statistics_counter_id =
+					DISABLE_STATISTIC_COUNTER_ID_VALUE;
+
+	data->general.is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, &params->flags);
+	data->general.activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE,
+					      &params->flags);
+	data->general.sp_client_id = params->gen_params.spcl_id;
+	data->general.mtu = cpu_to_le16(params->gen_params.mtu);
+	data->general.func_id = o->func_id;
+
+	data->general.cos = params->txq_params.cos;
+
+	data->general.traffic_type =
+		test_bit(BNX2X_Q_FLG_FCOE, &params->flags) ?
+		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+
+	/* Rx data */
+	data->rx.tpa_en = test_bit(BNX2X_Q_FLG_TPA, &params->flags) *
+				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+	data->rx.vmqueue_mode_en_flg = 0;
+
+	data->rx.cache_line_alignment_log_size =
+		params->rxq_params.cache_line_log;
+	data->rx.enable_dynamic_hc =
+		test_bit(BNX2X_Q_FLG_DHC, &params->flags);
+	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
+	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
+	data->rx.max_agg_size = cpu_to_le16(params->rxq_params.tpa_agg_sz);
+
+	/* Always start in DROP_ALL mode */
+	data->rx.state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
+				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
+
+	/* We don't set drop flags */
+	data->rx.drop_ip_cs_err_flg = 0;
+	data->rx.drop_tcp_cs_err_flg = 0;
+	data->rx.drop_ttl0_flg = 0;
+	data->rx.drop_udp_cs_err_flg = 0;
+	data->rx.inner_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_FLG_VLAN, &params->flags);
+	data->rx.outer_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_FLG_OV, &params->flags);
+	data->rx.status_block_id = params->rxq_params.fw_sb_id;
+	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
+	data->rx.max_tpa_queues = params->rxq_params.max_tpa_queues;
+	data->rx.max_bytes_on_bd = cpu_to_le16(params->rxq_params.buf_sz);
+	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
+	data->rx.bd_page_base.lo =
+		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
+	data->rx.bd_page_base.hi =
+		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
+	data->rx.sge_page_base.lo =
+		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
+	data->rx.sge_page_base.hi =
+		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
+	data->rx.cqe_page_base.lo =
+		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
+	data->rx.cqe_page_base.hi =
+		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
+	data->rx.is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS,
+					   &params->flags);
+
+	if (test_bit(BNX2X_Q_FLG_MCAST, &params->flags)) {
+		data->rx.approx_mcast_engine_id = o->func_id;
+		data->rx.is_approx_mcast = 1;
+	}
+
+	data->rx.rss_engine_id = params->rxq_params.rss_engine_id;
+
+	/* flow control data */
+	data->rx.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
+	data->rx.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
+	data->rx.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
+	data->rx.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
+	data->rx.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
+	data->rx.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
+	data->rx.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
+
+	/* silent vlan removal */
+	data->rx.silent_vlan_removal_flg =
+		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &params->flags);
+	data->rx.silent_vlan_value =
+		cpu_to_le16(params->rxq_params.silent_removal_value);
+	data->rx.silent_vlan_mask =
+		cpu_to_le16(params->rxq_params.silent_removal_mask);
+
+	/* Tx data */
+	data->tx.enforce_security_flg =
+		test_bit(BNX2X_Q_FLG_TX_SEC, &params->flags);
+	data->tx.default_vlan =
+		cpu_to_le16(params->txq_params.default_vlan);
+	data->tx.default_vlan_flg =
+		test_bit(BNX2X_Q_FLG_DEF_VLAN, &params->flags);
+	data->tx.tx_switching_flg =
+		test_bit(BNX2X_Q_FLG_TX_SWITCH, &params->flags);
+	data->tx.anti_spoofing_flg =
+		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, &params->flags);
+	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
+	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
+	data->tx.tss_leading_client_id = params->txq_params.tss_leading_cl_id;
+
+	data->tx.tx_bd_page_base.lo =
+		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
+	data->tx.tx_bd_page_base.hi =
+		cpu_to_le32(U64_HI(params->txq_params.dscr_map));
+
+	/* Don't configure any Tx switching mode during queue SETUP */
+	data->tx.state = 0;
+}
+
+/**
+ * bnx2x_q_init - init HW/FW queue
+ *
+ * @bp:		device handle
+ * @params:	queue state parameters
+ *
+ * HW/FW initial Queue configuration:
+ *      - HC: Rx and Tx
+ *      - CDU context validation
+ *
+ */
+static inline int bnx2x_q_init(struct bnx2x *bp,
+			       struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct bnx2x_queue_init_params *init = &params->params.init;
+	u16 hc_usec;
+
+	/* Tx HC configuration */
+	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
+	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
+		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
+
+		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
+			init->tx.sb_cq_index,
+			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
+			hc_usec);
+	}
+
+	/* Rx HC configuration */
+	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
+	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
+		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
+
+		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
+			init->rx.sb_cq_index,
+			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
+			hc_usec);
+	}
+
+	/* Set CDU context validation values */
+	bnx2x_set_ctx_validation(bp, init->cxt, o->cid);
+
+	/* As no ramrod is sent, complete the command immediately */
+	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
+
+	mmiowb();
+	smp_mb();
+
+	return 0;
+}
+
+static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_init_ramrod_data *rdata =
+		(struct client_init_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+
+	mb();
+
+	return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_init_ramrod_data *rdata =
+		(struct client_init_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
+
+	mb();
+
+	return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static void bnx2x_q_fill_update_data(struct bnx2x *bp,
+				     struct bnx2x_queue_sp_obj *obj,
+				     struct bnx2x_queue_update_params *params,
+				     struct client_update_ramrod_data *data)
+{
+	/* Client ID of the client to update */
+	data->client_id = obj->cl_id;
+
+	/* Function ID of the client to update */
+	data->func_id = obj->func_id;
+
+	/* Default VLAN value */
+	data->default_vlan = cpu_to_le16(params->def_vlan);
+
+	/* Inner VLAN stripping */
+	data->inner_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
+	data->inner_vlan_removal_change_flg =
+		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+			 &params->update_flags);
+
+	/* Outer VLAN stripping */
+	data->outer_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
+	data->outer_vlan_removal_change_flg =
+		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+			 &params->update_flags);
+
+	/* Drop packets that have source MAC that doesn't belong to this
+	 * Queue.
+	 */
+	data->anti_spoofing_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
+	data->anti_spoofing_change_flg =
+		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
+
+	/* Activate/Deactivate */
+	data->activate_flg =
+		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
+	data->activate_change_flg =
+		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
+
+	/* Enable default VLAN */
+	data->default_vlan_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
+	data->default_vlan_change_flg =
+		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+			 &params->update_flags);
+
+	/* silent vlan removal */
+	data->silent_vlan_change_flg =
+		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+			 &params->update_flags);
+	data->silent_vlan_removal_flg =
+		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
+	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
+	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+}
+
+static inline int bnx2x_q_send_update(struct bnx2x *bp,
+				      struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_update_ramrod_data *rdata =
+		(struct client_update_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_update_data(bp, o, &params->params.update, rdata);
+
+	mb();
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, o->cid,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+/**
+ * bnx2x_q_send_deactivate - send DEACTIVATE command
+ *
+ * @bp:		device handle
+ * @params:	queue state parameters
+ *
+ * Implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_update_params *update = &params->params.update;
+
+	memset(update, 0, sizeof(*update));
+
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+	return bnx2x_q_send_update(bp, params);
+}
+
+/**
+ * bnx2x_q_send_activate - send ACTIVATE command
+ *
+ * @bp:		device handle
+ * @params:	queue state parameters
+ *
+ * Implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_activate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_update_params *update = &params->params.update;
+
+	memset(update, 0, sizeof(*update));
+
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+	return bnx2x_q_send_update(bp, params);
+}
+
+static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	/* TODO: Not implemented yet. */
+	return -1;
+}
+
+static inline int bnx2x_q_send_halt(struct bnx2x *bp,
+				    struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, o->cid, 0, o->cl_id,
+			     ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
+				       struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, o->cid, 0, 0,
+			     NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, o->cid, 0, 0,
+			     ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_empty(struct bnx2x *bp,
+				     struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, o->cid, 0, 0,
+			     ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_INIT:
+		return bnx2x_q_init(bp, params);
+	case BNX2X_Q_CMD_DEACTIVATE:
+		return bnx2x_q_send_deactivate(bp, params);
+	case BNX2X_Q_CMD_ACTIVATE:
+		return bnx2x_q_send_activate(bp, params);
+	case BNX2X_Q_CMD_UPDATE:
+		return bnx2x_q_send_update(bp, params);
+	case BNX2X_Q_CMD_UPDATE_TPA:
+		return bnx2x_q_send_update_tpa(bp, params);
+	case BNX2X_Q_CMD_HALT:
+		return bnx2x_q_send_halt(bp, params);
+	case BNX2X_Q_CMD_CFC_DEL:
+		return bnx2x_q_send_cfc_del(bp, params);
+	case BNX2X_Q_CMD_TERMINATE:
+		return bnx2x_q_send_terminate(bp, params);
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_q_send_empty(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
+				    struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_SETUP:
+		return bnx2x_q_send_setup_e1x(bp, params);
+	case BNX2X_Q_CMD_INIT:
+	case BNX2X_Q_CMD_DEACTIVATE:
+	case BNX2X_Q_CMD_ACTIVATE:
+	case BNX2X_Q_CMD_UPDATE:
+	case BNX2X_Q_CMD_UPDATE_TPA:
+	case BNX2X_Q_CMD_HALT:
+	case BNX2X_Q_CMD_CFC_DEL:
+	case BNX2X_Q_CMD_TERMINATE:
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_queue_send_cmd_cmn(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
+				   struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_SETUP:
+		return bnx2x_q_send_setup_e2(bp, params);
+	case BNX2X_Q_CMD_INIT:
+	case BNX2X_Q_CMD_DEACTIVATE:
+	case BNX2X_Q_CMD_ACTIVATE:
+	case BNX2X_Q_CMD_UPDATE:
+	case BNX2X_Q_CMD_UPDATE_TPA:
+	case BNX2X_Q_CMD_HALT:
+	case BNX2X_Q_CMD_CFC_DEL:
+	case BNX2X_Q_CMD_TERMINATE:
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_queue_send_cmd_cmn(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+/**
+ * bnx2x_queue_chk_transition - check state machine of a regular Queue
+ *
+ * @bp:		device handle
+ * @o:		queue state object
+ * @params:	queue state parameters
+ *
+ * Applies to a regular (i.e. non-Forwarding) Queue. Checks whether
+ * the requested command is legal in the current state and, if it is,
+ * sets `next_state' in the object, which the completion flow will
+ * then use to set the object's `state'.
+ *
+ * Returns 0 if the requested command is a legal transition,
+ *         -EINVAL otherwise.
+ */
+static int bnx2x_queue_chk_transition(struct bnx2x *bp,
+				      struct bnx2x_queue_sp_obj *o,
+				      struct bnx2x_queue_state_params *params)
+{
+	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
+	enum bnx2x_queue_cmd cmd = params->cmd;
+
+	switch (state) {
+	case BNX2X_Q_STATE_RESET:
+		if (cmd == BNX2X_Q_CMD_INIT)
+			next_state = BNX2X_Q_STATE_INITIALIZED;
+
+		break;
+	case BNX2X_Q_STATE_INITIALIZED:
+		if (cmd == BNX2X_Q_CMD_SETUP) {
+			if (test_bit(BNX2X_Q_FLG_ACTIVE,
+				     &params->params.setup.flags))
+				next_state = BNX2X_Q_STATE_ACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_INACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_ACTIVE:
+		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
+			next_state = BNX2X_Q_STATE_INACTIVE;
+
+		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+			next_state = BNX2X_Q_STATE_ACTIVE;
+
+		else if (cmd == BNX2X_Q_CMD_HALT)
+			next_state = BNX2X_Q_STATE_STOPPED;
+
+		else if (cmd == BNX2X_Q_CMD_UPDATE) {
+			struct bnx2x_queue_update_params *update_params =
+				&params->params.update;
+
+			/* If "active" state change is requested, update the
+			 *  state accordingly.
+			 */
+			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+				     &update_params->update_flags) &&
+			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+				      &update_params->update_flags))
+				next_state = BNX2X_Q_STATE_INACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_ACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_INACTIVE:
+		if (cmd == BNX2X_Q_CMD_ACTIVATE)
+			next_state = BNX2X_Q_STATE_ACTIVE;
+
+		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+			next_state = BNX2X_Q_STATE_INACTIVE;
+
+		else if (cmd == BNX2X_Q_CMD_HALT)
+			next_state = BNX2X_Q_STATE_STOPPED;
+
+		else if (cmd == BNX2X_Q_CMD_UPDATE) {
+			struct bnx2x_queue_update_params *update_params =
+				&params->params.update;
+
+			/* If "active" state change is requested, update the
+			 * state accordingly.
+			 */
+			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+				     &update_params->update_flags) &&
+			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+				     &update_params->update_flags))
+				next_state = BNX2X_Q_STATE_ACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_INACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_STOPPED:
+		if (cmd == BNX2X_Q_CMD_TERMINATE)
+			next_state = BNX2X_Q_STATE_TERMINATED;
+
+		break;
+	case BNX2X_Q_STATE_TERMINATED:
+		if (cmd == BNX2X_Q_CMD_CFC_DEL)
+			next_state = BNX2X_Q_STATE_RESET;
+
+		break;
+	default:
+		BNX2X_ERR("Illegal state: %d\n", state);
+	}
+
+	/* Transition is assured */
+	if (next_state != BNX2X_Q_STATE_MAX) {
+		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
+				 state, cmd, next_state);
+		o->next_state = next_state;
+		return 0;
+	}
+
+	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
+
+	return -EINVAL;
+}
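
For orientation, the legal transitions encoded by the switch above can be summarized as follows (a summary derived from this function; EMPTY, UPDATE_TPA and state-preserving UPDATEs are self-loops):

	/*
	 * RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
	 * ACTIVE   --DEACTIVATE or UPDATE(!ACTIVATE)--> INACTIVE
	 * INACTIVE --ACTIVATE   or UPDATE(ACTIVATE)---> ACTIVE
	 * ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
	 * TERMINATED --CFC_DEL--> RESET
	 */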
+
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+			  struct bnx2x_queue_sp_obj *obj,
+			  u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			  dma_addr_t rdata_mapping, unsigned long type)
+{
+	memset(obj, 0, sizeof(*obj));
+
+	obj->cid = cid;
+	obj->cl_id = cl_id;
+	obj->func_id = func_id;
+	obj->rdata = rdata;
+	obj->rdata_mapping = rdata_mapping;
+	obj->type = type;
+	obj->next_state = BNX2X_Q_STATE_MAX;
+
+	if (CHIP_IS_E1x(bp))
+		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
+	else
+		obj->send_cmd = bnx2x_queue_send_cmd_e2;
+
+	obj->check_transition = bnx2x_queue_chk_transition;
+
+	obj->complete_cmd = bnx2x_queue_comp_cmd;
+	obj->wait_comp = bnx2x_queue_wait_comp;
+	obj->set_pending = bnx2x_queue_set_pending;
+}
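
A minimal usage sketch of the queue object (not part of this commit: the fp->q_obj/cl_id/cid/rdata fields and the INIT parameters are hypothetical placeholders, and error handling is elided); bnx2x_queue_state_change() is declared in bnx2x_sp.h below:

static int example_queue_init(struct bnx2x *bp, struct bnx2x_fastpath *fp)
{
	struct bnx2x_queue_state_params q_params = {0};

	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid,
			     BP_FUNC(bp), fp->rdata, fp->rdata_mapping,
			     BIT(BNX2X_Q_TYPE_HAS_RX) |
			     BIT(BNX2X_Q_TYPE_HAS_TX));

	q_params.q_obj = &fp->q_obj;
	q_params.cmd = BNX2X_Q_CMD_INIT;
	/* q_params.params.init would be filled here */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	return bnx2x_queue_state_change(bp, &q_params);
}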
+
+/********************** Function state object *********************************/
+
+static int bnx2x_func_wait_comp(struct bnx2x *bp,
+				struct bnx2x_func_sp_obj *o,
+				enum bnx2x_func_cmd cmd)
+{
+	return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_func_state_change_comp - complete the state machine transition
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @cmd:	command that has completed
+ *
+ * Called on state change transition. Completes the state
+ * machine transition only - no HW interaction.
+ */
+static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
+					       struct bnx2x_func_sp_obj *o,
+					       enum bnx2x_func_cmd cmd)
+{
+	unsigned long cur_pending = o->pending;
+
+	if (!test_and_clear_bit(cmd, &cur_pending)) {
+		BNX2X_ERR("Bad MC reply %d for func %d in state %d "
+			  "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
+			  o->state, cur_pending, o->next_state);
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
+			 "%d\n", cmd, BP_FUNC(bp), o->next_state);
+
+	o->state = o->next_state;
+	o->next_state = BNX2X_F_STATE_MAX;
+
+	/* It's important that o->state and o->next_state are
+	 * updated before o->pending.
+	 */
+	wmb();
+
+	clear_bit(cmd, &o->pending);
+	smp_mb__after_clear_bit();
+
+	return 0;
+}
+
+/**
+ * bnx2x_func_comp_cmd - complete the state change command
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @cmd:	command to check the completion for
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_func_comp_cmd(struct bnx2x *bp,
+			       struct bnx2x_func_sp_obj *o,
+			       enum bnx2x_func_cmd cmd)
+{
+	/* Complete the state machine part first; this also checks
+	 * that the arrived completion is a legal one.
+	 */
+	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
+	return rc;
+}
+
+/**
+ * bnx2x_func_chk_transition - check a function state machine transition
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @params:	function state parameters
+ *
+ * Checks whether the requested command is legal in the current
+ * state and, if it is, sets `next_state' in the object, which the
+ * completion flow will then use to set the object's `state'.
+ *
+ * Returns 0 if the requested command is a legal transition,
+ *         -EINVAL otherwise.
+ */
+static int bnx2x_func_chk_transition(struct bnx2x *bp,
+				     struct bnx2x_func_sp_obj *o,
+				     struct bnx2x_func_state_params *params)
+{
+	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
+	enum bnx2x_func_cmd cmd = params->cmd;
+
+	switch (state) {
+	case BNX2X_F_STATE_RESET:
+		if (cmd == BNX2X_F_CMD_HW_INIT)
+			next_state = BNX2X_F_STATE_INITIALIZED;
+
+		break;
+	case BNX2X_F_STATE_INITIALIZED:
+		if (cmd == BNX2X_F_CMD_START)
+			next_state = BNX2X_F_STATE_STARTED;
+
+		else if (cmd == BNX2X_F_CMD_HW_RESET)
+			next_state = BNX2X_F_STATE_RESET;
+
+		break;
+	case BNX2X_F_STATE_STARTED:
+		if (cmd == BNX2X_F_CMD_STOP)
+			next_state = BNX2X_F_STATE_INITIALIZED;
+
+		break;
+	default:
+		BNX2X_ERR("Unknown state: %d\n", state);
+	}
+
+	/* Transition is assured */
+	if (next_state != BNX2X_F_STATE_MAX) {
+		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
+				 state, cmd, next_state);
+		o->next_state = next_state;
+		return 0;
+	}
+
+	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
+			 state, cmd);
+
+	return -EINVAL;
+}
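
The function state machine checked above is small enough to draw in full:

	/*
	 * RESET --HW_INIT--> INITIALIZED --START--> STARTED
	 * STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
	 */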
+
+/**
+ * bnx2x_func_init_func - performs HW init at function stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific slow path operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
+ * HW blocks.
+ */
+static inline int bnx2x_func_init_func(struct bnx2x *bp,
+				       const struct bnx2x_func_sp_drv_ops *drv)
+{
+	return drv->init_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_init_port - performs HW init at port stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific slow path operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
+ * FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_port(struct bnx2x *bp,
+				       const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_port(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific slow path operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_cmn_chip(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_port(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn - performs HW init at common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific slow path operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
+				      const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_cmn(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_port(bp, drv);
+}
+
+static int bnx2x_func_hw_init(struct bnx2x *bp,
+			      struct bnx2x_func_state_params *params)
+{
+	u32 load_code = params->params.hw_init.load_phase;
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+	int rc = 0;
+
+	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
+			 BP_ABS_FUNC(bp), load_code);
+
+	/* Prepare buffers for unzipping the FW */
+	rc = drv->gunzip_init(bp);
+	if (rc)
+		return rc;
+
+	/* Prepare FW */
+	rc = drv->init_fw(bp);
+	if (rc) {
+		BNX2X_ERR("Error loading firmware\n");
+		goto fw_init_err;
+	}
+
+	/* Handle the beginning of COMMON_XXX phases separately... */
+	switch (load_code) {
+	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+		rc = bnx2x_func_init_cmn_chip(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_COMMON:
+		rc = bnx2x_func_init_cmn(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_PORT:
+		rc = bnx2x_func_init_port(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+		rc = bnx2x_func_init_func(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	default:
+		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+		rc = -EINVAL;
+	}
+
+init_hw_err:
+	drv->release_fw(bp);
+
+fw_init_err:
+	drv->gunzip_end(bp);
+
+	/* In case of success, complete the command immediately: no ramrods
+	 * have been sent.
+	 */
+	if (!rc)
+		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
+
+	return rc;
+}
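
Note how the load phases above nest: each broader phase falls through to the narrower helpers, so a single MCP load code runs every applicable init stage. Schematically:

	/*
	 * LOAD_COMMON_CHIP: init_hw_cmn_chip() -> init_hw_port() -> init_hw_func()
	 * LOAD_COMMON:      init_hw_cmn()      -> init_hw_port() -> init_hw_func()
	 * LOAD_PORT:                              init_hw_port() -> init_hw_func()
	 * LOAD_FUNCTION:                                            init_hw_func()
	 */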
+
+/**
+ * bnx2x_func_reset_func - reset HW at function stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific slow path operations
+ *
+ * Reset HW at the FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset
+ * FUNCTION-only HW blocks.
+ */
+static inline void bnx2x_func_reset_func(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	drv->reset_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_reset_port - reset HW at port stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific slow path operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
+ * FUNCTION-only and PORT-only HW blocks.
+ *
+ *                 !!!IMPORTANT!!!
+ *
+ * It's important to call reset_port before reset_func() as the last
+ * thing reset_func() does is pf_disable(), thus disabling PGLUE_B,
+ * which makes any DMAE transactions impossible.
+ */
+static inline void bnx2x_func_reset_port(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	drv->reset_hw_port(bp);
+	bnx2x_func_reset_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_reset_cmn - reset HW at common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver-specific slow path operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
+ * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
+ * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
+ */
+static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	bnx2x_func_reset_port(bp, drv);
+	drv->reset_hw_cmn(bp);
+}
+
+
+static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
+				      struct bnx2x_func_state_params *params)
+{
+	u32 reset_phase = params->params.hw_reset.reset_phase;
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+
+	DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
+			 reset_phase);
+
+	switch (reset_phase) {
+	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+		bnx2x_func_reset_cmn(bp, drv);
+		break;
+	case FW_MSG_CODE_DRV_UNLOAD_PORT:
+		bnx2x_func_reset_port(bp, drv);
+		break;
+	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+		bnx2x_func_reset_func(bp, drv);
+		break;
+	default:
+		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
+			   reset_phase);
+		break;
+	}
+
+	/* Complete the command immediately: no ramrods have been sent. */
+	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
+
+	return 0;
+}
+
+static inline int bnx2x_func_send_start(struct bnx2x *bp,
+					struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct function_start_data *rdata =
+		(struct function_start_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_start_params *start_params = &params->params.start;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->function_mode = cpu_to_le16(start_params->mf_mode);
+	rdata->sd_vlan_tag   = start_params->sd_vlan_tag;
+	rdata->path_id       = BP_PATH(bp);
+	rdata->network_cos_mode = start_params->network_cos_mode;
+
+	mb();
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_stop(struct bnx2x *bp,
+				       struct bnx2x_func_state_params *params)
+{
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
+			     NONE_CONNECTION_TYPE);
+}
+
+static int bnx2x_func_send_cmd(struct bnx2x *bp,
+			       struct bnx2x_func_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_F_CMD_HW_INIT:
+		return bnx2x_func_hw_init(bp, params);
+	case BNX2X_F_CMD_START:
+		return bnx2x_func_send_start(bp, params);
+	case BNX2X_F_CMD_STOP:
+		return bnx2x_func_send_stop(bp, params);
+	case BNX2X_F_CMD_HW_RESET:
+		return bnx2x_func_hw_reset(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+void bnx2x_init_func_obj(struct bnx2x *bp,
+			 struct bnx2x_func_sp_obj *obj,
+			 void *rdata, dma_addr_t rdata_mapping,
+			 struct bnx2x_func_sp_drv_ops *drv_iface)
+{
+	memset(obj, 0, sizeof(*obj));
+
+	mutex_init(&obj->one_pending_mutex);
+
+	obj->rdata = rdata;
+	obj->rdata_mapping = rdata_mapping;
+
+	obj->send_cmd = bnx2x_func_send_cmd;
+	obj->check_transition = bnx2x_func_chk_transition;
+	obj->complete_cmd = bnx2x_func_comp_cmd;
+	obj->wait_comp = bnx2x_func_wait_comp;
+
+	obj->drv = drv_iface;
+}
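
A sketch of the intended call sequence for the function object (assumptions: bp->func_obj and the drv ops table exist on the caller's side; only the APIs declared in this file are real):

static int example_func_hw_init(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {0};

	func_params.f_obj = &bp->func_obj;	/* assumed driver field */
	func_params.cmd = BNX2X_F_CMD_HW_INIT;
	func_params.params.hw_init.load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return bnx2x_func_state_change(bp, &func_params);
}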
+
+/**
+ * bnx2x_func_state_change - perform Function state change transition
+ *
+ * @bp:		device handle
+ * @params:	parameters to perform the transaction
+ *
+ * Returns 0 in case of a successfully completed transition,
+ *         a negative error code in case of failure, or a positive
+ *         (EBUSY) value if there is a completion that is
+ *         still pending (possible only if RAMROD_COMP_WAIT is
+ *         not set in params->ramrod_flags for asynchronous
+ *         commands).
+ */
+int bnx2x_func_state_change(struct bnx2x *bp,
+			    struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	int rc;
+	enum bnx2x_func_cmd cmd = params->cmd;
+	unsigned long *pending = &o->pending;
+
+	mutex_lock(&o->one_pending_mutex);
+
+	/* Check that the requested transition is legal */
+	if (o->check_transition(bp, o, params)) {
+		mutex_unlock(&o->one_pending_mutex);
+		return -EINVAL;
+	}
+
+	/* Set "pending" bit */
+	set_bit(cmd, pending);
+
+	/* Don't send a command if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+		bnx2x_func_state_change_comp(bp, o, cmd);
+		mutex_unlock(&o->one_pending_mutex);
+	} else {
+		/* Send a ramrod */
+		rc = o->send_cmd(bp, params);
+
+		mutex_unlock(&o->one_pending_mutex);
+
+		if (rc) {
+			o->next_state = BNX2X_F_STATE_MAX;
+			clear_bit(cmd, pending);
+			smp_mb__after_clear_bit();
+			return rc;
+		}
+
+		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+			rc = o->wait_comp(bp, o, cmd);
+			if (rc)
+				return rc;
+
+			return 0;
+		}
+	}
 
-	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
-		REG_WR8(bp, BAR_TSTRORM_INTMEM +
-			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-			bp->fp->cl_id + bp->rx_indir_table[i]);
+	return !!test_bit(cmd, pending);
 }

+ 1235 - 43
drivers/net/bnx2x/bnx2x_sp.h

@@ -1,43 +1,1235 @@
-#ifndef BNX2X_SP
-#define BNX2X_SP
-
-#include "bnx2x_reg.h"
-
-/* MAC configuration */
-void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
-			    u32 cl_bit_vec, u8 cam_offset,
-			    u8 is_bcast);
-
-/* Multicast */
-void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
-void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
-int bnx2x_set_e1_mc_list(struct bnx2x *bp);
-int bnx2x_set_e1h_mc_list(struct bnx2x *bp);
-
-/* Rx mode */
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
-void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters);
-
-/* RSS configuration */
-void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
-void bnx2x_push_indir_table(struct bnx2x *bp);
-
-/* Queue configuration */
-static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
-{
-	/* ustorm cxt validation */
-	cxt->ustorm_ag_context.cdu_usage =
-		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
-				       ETH_CONNECTION_TYPE);
-	/* xcontext validation */
-	cxt->xstorm_ag_context.cdu_reserved =
-		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
-				       ETH_CONNECTION_TYPE);
-}
-
-int bnx2x_setup_fw_client(struct bnx2x *bp,
-			  struct bnx2x_client_init_params *params,
-			  u8 activate,
-			  struct client_init_ramrod_data *data,
-			  dma_addr_t data_mapping);
-#endif /* BNX2X_SP */
+/* bnx2x_sp.h: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#ifndef BNX2X_SP_VERBS
+#define BNX2X_SP_VERBS
+
+struct bnx2x;
+struct eth_context;
+
+/* Bits representing general command's configuration */
+enum {
+	RAMROD_TX,
+	RAMROD_RX,
+	/* Wait until all pending commands complete */
+	RAMROD_COMP_WAIT,
+	/* Don't send a ramrod, only update a registry */
+	RAMROD_DRV_CLR_ONLY,
+	/* Configure HW according to the current object state */
+	RAMROD_RESTORE,
+	/* Execute the next command now */
+	RAMROD_EXEC,
+	/*
+	 * Don't add a new command and continue execution of postponed
+	 * commands. If not set, a new command will be added to the
+	 * pending commands list.
+	 */
+	RAMROD_CONT,
+};
+
+typedef enum {
+	BNX2X_OBJ_TYPE_RX,
+	BNX2X_OBJ_TYPE_TX,
+	BNX2X_OBJ_TYPE_RX_TX,
+} bnx2x_obj_type;
+
+/* Filtering states */
+enum {
+	BNX2X_FILTER_MAC_PENDING,
+	BNX2X_FILTER_VLAN_PENDING,
+	BNX2X_FILTER_VLAN_MAC_PENDING,
+	BNX2X_FILTER_RX_MODE_PENDING,
+	BNX2X_FILTER_RX_MODE_SCHED,
+	BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+	BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+	BNX2X_FILTER_FCOE_ETH_START_SCHED,
+	BNX2X_FILTER_FCOE_ETH_STOP_SCHED,
+	BNX2X_FILTER_MCAST_PENDING,
+	BNX2X_FILTER_MCAST_SCHED,
+	BNX2X_FILTER_RSS_CONF_PENDING,
+};
+
+struct bnx2x_raw_obj {
+	u8		func_id;
+
+	/* Queue params */
+	u8		cl_id;
+	u32		cid;
+
+	/* Ramrod data buffer params */
+	void		*rdata;
+	dma_addr_t	rdata_mapping;
+
+	/* Ramrod state params */
+	int		state;   /* "ramrod is pending" state bit */
+	unsigned long	*pstate; /* pointer to state buffer */
+
+	bnx2x_obj_type	obj_type;
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_raw_obj *o);
+
+	bool (*check_pending)(struct bnx2x_raw_obj *o);
+	void (*clear_pending)(struct bnx2x_raw_obj *o);
+	void (*set_pending)(struct bnx2x_raw_obj *o);
+};
+
+/************************* VLAN-MAC commands related parameters ***************/
+struct bnx2x_mac_ramrod_data {
+	u8 mac[ETH_ALEN];
+};
+
+struct bnx2x_vlan_ramrod_data {
+	u16 vlan;
+};
+
+struct bnx2x_vlan_mac_ramrod_data {
+	u8 mac[ETH_ALEN];
+	u16 vlan;
+};
+
+union bnx2x_classification_ramrod_data {
+	struct bnx2x_mac_ramrod_data mac;
+	struct bnx2x_vlan_ramrod_data vlan;
+	struct bnx2x_vlan_mac_ramrod_data vlan_mac;
+};
+
+/* VLAN_MAC commands */
+enum bnx2x_vlan_mac_cmd {
+	BNX2X_VLAN_MAC_ADD,
+	BNX2X_VLAN_MAC_DEL,
+	BNX2X_VLAN_MAC_MOVE,
+};
+
+struct bnx2x_vlan_mac_data {
+	/* Requested command: BNX2X_VLAN_MAC_XX */
+	enum bnx2x_vlan_mac_cmd cmd;
+	/*
+	 * Used to carry the data-related vlan_mac_flags bits from the
+	 * ramrod parameters.
+	 */
+	unsigned long vlan_mac_flags;
+
+	/* Needed for MOVE command */
+	struct bnx2x_vlan_mac_obj *target_obj;
+
+	union bnx2x_classification_ramrod_data u;
+};
+
+/*************************** Exe Queue obj ************************************/
+union bnx2x_exe_queue_cmd_data {
+	struct bnx2x_vlan_mac_data vlan_mac;
+
+	struct {
+		/* TODO */
+	} mcast;
+};
+
+struct bnx2x_exeq_elem {
+	struct list_head		link;
+
+	/* Length of this element in the exe_chunk. */
+	int				cmd_len;
+
+	union bnx2x_exe_queue_cmd_data	cmd_data;
+};
+
+union bnx2x_qable_obj;
+
+union bnx2x_exeq_comp_elem {
+	union event_ring_elem *elem;
+};
+
+struct bnx2x_exe_queue_obj;
+
+typedef int (*exe_q_validate)(struct bnx2x *bp,
+			      union bnx2x_qable_obj *o,
+			      struct bnx2x_exeq_elem *elem);
+
+/**
+ * @return positive if the entry was optimized, 0 if not, negative
+ *         in case of an error.
+ */
+typedef int (*exe_q_optimize)(struct bnx2x *bp,
+			      union bnx2x_qable_obj *o,
+			      struct bnx2x_exeq_elem *elem);
+typedef int (*exe_q_execute)(struct bnx2x *bp,
+			     union bnx2x_qable_obj *o,
+			     struct list_head *exe_chunk,
+			     unsigned long *ramrod_flags);
+typedef struct bnx2x_exeq_elem *
+			(*exe_q_get)(struct bnx2x_exe_queue_obj *o,
+				     struct bnx2x_exeq_elem *elem);
+
+struct bnx2x_exe_queue_obj {
+	/*
+	 * Commands pending execution.
+	 */
+	struct list_head	exe_queue;
+
+	/*
+	 * Commands pending completion.
+	 */
+	struct list_head	pending_comp;
+
+	spinlock_t		lock;
+
+	/* Maximum length of commands' list for one execution */
+	int			exe_chunk_len;
+
+	union bnx2x_qable_obj	*owner;
+
+	/****** Virtual functions ******/
+	/**
+	 * Called before commands execution for commands that are really
+	 * going to be executed (after 'optimize').
+	 *
+	 * Must run under exe_queue->lock
+	 */
+	exe_q_validate		validate;
+
+	/**
+	 * This will try to cancel the current pending commands list
+	 * considering the new command.
+	 *
+	 * Must run under exe_queue->lock
+	 */
+	exe_q_optimize		optimize;
+
+	/**
+	 * Run the next commands chunk (owner specific).
+	 */
+	exe_q_execute		execute;
+
+	/**
+	 * Return the exe_queue element containing the specific command
+	 * if any. Otherwise return NULL.
+	 */
+	exe_q_get		get;
+};
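
Taken together, the four hooks form a per-element pipeline; conceptually (a sketch of the intended flow, not code from this commit):

	/*
	 * submit:   validate(elem) -> optimize(elem) -> add to exe_queue
	 * run:      move up to exe_chunk_len commands from exe_queue to
	 *           pending_comp, then execute(chunk)
	 * complete: remove the chunk from pending_comp; with RAMROD_CONT
	 *           the next chunk is executed
	 */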
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/*
+ * Element in the VLAN_MAC registry list holding all currently configured
+ * rules.
+ */
+struct bnx2x_vlan_mac_registry_elem {
+	struct list_head	link;
+
+	/*
+	 * Used to store the cam offset used for the mac/vlan/vlan-mac.
+	 * Relevant for 57710 and 57711 only. VLANs and MACs share the
+	 * same CAM for these chips.
+	 */
+	int			cam_offset;
+
+	/* Needed for DEL and RESTORE flows */
+	unsigned long		vlan_mac_flags;
+
+	union bnx2x_classification_ramrod_data u;
+};
+
+/* Bits representing VLAN_MAC commands specific flags */
+enum {
+	BNX2X_UC_LIST_MAC,
+	BNX2X_ETH_MAC,
+	BNX2X_ISCSI_ETH_MAC,
+	BNX2X_NETQ_ETH_MAC,
+	BNX2X_DONT_CONSUME_CAM_CREDIT,
+	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+};
+
+struct bnx2x_vlan_mac_ramrod_params {
+	/* Object to run the command from */
+	struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+	/* General command flags: COMP_WAIT, etc. */
+	unsigned long ramrod_flags;
+
+	/* Command specific configuration request */
+	struct bnx2x_vlan_mac_data user_req;
+};
+
+struct bnx2x_vlan_mac_obj {
+	struct bnx2x_raw_obj raw;
+
+	/* Bookkeeping list: will prevent the addition of already existing
+	 * entries.
+	 */
+	struct list_head		head;
+
+	/* TODO: Add its initialization in the init functions */
+	struct bnx2x_exe_queue_obj	exe_queue;
+
+	/* MACs credit pool */
+	struct bnx2x_credit_pool_obj	*macs_pool;
+
+	/* VLANs credit pool */
+	struct bnx2x_credit_pool_obj	*vlans_pool;
+
+	/* RAMROD command to be used */
+	int				ramrod_cmd;
+
+	/**
+	 * Checks if an ADD-ramrod with the given params may be performed.
+	 *
+	 * @return zero if the element may be added
+	 */
+	int (*check_add)(struct bnx2x_vlan_mac_obj *o,
+			 union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 * Checks if a DEL-ramrod with the given params may be performed.
+	 *
+	 * @return the matching registry element if it may be deleted,
+	 *         NULL otherwise
+	 */
+	struct bnx2x_vlan_mac_registry_elem *
+		(*check_del)(struct bnx2x_vlan_mac_obj *o,
+			     union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 * Checks if a MOVE-ramrod with the given params may be performed.
+	 *
+	 * @return true if the element may be moved
+	 */
+	bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o,
+			   struct bnx2x_vlan_mac_obj *dst_o,
+			   union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 *  Update the relevant credit object(s) (consume/return
+	 *  correspondingly).
+	 */
+	bool (*get_credit)(struct bnx2x_vlan_mac_obj *o);
+	bool (*put_credit)(struct bnx2x_vlan_mac_obj *o);
+	bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset);
+	bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset);
+
+	/**
+	 * Configures one rule in the ramrod data buffer.
+	 */
+	void (*set_one_rule)(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *o,
+			     struct bnx2x_exeq_elem *elem, int rule_idx,
+			     int cam_offset);
+
+	/**
+	 * Delete all configured elements having the given
+	 * vlan_mac_flags specification. Assumes there are no commands
+	 * pending execution. Will schedule all currently configured
+	 * MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
+	 * specification for deletion and will use the given
+	 * ramrod_flags for the last DEL operation.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param ramrod_flags RAMROD_XX flags
+	 *
+	 * @return 0 if the last operation has completed successfully
+	 *         and there are no more elements left, positive value
+	 *         if there are pending for completion commands,
+	 *         negative value in case of failure.
+	 */
+	int (*delete_all)(struct bnx2x *bp,
+			  struct bnx2x_vlan_mac_obj *o,
+			  unsigned long *vlan_mac_flags,
+			  unsigned long *ramrod_flags);
+
+	/**
+	 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
+	 * configured elements list.
+	 *
+	 * @param bp
+	 * @param p Command parameters (only the RAMROD_COMP_WAIT bit in
+	 *          ramrod_flags is taken into account)
+	 * @param ppos a pointer to the cookie that should be given back in
+	 *        the next call to make the function handle the next element.
+	 *        If *ppos is set to NULL it will restart the iterator.
+	 *        If the returned *ppos == NULL, the last element has been
+	 *        handled.
+	 *
+	 * @return int
+	 */
+	int (*restore)(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_ramrod_params *p,
+		       struct bnx2x_vlan_mac_registry_elem **ppos);
+
+	/**
+	 * Should be called on a completion arrival.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param cqe Completion element we are handling
+	 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
+	 *		       pending commands will be executed.
+	 *		       RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
+	 *		       may also be set if needed.
+	 *
+	 * @return 0 if there are neither pending nor waiting for
+	 *         completion commands. Positive value if there are
+	 *         pending for execution or for completion commands.
+	 *         Negative value in case of an error (including an
+	 *         error in the cqe).
+	 */
+	int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+			union event_ring_elem *cqe,
+			unsigned long *ramrod_flags);
+
+	/**
+	 * Wait for completion of all commands. Don't schedule new ones,
+	 * just wait. It assumes that the completion code will schedule
+	 * for new commands.
+	 */
+	int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
+};
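
As a usage sketch (hypothetical caller; the object and MAC value are placeholders), adding a MAC reduces to filling a bnx2x_vlan_mac_ramrod_params and calling bnx2x_config_vlan_mac(), declared near the end of this header:

static int example_add_mac(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_obj *mac_obj, const u8 *mac)
{
	struct bnx2x_vlan_mac_ramrod_params p = {0};

	p.vlan_mac_obj = mac_obj;
	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
	__set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}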
+
+/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+
+/* RX_MODE ramrod special flags: set in the rx_mode_flags field of
+ * a bnx2x_rx_mode_ramrod_params.
+ */
+enum {
+	BNX2X_RX_MODE_FCOE_ETH,
+	BNX2X_RX_MODE_ISCSI_ETH,
+};
+
+enum {
+	BNX2X_ACCEPT_UNICAST,
+	BNX2X_ACCEPT_MULTICAST,
+	BNX2X_ACCEPT_ALL_UNICAST,
+	BNX2X_ACCEPT_ALL_MULTICAST,
+	BNX2X_ACCEPT_BROADCAST,
+	BNX2X_ACCEPT_UNMATCHED,
+	BNX2X_ACCEPT_ANY_VLAN
+};
+
+struct bnx2x_rx_mode_ramrod_params {
+	struct bnx2x_rx_mode_obj *rx_mode_obj;
+	unsigned long *pstate;
+	int state;
+	u8 cl_id;
+	u32 cid;
+	u8 func_id;
+	unsigned long ramrod_flags;
+	unsigned long rx_mode_flags;
+
+	/*
+	 * rdata is either a pointer to an eth_filter_rules_ramrod_data (e2)
+	 * or to a tstorm_eth_mac_filter_config (e1x).
+	 */
+	void *rdata;
+	dma_addr_t rdata_mapping;
+
+	/* Rx mode settings */
+	unsigned long rx_accept_flags;
+
+	/* internal switching settings */
+	unsigned long tx_accept_flags;
+};
+
+struct bnx2x_rx_mode_obj {
+	int (*config_rx_mode)(struct bnx2x *bp,
+			      struct bnx2x_rx_mode_ramrod_params *p);
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p);
+};
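
A sketch of driving the Rx mode object (hypothetical caller; the object, ids and ramrod buffer are assumed to be prepared elsewhere, only the accept flags are chosen here):

static int example_rx_mode_normal(struct bnx2x *bp,
				  struct bnx2x_rx_mode_ramrod_params *p)
{
	__set_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags);
	__set_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags);
	__set_bit(RAMROD_COMP_WAIT, &p->ramrod_flags);

	return bnx2x_config_rx_mode(bp, p);
}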
+
+/********************** Set multicast group ***********************************/
+
+struct bnx2x_mcast_list_elem {
+	struct list_head link;
+	u8 *mac;
+};
+
+union bnx2x_mcast_config_data {
+	u8 *mac;
+	u8 bin; /* used in a RESTORE flow */
+};
+
+struct bnx2x_mcast_ramrod_params {
+	struct bnx2x_mcast_obj *mcast_obj;
+
+	/* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
+	unsigned long ramrod_flags;
+
+	struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */
+	/** TODO:
+	 *      - rename it to macs_num.
+	 *      - Add a new command type for handling pending commands
+	 *        (remove "zero semantics").
+	 *
+	 *  Length of mcast_list. If zero and ADD_CONT command - post
+	 *  pending commands.
+	 */
+	int mcast_list_len;
+};
+
+enum {
+	BNX2X_MCAST_CMD_ADD,
+	BNX2X_MCAST_CMD_CONT,
+	BNX2X_MCAST_CMD_DEL,
+	BNX2X_MCAST_CMD_RESTORE,
+};
+
+struct bnx2x_mcast_obj {
+	struct bnx2x_raw_obj raw;
+
+	union {
+		struct {
+		#define BNX2X_MCAST_BINS_NUM	256
+		#define BNX2X_MCAST_VEC_SZ	(BNX2X_MCAST_BINS_NUM / 64)
+			u64 vec[BNX2X_MCAST_VEC_SZ];
+
+			/** Number of BINs to clear. Should be updated
+			 *  immediately when a command arrives in order to
+			 *  properly create DEL commands.
+			 */
+			int num_bins_set;
+		} aprox_match;
+
+		struct {
+			struct list_head macs;
+			int num_macs_set;
+		} exact_match;
+	} registry;
+
+	/* Pending commands */
+	struct list_head pending_cmds_head;
+
+	/* A state that is set in raw.pstate when there are pending commands */
+	int sched_state;
+
+	/* Maximum number of mcast MACs configured in one command */
+	int max_cmd_len;
+
+	/* Total number of currently pending MACs to configure: both
+	 * in the pending commands list and in the current command.
+	 */
+	int total_pending_num;
+
+	u8 engine_id;
+
+	/**
+	 * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
+	 */
+	int (*config_mcast)(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+	/**
+	 * Fills the ramrod data during the RESTORE flow.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param start_idx Registry index to start from
+	 * @param rdata_idx Index in the ramrod data to start from
+	 *
+	 * @return -1 if we handled the whole registry or index of the last
+	 *         handled registry element.
+	 */
+	int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+			   int start_bin, int *rdata_idx);
+
+	int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+			   struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+	void (*set_one_rule)(struct bnx2x *bp,
+			     struct bnx2x_mcast_obj *o, int idx,
+			     union bnx2x_mcast_config_data *cfg_data, int cmd);
+
+	/** Checks if there are more mcast MACs to be set or a previous
+	 *  command is still pending.
+	 */
+	bool (*check_pending)(struct bnx2x_mcast_obj *o);
+
+	/**
+	 * Set/Clear/Check SCHEDULED state of the object
+	 */
+	void (*set_sched)(struct bnx2x_mcast_obj *o);
+	void (*clear_sched)(struct bnx2x_mcast_obj *o);
+	bool (*check_sched)(struct bnx2x_mcast_obj *o);
+
+	/* Wait until all pending commands complete */
+	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o);
+
+	/**
+	 * Handle the internal object counters needed for proper
+	 * commands handling. Checks that the provided parameters are
+	 * feasible.
+	 */
+	int (*validate)(struct bnx2x *bp,
+			struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+	/**
+	 * Restore the values of internal counters in case of a failure.
+	 */
+	void (*revert)(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p,
+		       int old_num_bins);
+
+	int (*get_registry_size)(struct bnx2x_mcast_obj *o);
+	void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
+};
+
+/*************************** Credit handling **********************************/
+struct bnx2x_credit_pool_obj {
+
+	/* Current amount of credit in the pool */
+	atomic_t	credit;
+
+	/* Maximum allowed credit. put() will check against it. */
+	int		pool_sz;
+
+	/*
+	 * Allocate a pool table statically.
+	 *
+	 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2 (272).
+	 *
+	 * A set bit in the table means that the entry is available.
+	 */
+#define BNX2X_POOL_VEC_SIZE	(MAX_MAC_CREDIT_E2 / 64)
+	u64		pool_mirror[BNX2X_POOL_VEC_SIZE];
+
+	/* Base pool offset (initialized differently) */
+	int		base_pool_offset;
+
+	/**
+	 * Get the next free pool entry.
+	 *
+	 * @return true if there was a free entry in the pool
+	 */
+	bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
+
+	/**
+	 * Return the entry back to the pool.
+	 *
+	 * @return true if entry is legal and has been successfully
+	 *         returned to the pool.
+	 */
+	bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
+
+	/**
+	 * Get the requested amount of credit from the pool.
+	 *
+	 * @param cnt Amount of requested credit
+	 * @return true if the operation is successful
+	 */
+	bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+	/**
+	 * Returns the credit to the pool.
+	 *
+	 * @param cnt Amount of credit to return
+	 * @return true if the operation is successful
+	 */
+	bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+	/**
+	 * Reads the current amount of credit.
+	 */
+	int (*check)(struct bnx2x_credit_pool_obj *o);
+};
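
The pool is always consulted through its function pointers; a sketch (a hypothetical helper mirroring the get_credit/put_credit hooks of the VLAN-MAC object above):

/* Reserve one credit before queueing an ADD; return it on a later failure. */
static bool example_take_one_credit(struct bnx2x_credit_pool_obj *pool)
{
	if (!pool->get(pool, 1))
		return false;		/* pool exhausted */
	/* ... queue the command; on error undo with pool->put(pool, 1) */
	return true;
}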
+
+/*************************** RSS configuration ********************************/
+enum {
+	/* RSS_MODE bits are mutually exclusive */
+	BNX2X_RSS_MODE_DISABLED,
+	BNX2X_RSS_MODE_REGULAR,
+	BNX2X_RSS_MODE_VLAN_PRI,
+	BNX2X_RSS_MODE_E1HOV_PRI,
+	BNX2X_RSS_MODE_IP_DSCP,
+
+	BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+	BNX2X_RSS_IPV4,
+	BNX2X_RSS_IPV4_TCP,
+	BNX2X_RSS_IPV6,
+	BNX2X_RSS_IPV6_TCP,
+};
+
+struct bnx2x_config_rss_params {
+	struct bnx2x_rss_config_obj *rss_obj;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* BNX2X_RSS_X bits */
+	unsigned long	rss_flags;
+
+	/* Number of hash bits to take into account */
+	u8		rss_result_mask;
+
+	/* Indirection table */
+	u8		ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	/* RSS hash values */
+	u32		rss_key[10];
+
+	/* valid iff BNX2X_RSS_UPDATE_TOE is set */
+	u16		toe_rss_bitmap;
+};
+
+struct bnx2x_rss_config_obj {
+	struct bnx2x_raw_obj	raw;
+
+	/* RSS engine to use */
+	u8			engine_id;
+
+	/* Last configured indirection table */
+	u8			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	int (*config_rss)(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *p);
+};
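
A sketch of an RSS (re)configuration request (hypothetical caller; the flag choice and mask value are illustrative):

static int example_config_rss(struct bnx2x *bp,
			      struct bnx2x_rss_config_obj *rss_obj,
			      const u8 *ind_table)
{
	struct bnx2x_config_rss_params params = {0};

	params.rss_obj = rss_obj;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	params.rss_result_mask = 0x7f;	/* illustrative: 7 hash bits */
	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	return bnx2x_config_rss(bp, &params);
}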
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+	BNX2X_Q_UPDATE_IN_VLAN_REM,
+	BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_ANTI_SPOOF,
+	BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+	BNX2X_Q_UPDATE_ACTIVATE,
+	BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM
+};
+
+/* Allowed Queue states */
+enum bnx2x_q_state {
+	BNX2X_Q_STATE_RESET,
+	BNX2X_Q_STATE_INITIALIZED,
+	BNX2X_Q_STATE_ACTIVE,
+	BNX2X_Q_STATE_INACTIVE,
+	BNX2X_Q_STATE_STOPPED,
+	BNX2X_Q_STATE_TERMINATED,
+	BNX2X_Q_STATE_FLRED,
+	BNX2X_Q_STATE_MAX,
+};
+
+/* Allowed commands */
+enum bnx2x_queue_cmd {
+	BNX2X_Q_CMD_INIT,
+	BNX2X_Q_CMD_SETUP,
+	BNX2X_Q_CMD_DEACTIVATE,
+	BNX2X_Q_CMD_ACTIVATE,
+	BNX2X_Q_CMD_UPDATE,
+	BNX2X_Q_CMD_UPDATE_TPA,
+	BNX2X_Q_CMD_HALT,
+	BNX2X_Q_CMD_CFC_DEL,
+	BNX2X_Q_CMD_TERMINATE,
+	BNX2X_Q_CMD_EMPTY,
+	BNX2X_Q_CMD_MAX,
+};
+
+/* queue SETUP + INIT flags */
+enum {
+	BNX2X_Q_FLG_TPA,
+	BNX2X_Q_FLG_STATS,
+	BNX2X_Q_FLG_ZERO_STATS,
+	BNX2X_Q_FLG_ACTIVE,
+	BNX2X_Q_FLG_OV,
+	BNX2X_Q_FLG_VLAN,
+	BNX2X_Q_FLG_COS,
+	BNX2X_Q_FLG_HC,
+	BNX2X_Q_FLG_HC_EN,
+	BNX2X_Q_FLG_DHC,
+	BNX2X_Q_FLG_FCOE,
+	BNX2X_Q_FLG_LEADING_RSS,
+	BNX2X_Q_FLG_MCAST,
+	BNX2X_Q_FLG_DEF_VLAN,
+	BNX2X_Q_FLG_TX_SWITCH,
+	BNX2X_Q_FLG_TX_SEC,
+	BNX2X_Q_FLG_ANTI_SPOOF,
+	BNX2X_Q_FLG_SILENT_VLAN_REM
+};
+
+/* Queue type options: the queue type may be a combination of the types below. */
+enum bnx2x_q_type {
+	/** TODO: Consider moving both these flags into the init()
+	 *        ramrod params.
+	 */
+	BNX2X_Q_TYPE_HAS_RX,
+	BNX2X_Q_TYPE_HAS_TX,
+};
+
+struct bnx2x_queue_init_params {
+	struct {
+		unsigned long	flags;
+		u16		hc_rate;
+		u8		fw_sb_id;
+		u8		sb_cq_index;
+	} tx;
+
+	struct {
+		unsigned long	flags;
+		u16		hc_rate;
+		u8		fw_sb_id;
+		u8		sb_cq_index;
+	} rx;
+
+	/* CID context in the host memory */
+	struct eth_context *cxt;
+};
+
+struct bnx2x_queue_update_params {
+	unsigned long	update_flags; /* BNX2X_Q_UPDATE_XX bits */
+	u16		def_vlan;
+	u16		silent_removal_value;
+	u16		silent_removal_mask;
+};
+
+struct rxq_pause_params {
+	u16		bd_th_lo;
+	u16		bd_th_hi;
+	u16		rcq_th_lo;
+	u16		rcq_th_hi;
+	u16		sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
+	u16		sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
+	u16		pri_map;
+};
+
+/* general */
+struct bnx2x_general_setup_params {
+	/* valid iff BNX2X_Q_FLG_STATS */
+	u8		stat_id;
+
+	u8		spcl_id;
+	u16		mtu;
+};
+
+struct bnx2x_rxq_setup_params {
+	/* dma */
+	dma_addr_t	dscr_map;
+	dma_addr_t	sge_map;
+	dma_addr_t	rcq_map;
+	dma_addr_t	rcq_np_map;
+
+	u16		drop_flags;
+	u16		buf_sz;
+	u8		fw_sb_id;
+	u8		cl_qzone_id;
+
+	/* valid iff BNX2X_Q_FLG_TPA */
+	u16		tpa_agg_sz;
+	u16		sge_buf_sz;
+	u8		max_sges_pkt;
+	u8		max_tpa_queues;
+	u8		rss_engine_id;
+
+	u8		cache_line_log;
+
+	u8		sb_cq_index;
+
+	/* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
+	u16 silent_removal_value;
+	u16 silent_removal_mask;
+};
+
+struct bnx2x_txq_setup_params {
+	/* dma */
+	dma_addr_t	dscr_map;
+
+	u8		fw_sb_id;
+	u8		sb_cq_index;
+	u8		cos;		/* valid iff BNX2X_Q_FLG_COS */
+	u16		traffic_type;
+	/* equals the leading RSS client id, used for TX classification */
+	u8		tss_leading_cl_id;
+
+	/* valid iff BNX2X_Q_FLG_DEF_VLAN */
+	u16		default_vlan;
+};
+
+struct bnx2x_queue_setup_params {
+	struct rxq_pause_params pause;
+	struct bnx2x_general_setup_params gen_params;
+	struct bnx2x_rxq_setup_params rxq_params;
+	struct bnx2x_txq_setup_params txq_params;
+	unsigned long flags;
+};
+
+
+struct bnx2x_queue_state_params {
+	struct bnx2x_queue_sp_obj *q_obj;
+
+	/* Current command */
+	enum bnx2x_queue_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_queue_update_params update;
+		struct bnx2x_queue_setup_params   setup;
+		struct bnx2x_queue_init_params	  init;
+	} params;
+};
+
+struct bnx2x_queue_sp_obj {
+	u32		cid;
+	u8		cl_id;
+	u8		func_id;
+
+	enum bnx2x_q_state state, next_state;
+
+	/* bits from enum bnx2x_q_type */
+	unsigned long	type;
+
+	/* BNX2X_Q_CMD_XX bits. This object implements a "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long	pending;
+
+	/* Buffer to use as a ramrod data and its mapping */
+	void		*rdata;
+	dma_addr_t	rdata_mapping;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Sets the pending bit according to the requested transition.
+	 */
+	int (*set_pending)(struct bnx2x_queue_sp_obj *o,
+			   struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_queue_sp_obj *o,
+			    enum bnx2x_queue_cmd cmd);
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_queue_sp_obj *o,
+			 enum bnx2x_queue_cmd cmd);
+};
+
+/********************** Function state update *********************************/
+/* Allowed Function states */
+enum bnx2x_func_state {
+	BNX2X_F_STATE_RESET,
+	BNX2X_F_STATE_INITIALIZED,
+	BNX2X_F_STATE_STARTED,
+	BNX2X_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum bnx2x_func_cmd {
+	BNX2X_F_CMD_HW_INIT,
+	BNX2X_F_CMD_START,
+	BNX2X_F_CMD_STOP,
+	BNX2X_F_CMD_HW_RESET,
+	BNX2X_F_CMD_MAX,
+};
+
+struct bnx2x_func_hw_init_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 load_phase;
+};
+
+struct bnx2x_func_hw_reset_params {
+	/* An unload phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_UNLOAD_COMMON
+	 *		FW_MSG_CODE_DRV_UNLOAD_PORT
+	 *		FW_MSG_CODE_DRV_UNLOAD_FUNCTION
+	 */
+	u32 reset_phase;
+};
+
+struct bnx2x_func_start_params {
+	/* Multi Function mode:
+	 *	- Single Function
+	 *	- Switch Dependent
+	 *	- Switch Independent
+	 */
+	u16 mf_mode;
+
+	/* Switch Dependent mode outer VLAN tag */
+	u16 sd_vlan_tag;
+
+	/* Function cos mode */
+	u8 network_cos_mode;
+};
+
+struct bnx2x_func_state_params {
+	struct bnx2x_func_sp_obj *f_obj;
+
+	/* Current command */
+	enum bnx2x_func_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_func_hw_init_params hw_init;
+		struct bnx2x_func_hw_reset_params hw_reset;
+		struct bnx2x_func_start_params start;
+	} params;
+};
+
+struct bnx2x_func_sp_drv_ops {
+	/* Init tool + runtime initialization:
+	 *      - Common Chip
+	 *      - Common (per Path)
+	 *      - Port
+	 *      - Function phases
+	 */
+	int (*init_hw_cmn_chip)(struct bnx2x *bp);
+	int (*init_hw_cmn)(struct bnx2x *bp);
+	int (*init_hw_port)(struct bnx2x *bp);
+	int (*init_hw_func)(struct bnx2x *bp);
+
+	/* Reset Function HW: Common, Port, Function phases. */
+	void (*reset_hw_cmn)(struct bnx2x *bp);
+	void (*reset_hw_port)(struct bnx2x *bp);
+	void (*reset_hw_func)(struct bnx2x *bp);
+
+	/* Init/Free GUNZIP resources */
+	int (*gunzip_init)(struct bnx2x *bp);
+	void (*gunzip_end)(struct bnx2x *bp);
+
+	/* Prepare/Release FW resources */
+	int (*init_fw)(struct bnx2x *bp);
+	void (*release_fw)(struct bnx2x *bp);
+};
+
+struct bnx2x_func_sp_obj {
+	enum bnx2x_func_state	state, next_state;
+
+	/* BNX2X_F_CMD_XX bits. This object implements a "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long		pending;
+
+	/* Buffer to use as a ramrod data and its mapping */
+	void			*rdata;
+	dma_addr_t		rdata_mapping;
+
+	/* This mutex validates that when the pending flag is taken, the next
+	 * ramrod to be sent will be the one that set the pending bit.
+	 */
+	struct mutex		one_pending_mutex;
+
+	/* Driver interface */
+	struct bnx2x_func_sp_drv_ops	*drv;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_func_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_func_sp_obj *o,
+				struct bnx2x_func_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_func_sp_obj *o,
+			    enum bnx2x_func_cmd cmd);
+
+	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
+			 enum bnx2x_func_cmd cmd);
+};
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union bnx2x_qable_obj {
+	struct bnx2x_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void bnx2x_init_func_obj(struct bnx2x *bp,
+			 struct bnx2x_func_sp_obj *obj,
+			 void *rdata, dma_addr_t rdata_mapping,
+			 struct bnx2x_func_sp_drv_ops *drv_iface);
+
+int bnx2x_func_state_change(struct bnx2x *bp,
+			    struct bnx2x_func_state_params *params);
+
+/******************* Queue State **************/
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+			  struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 cid,
+			  u8 func_id, void *rdata, dma_addr_t rdata_mapping,
+			  unsigned long type);
+
+int bnx2x_queue_state_change(struct bnx2x *bp,
+			     struct bnx2x_queue_state_params *params);
+
+/********************* VLAN-MAC ****************/
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_obj *mac_obj,
+			u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			dma_addr_t rdata_mapping, int state,
+			unsigned long *pstate, bnx2x_obj_type type,
+			struct bnx2x_credit_pool_obj *macs_pool);
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *vlan_obj,
+			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			 dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, bnx2x_obj_type type,
+			 struct bnx2x_credit_pool_obj *vlans_pool);
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool);
+
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+			  struct bnx2x_vlan_mac_ramrod_params *p);
+
+int bnx2x_vlan_mac_move(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_ramrod_params *p,
+			struct bnx2x_vlan_mac_obj *dest_o);
+
+/********************* RX MODE ****************/
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+			    struct bnx2x_rx_mode_obj *o);
+
+/**
+ * Send an RX_MODE ramrod according to the provided parameters.
+ *
+ * @param bp
+ * @param p Command parameters
+ *
+ * @return 0 - if the operation was successful and there are no pending
+ *         completions, positive number - if there are pending completions,
+ *         negative - if there were errors
+ */
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p);
+
+/****************** MULTICASTS ****************/
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+			  struct bnx2x_mcast_obj *mcast_obj,
+			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+			  int state, unsigned long *pstate,
+			  bnx2x_obj_type type);
+
+/**
+ * Configure the multicast MACs list. May configure a new list
+ * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
+ * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) the current
+ * configuration, or continue executing the pending commands
+ * (BNX2X_MCAST_CMD_CONT).
+ *
+ * If a previous command is still pending or if the number of MACs to
+ * configure is more than the maximum number of MACs in one command,
+ * the current command will be enqueued to the tail of the
+ * pending commands list.
+ *
+ * @param bp
+ * @param p
+ * @param cmd command to execute: BNX2X_MCAST_CMD_X
+ *
+ * @return 0 if the operation was successful and there are no pending
+ *         completions, negative if there were errors, positive if there
+ *         are pending completions.
+ */
+int bnx2x_config_mcast(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p, int cmd);
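
A sketch of an ADD request through this interface (hypothetical caller; list construction shortened):

static int example_mcast_add(struct bnx2x *bp, struct bnx2x_mcast_obj *obj,
			     struct bnx2x_mcast_list_elem *elems, int n)
{
	struct bnx2x_mcast_ramrod_params p = {0};
	int i;

	p.mcast_obj = obj;
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	INIT_LIST_HEAD(&p.mcast_list);
	for (i = 0; i < n; i++)
		list_add_tail(&elems[i].link, &p.mcast_list);
	p.mcast_list_len = n;

	return bnx2x_config_mcast(bp, &p, BNX2X_MCAST_CMD_ADD);
}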
+
+/****************** CREDIT POOL ****************/
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+				struct bnx2x_credit_pool_obj *p, u8 func_id,
+				u8 func_num);
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+				 struct bnx2x_credit_pool_obj *p, u8 func_id,
+				 u8 func_num);
+
+
+/****************** RSS CONFIGURATION ****************/
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+			       struct bnx2x_rss_config_obj *rss_obj,
+			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+			       void *rdata, dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       bnx2x_obj_type type);
+
+/**
+ * Updates RSS configuration according to provided parameters.
+ *
+ * @param bp
+ * @param p
+ *
+ * @return 0 in case of success
+ */
+int bnx2x_config_rss(struct bnx2x *bp,
+		     struct bnx2x_config_rss_params *p);
+
+/**
+ * Return the current ind_table configuration.
+ *
+ * @param bp
+ * @param ind_table buffer to fill with the current indirection
+ *                  table content. Should be at least
+ *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
+ */
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+			     u8 *ind_table);
+
+#endif /* BNX2X_SP_VERBS */

File diff suppressed because it is too large
+ 504 - 324
drivers/net/bnx2x/bnx2x_stats.c


+ 186 - 42
drivers/net/bnx2x/bnx2x_stats.h

@@ -14,48 +14,11 @@
  * Statistics and Link management by Yitchak Gertner
  *
  */
-
 #ifndef BNX2X_STATS_H
 #define BNX2X_STATS_H
 
 #include <linux/types.h>
 
-struct bnx2x_eth_q_stats {
-	u32 total_bytes_received_hi;
-	u32 total_bytes_received_lo;
-	u32 total_bytes_transmitted_hi;
-	u32 total_bytes_transmitted_lo;
-	u32 total_unicast_packets_received_hi;
-	u32 total_unicast_packets_received_lo;
-	u32 total_multicast_packets_received_hi;
-	u32 total_multicast_packets_received_lo;
-	u32 total_broadcast_packets_received_hi;
-	u32 total_broadcast_packets_received_lo;
-	u32 total_unicast_packets_transmitted_hi;
-	u32 total_unicast_packets_transmitted_lo;
-	u32 total_multicast_packets_transmitted_hi;
-	u32 total_multicast_packets_transmitted_lo;
-	u32 total_broadcast_packets_transmitted_hi;
-	u32 total_broadcast_packets_transmitted_lo;
-	u32 valid_bytes_received_hi;
-	u32 valid_bytes_received_lo;
-
-	u32 error_bytes_received_hi;
-	u32 error_bytes_received_lo;
-	u32 etherstatsoverrsizepkts_hi;
-	u32 etherstatsoverrsizepkts_lo;
-	u32 no_buff_discard_hi;
-	u32 no_buff_discard_lo;
-
-	u32 driver_xoff;
-	u32 rx_err_discard_pkt;
-	u32 rx_skb_alloc_failed;
-	u32 hw_csum_err;
-};
-
-#define Q_STATS_OFFSET32(stat_name) \
-			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
-
 struct nig_stats {
 	u32 brb_discard;
 	u32 brb_packet;
@@ -212,7 +175,7 @@ struct bnx2x_eth_stats {
 	u32 brb_truncate_lo;
 
 	u32 mac_filter_discard;
-	u32 xxoverflow_discard;
+	u32 mf_tag_discard;
 	u32 brb_truncate_discard;
 	u32 mac_discard;
 
@@ -222,16 +185,197 @@ struct bnx2x_eth_stats {
 	u32 hw_csum_err;
 
 	u32 nig_timer_max;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
+};
+
+
+struct bnx2x_eth_q_stats {
+	u32 total_unicast_bytes_received_hi;
+	u32 total_unicast_bytes_received_lo;
+	u32 total_broadcast_bytes_received_hi;
+	u32 total_broadcast_bytes_received_lo;
+	u32 total_multicast_bytes_received_hi;
+	u32 total_multicast_bytes_received_lo;
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_unicast_bytes_transmitted_hi;
+	u32 total_unicast_bytes_transmitted_lo;
+	u32 total_broadcast_bytes_transmitted_hi;
+	u32 total_broadcast_bytes_transmitted_lo;
+	u32 total_multicast_bytes_transmitted_hi;
+	u32 total_multicast_bytes_transmitted_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 etherstatsoverrsizepkts_hi;
+	u32 etherstatsoverrsizepkts_lo;
+	u32 no_buff_discard_hi;
+	u32 no_buff_discard_lo;
+
+	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
+
+	u32 total_packets_received_checksum_discarded_hi;
+	u32 total_packets_received_checksum_discarded_lo;
+	u32 total_packets_received_ttl0_discarded_hi;
+	u32 total_packets_received_ttl0_discarded_lo;
+	u32 total_transmitted_dropped_packets_error_hi;
+	u32 total_transmitted_dropped_packets_error_lo;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
 };
 
-#define STATS_OFFSET32(stat_name) \
-			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+	do { \
+		s_lo += a_lo; \
+		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+	} while (0)
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+	do { \
+		if (m_lo < s_lo) { \
+			/* underflow */ \
+			d_hi = m_hi - s_hi; \
+			if (d_hi > 0) { \
+				/* we can 'loan' 1 */ \
+				d_hi--; \
+				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+			} else { \
+				/* m_hi <= s_hi */ \
+				d_hi = 0; \
+				d_lo = 0; \
+			} \
+		} else { \
+			/* m_lo >= s_lo */ \
+			if (m_hi < s_hi) { \
+				d_hi = 0; \
+				d_lo = 0; \
+			} else { \
+				/* m_hi >= s_hi */ \
+				d_hi = m_hi - s_hi; \
+				d_lo = m_lo - s_lo; \
+			} \
+		} \
+	} while (0)
+
+#define UPDATE_STAT64(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+		pstats->mac_stx[0].t##_hi = new->s##_hi; \
+		pstats->mac_stx[0].t##_lo = new->s##_lo; \
+		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+		       pstats->mac_stx[1].t##_lo, diff.lo); \
+	} while (0)
+
+#define UPDATE_STAT64_NIG(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+			diff.lo, new->s##_lo, old->s##_lo); \
+		ADD_64(estats->t##_hi, diff.hi, \
+		       estats->t##_lo, diff.lo); \
+	} while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+	do { \
+		s_lo += a; \
+		s_hi += (s_lo < a) ? 1 : 0; \
+	} while (0)
+
+#define ADD_STAT64(diff, t) \
+	do { \
+		ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
+		       pstats->mac_stx[1].t##_lo, new->diff##_lo); \
+	} while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+	do { \
+		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+			      pstats->mac_stx[1].s##_lo, \
+			      new->s); \
+	} while (0)
+
+#define UPDATE_EXTEND_TSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+		old_tclient->s = tclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		old_uclient->s = uclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
+		old_xclient->s = xclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+	do { \
+		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+	} while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+	do { \
+		SUB_64(m_hi, 0, m_lo, s); \
+	} while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
 
-/* Forward declaration */
+/* forward */
 struct bnx2x;
 
 void bnx2x_stats_init(struct bnx2x *bp);
 
-extern const u32 dmae_reg_go_c[];
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
 
 #endif /* BNX2X_STATS_H */
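The hi/lo macro family above does 64-bit arithmetic over pairs of u32
halves, since the HW stats blocks export 64-bit counters that way. A worked
example of the carry path in ADD_64:

	u32 sum_hi = 0, sum_lo = 0xfffffff0;

	ADD_64(sum_hi, 0, sum_lo, 0x20);
	/* sum_lo wrapped: 0xfffffff0 + 0x20 == 0x10 (mod 2^32), so the
	 * (s_lo < a_lo) test sees the overflow and carries 1 into sum_hi.
	 * Result: sum_hi == 1, sum_lo == 0x10, i.e. 0x100000010. */

Note that DIFF_64 clamps the difference to zero whenever the minuend is
smaller than the subtrahend, so a counter that moved backwards between two
samples cannot produce a huge bogus delta.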

+ 84 - 80
drivers/net/cnic.c

@@ -1,6 +1,6 @@
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2010 Broadcom Corporation
+ * Copyright (c) 2006-2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -836,7 +836,6 @@ static void cnic_free_resc(struct cnic_dev *dev)
 	cp->ctx_blks = 0;
 
 	cnic_free_dma(dev, &cp->gbl_buf_info);
-	cnic_free_dma(dev, &cp->conn_buf_info);
 	cnic_free_dma(dev, &cp->kwq_info);
 	cnic_free_dma(dev, &cp->kwq_16_data_info);
 	cnic_free_dma(dev, &cp->kcq2.dma);
@@ -1176,7 +1175,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	cp->iscsi_start_cid = start_cid;
 	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
 		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
 		if (!cp->fcoe_init_cid)
@@ -1232,18 +1231,12 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	if (ret)
 		goto error;
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-		ret = cnic_alloc_kcq(dev, &cp->kcq2, false);
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
 		if (ret)
 			goto error;
 	}
 
-	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
-			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
-	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
-	if (ret)
-		goto error;
-
 	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
 	if (ret)
@@ -1610,6 +1603,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	struct iscsi_context *ictx;
 	struct regpair context_addr;
 	int i, j, n = 2, n_max;
+	u8 port = CNIC_PORT(cp);
 
 	ctx->ctx_flags = 0;
 	if (!req2->num_additional_wqes)
@@ -1661,6 +1655,17 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
 	ictx->xstorm_st_context.iscsi.flags.flags |=
 		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
+	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
+		ETH_P_8021Q;
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
+	    cp->port_mode == CHIP_2_PORT_MODE)
+		port = 0;
+
+	ictx->xstorm_st_context.common.flags =
+		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
+	/* OR, not assign: a plain '=' here would clobber the PHYSQ flag */
+	ictx->xstorm_st_context.common.flags |=
+		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
 
 	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
 	/* TSTORM requires the base address of RQ DB & not PTE */
@@ -1876,8 +1881,11 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
 	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
 				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
 
-	if (ret == 0)
+	if (ret == 0) {
 		wait_event(ctx->waitq, ctx->wait_cond);
+		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
+			return -EBUSY;
+	}
 
 	return ret;
 }
@@ -1912,8 +1920,10 @@ static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
 skip_cfc_delete:
 	cnic_free_bnx2x_conn_resc(dev, l5_cid);
 
-	atomic_dec(&cp->iscsi_conn);
-	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+	if (!ret) {
+		atomic_dec(&cp->iscsi_conn);
+		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+	}
 
 destroy_reply:
 	memset(&kcqe, 0, sizeof(kcqe));
@@ -1972,8 +1982,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
 		tstorm_buf->ka_interval = kwqe3->ka_interval;
 		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
 	}
-	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
-	tstorm_buf->snd_buf = kwqe3->snd_buf;
 	tstorm_buf->max_rt_time = 0xffffffff;
 }
 
@@ -2002,15 +2010,14 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
 		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
 		 mac[4]);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
+		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
 		 mac[2]);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
-		 mac[1]);
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
 		 mac[0]);
 }
 
@@ -2189,7 +2196,7 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
 	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
 	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
 
-	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
 				  FCOE_CONNECTION_TYPE, &l5_data);
 	return ret;
 }
@@ -2234,12 +2241,9 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
 	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
 	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
 	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
-	fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
-	fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
-	fcoe_init->eq_next_page_addr.lo =
-		cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
-	fcoe_init->eq_next_page_addr.hi =
-		(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
+	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
+	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
+	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
 
 	fcoe_init->sb_num = cp->status_blk_num;
 	fcoe_init->eq_prod = MAX_KCQ_IDX;
@@ -2247,7 +2251,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
 	cp->kcq2.sw_prod_idx = 0;
 
 	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
-	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
 				  FCOE_CONNECTION_TYPE, &l5_data);
 	*work = 3;
 	return ret;
@@ -2463,7 +2467,7 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
 	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
 
 	memset(&l5_data, 0, sizeof(l5_data));
-	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
 				  FCOE_CONNECTION_TYPE, &l5_data);
 	return ret;
 }
@@ -2544,7 +2548,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
 		return -EAGAIN;		/* bnx2 is down */
 
-	if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
+	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
 		return -EINVAL;
 
 	for (i = 0; i < num_wqes; ) {
@@ -2935,7 +2939,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
 		CNIC_WR16(dev, cp->kcq1.io_addr,
 			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
 
-		if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 			cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
 					   status_idx, IGU_INT_ENABLE, 1);
 			break;
@@ -3054,13 +3058,21 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
 		break;
 	}
 	case CNIC_CTL_COMPLETION_CMD: {
-		u32 cid = BNX2X_SW_CID(info->data.comp.cid);
+		struct cnic_ctl_completion *comp = &info->data.comp;
+		u32 cid = BNX2X_SW_CID(comp->cid);
 		u32 l5_cid;
 		struct cnic_local *cp = dev->cnic_priv;
 
 		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
 			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
 
+			if (unlikely(comp->error)) {
+				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
+				netdev_err(dev->netdev,
+					   "CID %x CFC delete comp error %x\n",
+					   cid, comp->error);
+			}
+
 			ctx->wait_cond = 1;
 			wake_up(&ctx->waitq);
 		}
@@ -3935,10 +3947,17 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
 
 	for (i = 0; i < cp->max_cid_space; i++) {
 		struct cnic_context *ctx = &cp->ctx_tbl[i];
+		int j;
 
 		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
 			msleep(10);
 
+		for (j = 0; j < 5; j++) {
+			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+				break;
+			msleep(20);
+		}
+
 		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
 			netdev_warn(dev->netdev, "CID %x not deleted\n",
 				   ctx->cid);
@@ -4005,6 +4024,7 @@ static void cnic_delete_task(struct work_struct *work)
 
 	for (i = 0; i < cp->max_cid_space; i++) {
 		struct cnic_context *ctx = &cp->ctx_tbl[i];
+		int err;
 
 		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
 		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
@@ -4018,13 +4038,15 @@ static void cnic_delete_task(struct work_struct *work)
 		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
 			continue;
 
-		cnic_bnx2x_destroy_ramrod(dev, i);
+		err = cnic_bnx2x_destroy_ramrod(dev, i);
 
 		cnic_free_bnx2x_conn_resc(dev, i);
-		if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
-			atomic_dec(&cp->iscsi_conn);
+		if (!err) {
+			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
+				atomic_dec(&cp->iscsi_conn);
 
-		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+		}
 	}
 
 	if (need_resched)
@@ -4620,7 +4642,7 @@ static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
 			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
 			offsetof(struct hc_status_block_data_e1x, index_data) +
 			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
-			offsetof(struct hc_index_data, timeout), 64 / 12);
+			offsetof(struct hc_index_data, timeout), 64 / 4);
 	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
 }
 
@@ -4636,7 +4658,6 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
-	int port = CNIC_PORT(cp);
 	int i;
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	u32 val;
@@ -4677,10 +4698,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 
 	/* reset xstorm per client statistics */
 	if (cli < MAX_STAT_COUNTER_ID) {
-		val = BAR_XSTRORM_INTMEM +
-		      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
-		for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
-			CNIC_WR(dev, val + i * 4, 0);
+		data->general.statistics_zero_flg = 1;
+		data->general.statistics_en_flg = 1;
+		data->general.statistics_counter_id = cli;
 	}
 
 	cp->tx_cons_ptr =
@@ -4698,7 +4718,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int i;
-	int port = CNIC_PORT(cp);
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
 	u32 val;
@@ -4706,10 +4725,10 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 
 	/* General data */
 	data->general.client_id = cli;
-	data->general.statistics_en_flg = 1;
-	data->general.statistics_counter_id = cli;
 	data->general.activate_flg = 1;
 	data->general.sp_client_id = cli;
+	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
+	data->general.func_id = cp->pfid;
 
 	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
 		dma_addr_t buf_map;
@@ -4743,23 +4762,12 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	data->rx.status_block_id = BNX2X_DEF_SB_ID;
 
 	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
-	data->rx.bd_buff_size =	cpu_to_le16(cp->l2_single_buf_size);
 
-	data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
+	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
 	data->rx.outer_vlan_removal_enable_flg = 1;
-
-	/* reset tstorm and ustorm per client statistics */
-	if (cli < MAX_STAT_COUNTER_ID) {
-		val = BAR_TSTRORM_INTMEM +
-		      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
-		for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
-			CNIC_WR(dev, val + i * 4, 0);
-
-		val = BAR_USTRORM_INTMEM +
-		      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
-		for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
-			CNIC_WR(dev, val + i * 4, 0);
-	}
+	data->rx.silent_vlan_removal_flg = 1;
+	data->rx.silent_vlan_value = 0;
+	data->rx.silent_vlan_mask = 0xffff;
 
 	cp->rx_cons_ptr =
 		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
@@ -4775,7 +4783,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
 			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
 	cp->kcq1.sw_prod_idx = 0;
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
 
 		cp->kcq1.hw_prod_idx_ptr =
@@ -4791,7 +4799,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
 			&sb->sb.running_index[SM_RX_ID];
 	}
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
 
 		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
@@ -4808,10 +4816,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_eth_dev *ethdev = cp->ethdev;
-	int func = CNIC_FUNC(cp), ret, i;
+	int func = CNIC_FUNC(cp), ret;
 	u32 pfid;
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+	cp->port_mode = CHIP_PORT_MODE_NONE;
+
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
 
 		if (!(val & 1))
@@ -4819,10 +4829,13 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 		else
 			val = (val >> 1) & 1;
 
-		if (val)
+		if (val) {
+			cp->port_mode = CHIP_4_PORT_MODE;
 			cp->pfid = func >> 1;
-		else
+		} else {
+			cp->port_mode = CHIP_2_PORT_MODE;
 			cp->pfid = func & 0x6;
+		}
 	} else {
 		cp->pfid = func;
 	}
@@ -4834,7 +4847,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 	if (ret)
 		return -ENOMEM;
 
-	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
 		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
 					BNX2X_FCOE_NUM_CONNECTIONS,
 					cp->fcoe_start_cid, 0);
@@ -4871,15 +4884,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
 		HC_INDEX_ISCSI_EQ_CONS);
 
-	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
-		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
-			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
-			cp->conn_buf_info.pgtbl[2 * i]);
-		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
-			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
-			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
-	}
-
 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
 		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
 		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
@@ -4927,7 +4931,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
 		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
 
 		off = BAR_USTRORM_INTMEM +
-			(BNX2X_CHIP_IS_E2(cp->chip_id) ?
+			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
 			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
 			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
 
@@ -5277,7 +5281,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
 
 	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
 		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
-	if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
 	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
 		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
 
@@ -5293,7 +5297,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
 	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
 	cp->enable_int = cnic_enable_bnx2x_int;
 	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
-	if (BNX2X_CHIP_IS_E2(cp->chip_id))
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
 		cp->ack_int = cnic_ack_bnx2x_e2_msix;
 	else
 		cp->ack_int = cnic_ack_bnx2x_msix;

+ 20 - 15
drivers/net/cnic.h

@@ -1,6 +1,6 @@
 /* cnic.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2010 Broadcom Corporation
+ * Copyright (c) 2006-2011 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -68,11 +68,6 @@
 #define BNX2_PG_CTX_MAP			0x1a0034
 #define BNX2_ISCSI_CTX_MAP		0x1a0074
 
-struct cnic_redirect_entry {
-	struct dst_entry *old_dst;
-	struct dst_entry *new_dst;
-};
-
 #define MAX_COMPLETED_KCQE	64
 
 #define MAX_CNIC_L5_CONTEXT	256
@@ -171,6 +166,7 @@ struct cnic_context {
 	unsigned long		ctx_flags;
 #define	CTX_FL_OFFLD_START	0
 #define	CTX_FL_DELETE_WAIT	1
+#define	CTX_FL_CID_ERROR	2
 	u8			ulp_proto_id;
 	union {
 		struct cnic_iscsi	*iscsi;
@@ -245,7 +241,7 @@ struct cnic_local {
 	u16		rx_cons;
 	u16		tx_cons;
 
-	struct iro		*iro_arr;
+	const struct iro	*iro_arr;
 #define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
 
 	struct cnic_dma		kwq_info;
@@ -286,7 +282,6 @@ struct cnic_local {
 	struct cnic_sock	*csk_tbl;
 	struct cnic_id_tbl	csk_port_tbl;
 
-	struct cnic_dma		conn_buf_info;
 	struct cnic_dma		gbl_buf_info;
 
 	struct cnic_iscsi	*iscsi_tbl;
@@ -320,6 +315,11 @@ struct cnic_local {
 	u32			chip_id;
 	int			func;
 	u32			pfid;
+	u8			port_mode;
+#define CHIP_4_PORT_MODE	0
+#define CHIP_2_PORT_MODE	1
+#define CHIP_PORT_MODE_NONE	2
+
 	u32			shmem_base;
 
 	struct cnic_ops		*cnic_ops;
@@ -369,7 +369,6 @@ struct bnx2x_bd_chain_next {
 #define BNX2X_ISCSI_MAX_PENDING_R2TS	4
 #define BNX2X_ISCSI_R2TQE_SIZE		8
 #define BNX2X_ISCSI_HQ_BD_SIZE		64
-#define BNX2X_ISCSI_CONN_BUF_SIZE	64
 #define BNX2X_ISCSI_GLB_BUF_SIZE	64
 #define BNX2X_ISCSI_PBL_NOT_CACHED	0xff
 #define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED	0xff
@@ -406,6 +405,7 @@ struct bnx2x_bd_chain_next {
 #define BNX2X_CHIP_IS_E2(x)		\
 	(BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
 	 BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
+#define BNX2X_CHIP_IS_E2_PLUS(x) BNX2X_CHIP_IS_E2(x)
 
 #define IS_E1H_OFFSET       		BNX2X_CHIP_IS_E1H(cp->chip_id)
 
@@ -442,8 +442,8 @@ struct bnx2x_bd_chain_next {
 
 #define CNIC_PORT(cp)			((cp)->pfid & 1)
 #define CNIC_FUNC(cp)			((cp)->func)
-#define CNIC_PATH(cp)			(!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\
-					 (CNIC_FUNC(cp) & 1))
+#define CNIC_PATH(cp)			(!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
+					 0 : (CNIC_FUNC(cp) & 1))
 #define CNIC_E1HVN(cp)			((cp)->pfid >> 1)
 
 #define BNX2X_HW_CID(cp, x)		((CNIC_PORT(cp) << 23) | \
@@ -452,10 +452,15 @@ struct bnx2x_bd_chain_next {
 #define BNX2X_SW_CID(x)			(x & 0x1ffff)
 
 #define BNX2X_CL_QZONE_ID(cp, cli)					\
-		(cli + (CNIC_PORT(cp) * (BNX2X_CHIP_IS_E2(cp->chip_id) ?\
-					ETH_MAX_RX_CLIENTS_E2 :		\
-					ETH_MAX_RX_CLIENTS_E1H)))
+		(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli :		\
+		 cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+
+#ifndef MAX_STAT_COUNTER_ID
+#define MAX_STAT_COUNTER_ID						\
+	(BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H :	\
+	 ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+	  MAX_STAT_COUNTER_ID_E1))
+#endif
 
-#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK	(0<<4)
 #endif
 

File diff suppressed because it is too large
+ 588 - 494
drivers/net/cnic_defs.h


+ 5 - 3
drivers/net/cnic_if.h

@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION	"2.2.14"
-#define CNIC_MODULE_RELDATE	"Mar 30, 2011"
+#define CNIC_MODULE_VERSION	"2.5.3"
+#define CNIC_MODULE_RELDATE	"June 6, 2011"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -99,6 +99,8 @@ struct kcqe {
 
 struct cnic_ctl_completion {
 	u32	cid;
+	u8	opcode;
+	u8	error;
 };
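These two fields pair with the CNIC_CTL_COMPLETION_CMD handler shown in the
cnic.c hunk earlier (cnic_ctl() reads comp->cid and comp->error). A hedged
sketch of the producer side — the filling code lives in bnx2x and is not
part of this excerpt, so hw_cid, ramrod_opcode, fw_error and c_ops are
illustrative names:

	struct cnic_ctl_info ctl = {0};

	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = hw_cid;		/* cnic converts via BNX2X_SW_CID() */
	ctl.data.comp.opcode = ramrod_opcode;
	ctl.data.comp.error = fw_error;		/* non-zero sets CTX_FL_CID_ERROR */

	c_ops->cnic_ctl(bp->cnic_data, &ctl);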
 
 struct cnic_ctl_info {
@@ -169,7 +171,7 @@ struct cnic_eth_dev {
 	struct pci_dev	*pdev;
 	void __iomem	*io_base;
 	void __iomem	*io_base2;
-	void		*iro_arr;
+	const void	*iro_arr;
 
 	u32		ctx_tbl_offset;
 	u32		ctx_tbl_len;

File diff suppressed because it is too large
+ 441 - 549
drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h


+ 8 - 2
drivers/scsi/bnx2fc/bnx2fc.h

@@ -62,7 +62,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.1"
+#define BNX2FC_VERSION		"1.0.3"
 
 #define PFX			"bnx2fc: "
 
@@ -262,9 +262,14 @@ struct bnx2fc_rport {
 #define BNX2FC_FLAG_UPLD_REQ_COMPL	0x8
 #define BNX2FC_FLAG_EXPL_LOGO		0x9
 
+	u8 src_addr[ETH_ALEN];
 	u32 max_sqes;
 	u32 max_rqes;
 	u32 max_cqes;
+	atomic_t free_sqes;
+
+	struct b577xx_doorbell_set_prod sq_db;
+	struct b577xx_fcoe_rx_doorbell rx_db;
 
 	struct fcoe_sqe *sq;
 	dma_addr_t sq_dma;
@@ -274,7 +279,7 @@ struct bnx2fc_rport {
 
 	struct fcoe_cqe *cq;
 	dma_addr_t cq_dma;
-	u32 cq_cons_idx;
+	u16 cq_cons_idx;
 	u8 cq_curr_toggle_bit;
 	u32 cq_mem_size;
 
@@ -505,6 +510,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
 						   struct fc_frame *,
 						   void *),
 				      void *arg, u32 timeout);
+void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
 struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,

+ 103 - 36
drivers/scsi/bnx2fc/bnx2fc_constants.h

@@ -5,6 +5,12 @@
  * This file defines HSI constants for the FCoE flows
  */
 
+/* Current FCoE HSI version number composed of two fields (16 bit) */
+/* Implies a change that breaks the previous HSI */
+#define FCOE_HSI_MAJOR_VERSION (1)
+/* Implies a change which does not break the previous HSI */
+#define FCOE_HSI_MINOR_VERSION (1)
+
 /* KWQ/KCQ FCoE layer code */
 #define FCOE_KWQE_LAYER_CODE   (7)
 
@@ -40,21 +46,62 @@
 #define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE	(0x3)
 #define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE	(0x4)
 #define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR			(0x5)
+#define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION   (0x6)
+
+/* CQE type */
+#define FCOE_PENDING_CQE_TYPE			0
+#define FCOE_UNSOLIC_CQE_TYPE			1
 
 /* Unsolicited CQE type */
 #define FCOE_UNSOLICITED_FRAME_CQE_TYPE			0
 #define FCOE_ERROR_DETECTION_CQE_TYPE			1
 #define FCOE_WARNING_DETECTION_CQE_TYPE			2
 
+/* E_D_TOV timer resolution in ms */
+#define FCOE_E_D_TOV_TIMER_RESOLUTION_MS (20)
+
+/* E_D_TOV timer resolution for SDM (4 micro) */
+#define FCOE_E_D_TOV_SDM_TIMER_RESOLUTION				\
+	(FCOE_E_D_TOV_TIMER_RESOLUTION_MS * 1000 / 4)
+
+/* REC timer resolution in ms */
+#define FCOE_REC_TIMER_RESOLUTION_MS (20)
+
+/* REC timer resolution for SDM (4 micro) */
+#define FCOE_REC_SDM_TIMER_RESOLUTION (FCOE_REC_TIMER_RESOLUTION_MS * 1000 / 4)
+
+/* E_D_TOV timer default wraparound value (2 sec) in 20 ms resolution */
+#define FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL			\
+			(2000 / FCOE_E_D_TOV_TIMER_RESOLUTION_MS)
+
+/* REC_TOV timer default wraparound value (3 sec) in 20 ms resolution */
+#define FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL			\
+			(3000 / FCOE_REC_TIMER_RESOLUTION_MS)
+
+#define FCOE_NUM_OF_TIMER_TASKS  (8 * 1024)
+
+#define FCOE_NUM_OF_CACHED_TASKS_TIMER (8)
+
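Plugging in the numbers above as a sanity check (not part of the patch):

	FCOE_E_D_TOV_SDM_TIMER_RESOLUTION   = 20 * 1000 / 4 = 5000 (4 us units)
	FCOE_REC_SDM_TIMER_RESOLUTION       = 20 * 1000 / 4 = 5000 (4 us units)
	FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL = 2000 / 20     = 100 ticks (2 s)
	FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL = 3000 / 20     = 150 ticks (3 s)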
 /* Task context constants */
+/******** Remove FCP_CMD write tce sleep ***********************/
+/* In case timer services are required then shall be updated by Xstorm after
+ * start processing the task. In case no timer facilities are required then the
+ * driver would initialize the state to this value
+ *
+#define	FCOE_TASK_TX_STATE_NORMAL				0
+ * After driver has initialize the task in case timer services required *
+#define	FCOE_TASK_TX_STATE_INIT					1
+******** Remove FCP_CMD write tce sleep ***********************/
 /* After driver has initialize the task in case timer services required */
 #define	FCOE_TASK_TX_STATE_INIT					0
 /* In case timer services are required then shall be updated by Xstorm after
  * start processing the task. In case no timer facilities are required then the
- * driver would initialize the state to this value */
+ * driver would initialize the state to this value
+ */
 #define	FCOE_TASK_TX_STATE_NORMAL				1
 /* Task is under abort procedure. Updated in order to stop processing of
- * pending WQEs on this task */
+ * pending WQEs on this task
+ */
 #define	FCOE_TASK_TX_STATE_ABORT				2
 /* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
 #define	FCOE_TASK_TX_STATE_ERROR				3
@@ -66,17 +113,8 @@
 #define	FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP			6
 /* For sequence cleanup request task */
 #define	FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP			7
-/* Mark task as aborted and indicate that ABTS was not transmitted */
-#define	FCOE_TASK_TX_STATE_BEFORE_ABTS_TX			8
-/* Mark task as aborted and indicate that ABTS was transmitted */
-#define	FCOE_TASK_TX_STATE_AFTER_ABTS_TX			9
 /* For completion the ABTS task. */
-#define	FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED			10
-/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
- */
-#define	FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX		11
-/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
-#define	FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX		12
+#define	FCOE_TASK_TX_STATE_ABTS_TX				8
 
 #define	FCOE_TASK_RX_STATE_NORMAL				0
 #define	FCOE_TASK_RX_STATE_COMPLETED				1
@@ -86,25 +124,25 @@
 #define	FCOE_TASK_RX_STATE_WARNING				3
 /* For E_D_T_TOV timer expiration in Ustorm */
 #define	FCOE_TASK_RX_STATE_ERROR				4
-/* ABTS ACC arrived wait for local completion to finally complete the task. */
-#define	FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED			5
-/* local completion arrived wait for ABTS ACC to finally complete the task. */
-#define	FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED		6
+/* FW only: First visit at rx-path, part of the abts round trip */
+#define	FCOE_TASK_RX_STATE_ABTS_IN_PROCESS			5
+/* FW only: Second visit at rx-path, after ABTS frame transmitted */
+#define	FCOE_TASK_RX_STATE_ABTS_TRANSMITTED			6
 /* Special completion indication in case of task was aborted. */
 #define FCOE_TASK_RX_STATE_ABTS_COMPLETED			7
-/* Special completion indication in case of task was cleaned. */
-#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED		8
-/* Special completion indication (in task requested the exchange cleanup) in
- * case cleaned task is in non-valid. */
-#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED		9
+/* FW only: First visit at rx-path, part of the cleanup round trip */
+#define	FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_IN_PROCESS		8
+/* FW only: Special completion indication in case of task was cleaned. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED		9
+/* Not in use: Special completion indication (the task requested the exchange
+ * cleanup) in case the cleaned task is non-valid.
+ */
+#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED		10
 /* Special completion indication (in task requested the sequence cleanup) in
- * case cleaned task was already returned to normal. */
-#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP		10
-/* Exchange cleanup arrived wait until xfer will be handled to finally
- * complete the task. */
-#define	FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED		11
-/* Xfer handled, wait for exchange cleanup to finally complete the task. */
-#define	FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER	12
+ * case cleaned task was already returned to normal.
+ */
+#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP		11
+
 
 #define	FCOE_TASK_TYPE_WRITE			0
 #define	FCOE_TASK_TYPE_READ				1
@@ -120,11 +158,40 @@
 #define FCOE_TASK_CLASS_TYPE_3			0
 #define FCOE_TASK_CLASS_TYPE_2			1
 
+/* FCoE/FC packet fields  */
+#define	FCOE_ETH_TYPE					0x8906
+
+/* FCoE maximum elements in hash table */
+#define FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW	8
+
+/* FCoE half of the elements in hash table */
+#define FCOE_HALF_ELEMENTS_IN_HASH_TABLE_ROW			\
+			(FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW / 2)
+
+/* FcoE number of cached T2 entries */
+#define T_FCOE_NUMBER_OF_CACHED_T2_ENTRIES (4)
+
+/* FCoE hash table chunk size */
+#define FCOE_HASH_TBL_CHUNK_SIZE	16384
+
 /* Everest FCoE connection type */
 #define B577XX_FCOE_CONNECTION_TYPE		4
 
-/* Error codes for Error Reporting in fast path flows */
-/* XFER error codes */
+/* FCoE number of rows (in log). This number derives
+ * from the maximum connections supported which is 2048.
+ * TBA: Need a different constant for E2
+ */
+#define FCOE_MAX_NUM_SESSIONS_LOG		11
+
+#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN	12
+
+/* Error codes for Error Reporting in slow path flows */
+#define FCOE_SLOW_PATH_ERROR_CODE_TOO_MANY_FUNCS			0
+#define FCOE_SLOW_PATH_ERROR_CODE_NO_LICENSE				1
+
+/* Error codes for Error Reporting in fast path flows
+ * XFER error codes
+ */
 #define FCOE_ERROR_CODE_XFER_OOO_RO					0
 #define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED				1
 #define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN				2
@@ -155,17 +222,17 @@
 #define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET			23
 #define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET			24
 #define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET				25
-#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET			26
-#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ			27
+#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET				26
+#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ				27
 #define FCOE_ERROR_CODE_DATA_FCTL					28
 
 /* Middle path error codes */
-#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS				29
+#define FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE				29
 #define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET			30
 #define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET			31
 #define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET			32
 #define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET			33
-#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL				34
+#define FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL				34
 #define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY				35
 #define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL				36
 
@@ -173,7 +240,7 @@
 #define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL				37
 #define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD			38
 #define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL			39
-#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL			40
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL				40
 #define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH		41
 
 /* Common error codes */
@@ -185,7 +252,7 @@
 #define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES			47
 #define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR				48
 #define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG			49
-#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED		50
+#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED			50
 
 /* Unsolicited Rx error codes */
 #define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS			51

+ 4 - 3
drivers/scsi/bnx2fc/bnx2fc_els.c

@@ -83,7 +83,7 @@ int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
 	rrq.rrq_cmd = ELS_RRQ;
 	hton24(rrq.rrq_s_id, sid);
 	rrq.rrq_ox_id = htons(aborted_io_req->xid);
-	rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);
+	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);
 
 retry_rrq:
 	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
@@ -417,12 +417,13 @@ void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
 
 	hdr = (u64 *)fc_hdr;
 	temp_hdr = (u64 *)
-		&task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
 	hdr[0] = cpu_to_be64(temp_hdr[0]);
 	hdr[1] = cpu_to_be64(temp_hdr[1]);
 	hdr[2] = cpu_to_be64(temp_hdr[2]);
 
-	mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+	mp_req->resp_len =
+		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
 
 	/* Parse ELS response */
 	if ((els_req->cb_func) && (els_req->cb_arg)) {

+ 2 - 2
drivers/scsi/bnx2fc/bnx2fc_fcoe.c

@@ -21,7 +21,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Mar 17, 2011"
+#define DRV_MODULE_RELDATE	"Jun 10, 2011"
 
 
 static char version[] __devinitdata =
@@ -612,7 +612,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
 		BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
 		return bnx2fc_stats;
 	}
-	bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
+	bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
 	bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
 	bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
 	bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;

+ 237 - 205
drivers/scsi/bnx2fc/bnx2fc_hwi.c

@@ -100,6 +100,9 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
 	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
 					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 
+	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
+	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
+
 	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
 	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
 					   ((u64) hba->hash_tbl_pbl_dma >> 32);
@@ -122,6 +125,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
 	fcoe_init3.error_bit_map_lo = 0xffffffff;
 	fcoe_init3.error_bit_map_hi = 0xffffffff;
 
+	fcoe_init3.perf_config = 1;
 
 	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
 	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
@@ -289,19 +293,19 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
 	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
 
 
-	ofld_req4.src_mac_addr_lo32[0] =  port->data_src_addr[5];
+	ofld_req4.src_mac_addr_lo[0] =  port->data_src_addr[5];
 							/* local mac */
-	ofld_req4.src_mac_addr_lo32[1] =  port->data_src_addr[4];
-	ofld_req4.src_mac_addr_lo32[2] =  port->data_src_addr[3];
-	ofld_req4.src_mac_addr_lo32[3] =  port->data_src_addr[2];
-	ofld_req4.src_mac_addr_hi16[0] =  port->data_src_addr[1];
-	ofld_req4.src_mac_addr_hi16[1] =  port->data_src_addr[0];
-	ofld_req4.dst_mac_addr_lo32[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
-	ofld_req4.dst_mac_addr_lo32[1] =  hba->ctlr.dest_addr[4];
-	ofld_req4.dst_mac_addr_lo32[2] =  hba->ctlr.dest_addr[3];
-	ofld_req4.dst_mac_addr_lo32[3] =  hba->ctlr.dest_addr[2];
-	ofld_req4.dst_mac_addr_hi16[0] =  hba->ctlr.dest_addr[1];
-	ofld_req4.dst_mac_addr_hi16[1] =  hba->ctlr.dest_addr[0];
+	ofld_req4.src_mac_addr_lo[1] =  port->data_src_addr[4];
+	ofld_req4.src_mac_addr_mid[0] =  port->data_src_addr[3];
+	ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
+	ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
+	ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
+	ofld_req4.dst_mac_addr_lo[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
+	ofld_req4.dst_mac_addr_lo[1] =  hba->ctlr.dest_addr[4];
+	ofld_req4.dst_mac_addr_mid[0] =  hba->ctlr.dest_addr[3];
+	ofld_req4.dst_mac_addr_mid[1] =  hba->ctlr.dest_addr[2];
+	ofld_req4.dst_mac_addr_hi[0] =  hba->ctlr.dest_addr[1];
+	ofld_req4.dst_mac_addr_hi[1] =  hba->ctlr.dest_addr[0];
 
 	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
 	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -345,20 +349,21 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
 	enbl_req.hdr.flags =
 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 
-	enbl_req.src_mac_addr_lo32[0] =  port->data_src_addr[5];
+	enbl_req.src_mac_addr_lo[0] =  port->data_src_addr[5];
 							/* local mac */
-	enbl_req.src_mac_addr_lo32[1] =  port->data_src_addr[4];
-	enbl_req.src_mac_addr_lo32[2] =  port->data_src_addr[3];
-	enbl_req.src_mac_addr_lo32[3] =  port->data_src_addr[2];
-	enbl_req.src_mac_addr_hi16[0] =  port->data_src_addr[1];
-	enbl_req.src_mac_addr_hi16[1] =  port->data_src_addr[0];
-
-	enbl_req.dst_mac_addr_lo32[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
-	enbl_req.dst_mac_addr_lo32[1] =  hba->ctlr.dest_addr[4];
-	enbl_req.dst_mac_addr_lo32[2] =  hba->ctlr.dest_addr[3];
-	enbl_req.dst_mac_addr_lo32[3] =  hba->ctlr.dest_addr[2];
-	enbl_req.dst_mac_addr_hi16[0] =  hba->ctlr.dest_addr[1];
-	enbl_req.dst_mac_addr_hi16[1] =  hba->ctlr.dest_addr[0];
+	enbl_req.src_mac_addr_lo[1] =  port->data_src_addr[4];
+	enbl_req.src_mac_addr_mid[0] =  port->data_src_addr[3];
+	enbl_req.src_mac_addr_mid[1] =  port->data_src_addr[2];
+	enbl_req.src_mac_addr_hi[0] =  port->data_src_addr[1];
+	enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
+	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
+
+	enbl_req.dst_mac_addr_lo[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
+	enbl_req.dst_mac_addr_lo[1] =  hba->ctlr.dest_addr[4];
+	enbl_req.dst_mac_addr_mid[0] =  hba->ctlr.dest_addr[3];
+	enbl_req.dst_mac_addr_mid[1] =  hba->ctlr.dest_addr[2];
+	enbl_req.dst_mac_addr_hi[0] =  hba->ctlr.dest_addr[1];
+	enbl_req.dst_mac_addr_hi[1] =  hba->ctlr.dest_addr[0];
 
 	port_id = fc_host_port_id(lport->host);
 	if (port_id != tgt->sid) {
@@ -411,18 +416,19 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
 	disable_req.hdr.flags =
 		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 
-	disable_req.src_mac_addr_lo32[0] =  port->data_src_addr[5];
-	disable_req.src_mac_addr_lo32[2] =  port->data_src_addr[3];
-	disable_req.src_mac_addr_lo32[3] =  port->data_src_addr[2];
-	disable_req.src_mac_addr_hi16[0] =  port->data_src_addr[1];
-	disable_req.src_mac_addr_hi16[1] =  port->data_src_addr[0];
+	disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
+	disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
+	disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
+	disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
+	disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
+	disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
 
-	disable_req.dst_mac_addr_lo32[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
-	disable_req.dst_mac_addr_lo32[1] =  hba->ctlr.dest_addr[4];
-	disable_req.dst_mac_addr_lo32[2] =  hba->ctlr.dest_addr[3];
-	disable_req.dst_mac_addr_lo32[3] =  hba->ctlr.dest_addr[2];
-	disable_req.dst_mac_addr_hi16[0] =  hba->ctlr.dest_addr[1];
-	disable_req.dst_mac_addr_hi16[1] =  hba->ctlr.dest_addr[0];
+	disable_req.dst_mac_addr_lo[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
+	disable_req.dst_mac_addr_lo[1] =  hba->ctlr.dest_addr[4];
+	disable_req.dst_mac_addr_mid[0] =  hba->ctlr.dest_addr[3];
+	disable_req.dst_mac_addr_mid[1] =  hba->ctlr.dest_addr[2];
+	disable_req.dst_mac_addr_hi[0] =  hba->ctlr.dest_addr[1];
+	disable_req.dst_mac_addr_hi[1] =  hba->ctlr.dest_addr[0];
 
 	port_id = tgt->sid;
 	disable_req.s_id[0] = (port_id & 0x000000FF);
@@ -640,10 +646,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 		xid = err_entry->fc_hdr.ox_id;
 		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
 		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
-			err_entry->err_warn_bitmap_hi,
-			err_entry->err_warn_bitmap_lo);
+			err_entry->data.err_warn_bitmap_hi,
+			err_entry->data.err_warn_bitmap_lo);
 		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
-			err_entry->tx_buf_off, err_entry->rx_buf_off);
+			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 
 		bnx2fc_return_rqe(tgt, 1);
 
@@ -722,10 +728,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
 		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
 		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
-			err_entry->err_warn_bitmap_hi,
-			err_entry->err_warn_bitmap_lo);
+			err_entry->data.err_warn_bitmap_hi,
+			err_entry->data.err_warn_bitmap_lo);
 		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
-			err_entry->tx_buf_off, err_entry->rx_buf_off);
+			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 
 		bnx2fc_return_rqe(tgt, 1);
 		spin_unlock_bh(&tgt->tgt_lock);
@@ -762,9 +768,9 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
 	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
 	task = &(task_page[index]);
 
-	num_rq = ((task->rx_wr_tx_rd.rx_flags &
-		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
-		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
+	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
+		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
+		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
 
 	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 
@@ -777,22 +783,19 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
 	/* Timestamp IO completion time */
 	cmd_type = io_req->cmd_type;
 
-	/* optimized completion path */
-	if (cmd_type == BNX2FC_SCSI_CMD) {
-		rx_state = ((task->rx_wr_tx_rd.rx_flags &
-			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
-			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
+	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
+		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
+		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
 
+	/* Process other IO completion types */
+	switch (cmd_type) {
+	case BNX2FC_SCSI_CMD:
 		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
 			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
 			spin_unlock_bh(&tgt->tgt_lock);
 			return;
 		}
-	}
 
-	/* Process other IO completion types */
-	switch (cmd_type) {
-	case BNX2FC_SCSI_CMD:
 		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
 			bnx2fc_process_abts_compl(io_req, task, num_rq);
 		else if (rx_state ==
@@ -819,8 +822,16 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
 		break;
 
 	case BNX2FC_ELS:
-		BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
-		bnx2fc_process_els_compl(io_req, task, num_rq);
+		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
+			bnx2fc_process_els_compl(io_req, task, num_rq);
+		else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+			bnx2fc_process_abts_compl(io_req, task, num_rq);
+		else if (rx_state ==
+			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+		else
+			printk(KERN_ERR PFX "Invalid rx state = %d\n",
+				rx_state);
 		break;
 
 	case BNX2FC_CLEANUP:
@@ -835,6 +846,20 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
 	spin_unlock_bh(&tgt->tgt_lock);
 }
 
+void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
+{
+	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
+	u32 msg;
+
+	wmb();
+	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
+			FCOE_CQE_TOGGLE_BIT_SHIFT);
+	msg = *((u32 *)rx_db);
+	writel(cpu_to_le32(msg), tgt->ctx_base);
+	mmiowb();
+
+}
+
 struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
 {
 	struct bnx2fc_work *work;
@@ -853,8 +878,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 	struct fcoe_cqe *cq;
 	u32 cq_cons;
 	struct fcoe_cqe *cqe;
+	u32 num_free_sqes = 0;
 	u16 wqe;
-	bool more_cqes_found = false;
 
 	/*
 	 * cq_lock is a low contention lock used to protect
@@ -872,62 +897,51 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 	cq_cons = tgt->cq_cons_idx;
 	cqe = &cq[cq_cons];
 
-	do {
-		more_cqes_found ^= true;
-
-		while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
-		       (tgt->cq_curr_toggle_bit <<
-		       FCOE_CQE_TOGGLE_BIT_SHIFT)) {
+	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
+	       (tgt->cq_curr_toggle_bit <<
+	       FCOE_CQE_TOGGLE_BIT_SHIFT)) {
 
-			/* new entry on the cq */
-			if (wqe & FCOE_CQE_CQE_TYPE) {
-				/* Unsolicited event notification */
-				bnx2fc_process_unsol_compl(tgt, wqe);
-			} else {
-				struct bnx2fc_work *work = NULL;
-				struct bnx2fc_percpu_s *fps = NULL;
-				unsigned int cpu = wqe % num_possible_cpus();
-
-				fps = &per_cpu(bnx2fc_percpu, cpu);
-				spin_lock_bh(&fps->fp_work_lock);
-				if (unlikely(!fps->iothread))
-					goto unlock;
-
-				work = bnx2fc_alloc_work(tgt, wqe);
-				if (work)
-					list_add_tail(&work->list,
-							&fps->work_list);
+		/* new entry on the cq */
+		if (wqe & FCOE_CQE_CQE_TYPE) {
+			/* Unsolicited event notification */
+			bnx2fc_process_unsol_compl(tgt, wqe);
+		} else {
+			/* Pending work request completion */
+			struct bnx2fc_work *work = NULL;
+			struct bnx2fc_percpu_s *fps = NULL;
+			unsigned int cpu = wqe % num_possible_cpus();
+
+			fps = &per_cpu(bnx2fc_percpu, cpu);
+			spin_lock_bh(&fps->fp_work_lock);
+			if (unlikely(!fps->iothread))
+				goto unlock;
+
+			work = bnx2fc_alloc_work(tgt, wqe);
+			if (work)
+				list_add_tail(&work->list,
+					      &fps->work_list);
 unlock:
-				spin_unlock_bh(&fps->fp_work_lock);
+			spin_unlock_bh(&fps->fp_work_lock);
 
-				/* Pending work request completion */
-				if (fps->iothread && work)
-					wake_up_process(fps->iothread);
-				else
-					bnx2fc_process_cq_compl(tgt, wqe);
-			}
-			cqe++;
-			tgt->cq_cons_idx++;
-
-			if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
-				tgt->cq_cons_idx = 0;
-				cqe = cq;
-				tgt->cq_curr_toggle_bit =
-					1 - tgt->cq_curr_toggle_bit;
-			}
+			/* Pending work request completion */
+			if (fps->iothread && work)
+				wake_up_process(fps->iothread);
+			else
+				bnx2fc_process_cq_compl(tgt, wqe);
 		}
-		/* Re-arm CQ */
-		if (more_cqes_found) {
-			tgt->conn_db->cq_arm.lo = -1;
-			wmb();
+		cqe++;
+		tgt->cq_cons_idx++;
+		num_free_sqes++;
+
+		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
+			tgt->cq_cons_idx = 0;
+			cqe = cq;
+			tgt->cq_curr_toggle_bit =
+				1 - tgt->cq_curr_toggle_bit;
 		}
-	} while (more_cqes_found);
-
-	/*
-	 * Commit tgt->cq_cons_idx change to the memory
-	 * spin_lock implies full memory barrier, no need to smp_wmb
-	 */
-
+	}
+	bnx2fc_arm_cq(tgt);
+	atomic_add(num_free_sqes, &tgt->free_sqes);
 	spin_unlock_bh(&tgt->cq_lock);
 	return 0;
 }
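free_sqes is the new SQ credit counter: every CQE reaped above frees one SQ
slot. A sketch of the matching consumer on the submission side (that path is
not in this hunk; treat the check as illustrative):

	/* in the I/O submission path, before building a new SQE */
	if (atomic_read(&tgt->free_sqes) <= 0)
		return -EAGAIN;		/* SQ full, retry later */

	atomic_dec(&tgt->free_sqes);
	bnx2fc_add_2_sq(tgt, xid);
	bnx2fc_ring_doorbell(tgt);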
@@ -1141,7 +1155,11 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
 	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
 		printk(KERN_ERR PFX "init_failure due to NIC error\n");
 		break;
-
+	case FCOE_KCQE_COMPLETION_STATUS_ERROR:
+		printk(KERN_ERR PFX "init failure due to compl status err\n");
+		break;
+	case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
+		printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
 	default:
 		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
 	}
@@ -1247,21 +1265,14 @@ void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
 
 void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
 {
-	struct b577xx_doorbell_set_prod ev_doorbell;
+	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
 	u32 msg;
 
 	wmb();
-
-	memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
-	ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
-
-	ev_doorbell.prod = tgt->sq_prod_idx |
+	sq_db->prod = tgt->sq_prod_idx |
 				(tgt->sq_curr_toggle_bit << 15);
-	ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
-					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
-	msg = *((u32 *)&ev_doorbell);
+	msg = *((u32 *)sq_db);
 	writel(cpu_to_le32(msg), tgt->ctx_base);
-
 	mmiowb();
 
 }
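Both doorbell helpers now reuse a pre-built template kept in the rport
(tgt->sq_db, tgt->rx_db) instead of memset()ing and re-deriving the header
on every ring. The constant header fields would be filled once at session
setup, roughly as the removed lines used to do per ring (a sketch; field
names are taken from the old code above):

	/* one-time init, e.g. when the rport is offloaded */
	tgt->sq_db.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
	tgt->sq_db.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;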
@@ -1322,18 +1333,26 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
 	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
 
 	/* Tx Write Rx Read */
-	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
-	task->tx_wr_rx_rd.init_flags = task_type <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
-	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
-	/* Common */
-	task->cmn.common_flags = context_id <<
-				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
-	task->cmn.general.cleanup_info.task_id = orig_xid;
-
-
+	/* init flags */
+	task->txwr_rxrd.const_ctx.init_flags = task_type <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |=
+				FCOE_TASK_DEV_TYPE_DISK <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
+
+	/* Tx flags */
+	task->txwr_rxrd.const_ctx.tx_flags =
+				FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
+
+	/* Rx Read Tx Write */
+	task->rxwr_txrd.const_ctx.init_flags = context_id <<
+				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
 }
 
 void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
@@ -1342,6 +1361,7 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
 	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	struct fc_frame_header *fc_hdr;
+	struct fcoe_ext_mul_sges_ctx *sgl;
 	u8 task_type = 0;
 	u64 *hdr;
 	u64 temp_hdr[3];
@@ -1367,47 +1387,49 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
 	/* Tx only */
 	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
 	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
-		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
 				(u32)mp_req->mp_req_bd_dma;
-		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
 				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
-		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
-		BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
-			      (unsigned long long)mp_req->mp_req_bd_dma);
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
 	}
 
 	/* Tx Write Rx Read */
-	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
-	task->tx_wr_rx_rd.init_flags = task_type <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
-	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
-	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
-
-	/* Common */
-	task->cmn.data_2_trns = io_req->data_xfer_len;
-	context_id = tgt->context_id;
-	task->cmn.common_flags = context_id <<
-				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
-	task->cmn.common_flags |= 1 <<
-				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
-	task->cmn.common_flags |= 1 <<
-			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+	/* init flags */
+	task->txwr_rxrd.const_ctx.init_flags = task_type <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |=
+				FCOE_TASK_DEV_TYPE_DISK <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+
+	/* tx flags */
+	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
 
 	/* Rx Write Tx Read */
+	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
+
+	/* rx flags */
+	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
+
+	context_id = tgt->context_id;
+	task->rxwr_txrd.const_ctx.init_flags = context_id <<
+				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
 	fc_hdr = &(mp_req->req_fc_hdr);
 	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
 		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
 		fc_hdr->fh_rx_id = htons(0xffff);
-		task->rx_wr_tx_rd.rx_id = 0xffff;
+		task->rxwr_txrd.var_ctx.rx_id = 0xffff;
 	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
 		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
 	}
 
 	/* Fill FC Header into middle path buffer */
-	hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
 	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
 	hdr[0] = cpu_to_be64(temp_hdr[0]);
 	hdr[1] = cpu_to_be64(temp_hdr[1]);
@@ -1415,12 +1437,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
 
 	/* Rx Only */
 	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
 
-		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
-				(u32)mp_req->mp_resp_bd_dma;
-		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+		sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
+		sgl->mul_sgl.cur_sge_addr.hi =
 				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
-		task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+		sgl->mul_sgl.sgl_size = 1;
 	}
 }
 
@@ -1431,6 +1453,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
 	struct io_bdt *bd_tbl = io_req->bd_tbl;
 	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct fcoe_cached_sge_ctx *cached_sge;
+	struct fcoe_ext_mul_sges_ctx *sgl;
 	u64 *fcp_cmnd;
 	u64 tmp_fcp_cmnd[4];
 	u32 context_id;
@@ -1449,47 +1473,33 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
 
 	/* Tx only */
 	if (task_type == FCOE_TASK_TYPE_WRITE) {
-		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
 				(u32)bd_tbl->bd_tbl_dma;
-		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
 				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
-		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
+		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
 				bd_tbl->bd_valid;
 	}
 
 	/*Tx Write Rx Read */
 	/* Init state to NORMAL */
-	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
-	task->tx_wr_rx_rd.init_flags = task_type <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
-	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
-	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
-
-	/* Common */
-	task->cmn.data_2_trns = io_req->data_xfer_len;
-	context_id = tgt->context_id;
-	task->cmn.common_flags = context_id <<
-				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
-	task->cmn.common_flags |= 1 <<
-				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
-	task->cmn.common_flags |= 1 <<
-			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
-
-	/* Set initiative ownership */
-	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
+	task->txwr_rxrd.const_ctx.init_flags = task_type <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |=
+				FCOE_TASK_DEV_TYPE_DISK <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+	/* tx flags */
+	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
 
 	/* Set initial seq counter */
-	task->cmn.tx_low_seq_cnt = 1;
-
-	/* Set state to "waiting for the first packet" */
-	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
+	task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
 
 	/* Fill FCP_CMND IU */
 	fcp_cmnd = (u64 *)
-		    task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
+		    task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
 	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
 
 	/* swap fcp_cmnd */
@@ -1501,32 +1511,54 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
 	}
 
 	/* Rx Write Tx Read */
-	task->rx_wr_tx_rd.rx_id = 0xffff;
+	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
+
+	context_id = tgt->context_id;
+	task->rxwr_txrd.const_ctx.init_flags = context_id <<
+				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
+	/* rx flags */
+	/* Set state to "waiting for the first packet" */
+	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
+
+	task->rxwr_txrd.var_ctx.rx_id = 0xffff;
 
 	/* Rx Only */
+	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
+	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
+	bd_count = bd_tbl->bd_valid;
 	if (task_type == FCOE_TASK_TYPE_READ) {
-
-		bd_count = bd_tbl->bd_valid;
 		if (bd_count == 1) {
 
 			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
 
-			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
-					fcoe_bd_tbl->buf_addr_lo;
-			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
-					fcoe_bd_tbl->buf_addr_hi;
-			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
-					fcoe_bd_tbl->buf_len;
-			task->tx_wr_rx_rd.init_flags |= 1 <<
-				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
+			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
+			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
+			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
+			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
+		} else if (bd_count == 2) {
+			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
+			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
+			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
+
+			fcoe_bd_tbl++;
+			cached_sge->second_buf_addr.lo =
+						 fcoe_bd_tbl->buf_addr_lo;
+			cached_sge->second_buf_addr.hi =
+						fcoe_bd_tbl->buf_addr_hi;
+			cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
+			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
 		} else {
 
-			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
-					(u32)bd_tbl->bd_tbl_dma;
-			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+			sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
+			sgl->mul_sgl.cur_sge_addr.hi =
 					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
-			task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
-					bd_tbl->bd_valid;
+			sgl->mul_sgl.sgl_size = bd_count;
 		}
 	}
 }
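
Aside: in the Rx-only block of bnx2fc_init_task() above, reads spanning one or two BDs are now programmed straight into the task context's cached SGE (and flagged via FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT), which lets the chip skip fetching an external SGL from host memory; only larger reads still take the mul_sgl path. Below is a minimal, self-contained sketch of that dispatch; the struct and helper names are illustrative, not the driver's.

#include <stdint.h>

struct bd { uint32_t addr_lo, addr_hi, len; };

struct rx_ctx {
	struct { uint32_t lo, hi, rem; } sge[2];    /* cached SGEs in the task ctx */
	struct { uint32_t lo, hi, size; } mul_sgl;  /* external SGL descriptor     */
	int uses_cached_sge;                        /* mirrors the CACHED_SGE bit  */
};

static void init_rx_sgl(struct rx_ctx *rx, const struct bd *tbl,
			uint32_t bd_count, uint64_t sgl_dma)
{
	if (bd_count >= 1 && bd_count <= 2) {
		/* one or two buffers: describe them in-context */
		for (uint32_t i = 0; i < bd_count; i++) {
			rx->sge[i].lo  = tbl[i].addr_lo;
			rx->sge[i].hi  = tbl[i].addr_hi;
			rx->sge[i].rem = tbl[i].len;
		}
		rx->uses_cached_sge = 1;
	} else {
		/* otherwise point the chip at the SGL in host memory */
		rx->mul_sgl.lo   = (uint32_t)sgl_dma;
		rx->mul_sgl.hi   = (uint32_t)(sgl_dma >> 32);
		rx->mul_sgl.size = bd_count;
	}
}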

+ 16 - 7
drivers/scsi/bnx2fc/bnx2fc_io.c

@@ -425,6 +425,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
 	struct list_head *listp;
 	struct io_bdt *bd_tbl;
 	int index = RESERVE_FREE_LIST_INDEX;
+	u32 free_sqes;
 	u32 max_sqes;
 	u16 xid;
 
@@ -445,8 +446,10 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
 	 * cmgr lock
 	 */
 	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+	free_sqes = atomic_read(&tgt->free_sqes);
 	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
-	    (tgt->num_active_ios.counter  >= max_sqes)) {
+	    (tgt->num_active_ios.counter  >= max_sqes) ||
+	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
 		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
 			"ios(%d):sqes(%d)\n",
 			tgt->num_active_ios.counter, tgt->max_sqes);
@@ -463,6 +466,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
 	xid = io_req->xid;
 	cmd_mgr->cmds[xid] = io_req;
 	atomic_inc(&tgt->num_active_ios);
+	atomic_dec(&tgt->free_sqes);
 	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
 
 	INIT_LIST_HEAD(&io_req->link);
@@ -489,6 +493,7 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
 	struct bnx2fc_cmd *io_req;
 	struct list_head *listp;
 	struct io_bdt *bd_tbl;
+	u32 free_sqes;
 	u32 max_sqes;
 	u16 xid;
 	int index = get_cpu();
@@ -499,8 +504,10 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
 	 * cmgr lock
 	 */
 	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+	free_sqes = atomic_read(&tgt->free_sqes);
 	if ((list_empty(&cmd_mgr->free_list[index])) ||
-	    (tgt->num_active_ios.counter  >= max_sqes)) {
+	    (tgt->num_active_ios.counter  >= max_sqes) ||
+	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
 		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
 		put_cpu();
 		return NULL;
@@ -513,6 +520,7 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
 	xid = io_req->xid;
 	cmd_mgr->cmds[xid] = io_req;
 	atomic_inc(&tgt->num_active_ios);
+	atomic_dec(&tgt->free_sqes);
 	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
 	put_cpu();
 
@@ -873,7 +881,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
 
 	/* Obtain oxid and rxid for the original exchange to be aborted */
 	fc_hdr->fh_ox_id = htons(io_req->xid);
-	fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
+	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);
 
 	sid = tgt->sid;
 	did = rport->port_id;
@@ -1189,7 +1197,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
 			kref_put(&io_req->refcount,
 				 bnx2fc_cmd_release); /* drop timer hold */
 
-	r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
+	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;
 
 	switch (r_ctl) {
 	case FC_RCTL_BA_ACC:
@@ -1344,12 +1352,13 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
 	fc_hdr = &(tm_req->resp_fc_hdr);
 	hdr = (u64 *)fc_hdr;
 	temp_hdr = (u64 *)
-		&task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
 	hdr[0] = cpu_to_be64(temp_hdr[0]);
 	hdr[1] = cpu_to_be64(temp_hdr[1]);
 	hdr[2] = cpu_to_be64(temp_hdr[2]);
 
-	tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+	tm_req->resp_len =
+		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
 
 	rsp_buf = tm_req->resp_buf;
 
@@ -1724,7 +1733,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 
 	/* Fetch fcp_rsp from task context and perform cmd completion */
 	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
-		   &(task->cmn.general.rsp_info.fcp_rsp.payload);
+		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
 
 	/* parse fcp_rsp and obtain sense data from RQ if available */
 	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
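
Aside: both bnx2fc_elstm_alloc() and bnx2fc_cmd_alloc() now also consult the per-target free_sqes counter. Since free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX is equivalent to (BNX2FC_SQ_WQES_MAX - free_sqes) >= max_sqes, allocation is refused once the SQ WQEs already consumed reach the max_sqes budget for that command type. A standalone sketch of the predicate follows; the harness is illustrative, and in the driver free_sqes is presumably replenished on CQ completion, which is not part of these hunks.

#include <stdatomic.h>
#include <stdbool.h>

#define SQ_WQES_MAX 256u                  /* stand-in for BNX2FC_SQ_WQES_MAX */

static atomic_uint free_sqes = SQ_WQES_MAX;

static bool try_take_sqe(unsigned int max_sqes)
{
	unsigned int free_now = atomic_load(&free_sqes);

	/* same predicate as the patch: consumed WQEs >= max_sqes */
	if (free_now + max_sqes <= SQ_WQES_MAX)
		return false;
	atomic_fetch_sub(&free_sqes, 1);  /* command allocated */
	return true;
}

static void put_sqe(void)                 /* on completion */
{
	atomic_fetch_add(&free_sqes, 1);
}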

+ 19 - 5
drivers/scsi/bnx2fc/bnx2fc_tgt.c

@@ -133,6 +133,8 @@ retry_ofld:
 		/* upload will take care of cleaning up sess resc */
 		lport->tt.rport_logoff(rdata);
 	}
+	/* Arm CQ */
+	bnx2fc_arm_cq(tgt);
 	return;
 
 ofld_err:
@@ -315,6 +317,8 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
 
 	struct fc_rport *rport = rdata->rport;
 	struct bnx2fc_hba *hba = port->priv;
+	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
+	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
 
 	tgt->rport = rport;
 	tgt->rdata = rdata;
@@ -335,6 +339,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
 	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
 	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
 	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
+	atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);
 
 	/* Initialize the toggle bit */
 	tgt->sq_curr_toggle_bit = 1;
@@ -345,7 +350,17 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
 	tgt->rq_cons_idx = 0;
 	atomic_set(&tgt->num_active_ios, 0);
 
-	tgt->work_time_slice = 2;
+	/* initialize sq doorbell */
+	sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
+	sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
+					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
+	/* initialize rx doorbell */
+	rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) |
+			  (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) |
+			  (B577XX_FCOE_CONNECTION_TYPE <<
+				B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT));
+	rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) |
+		     (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT);
 
 	spin_lock_init(&tgt->tgt_lock);
 	spin_lock_init(&tgt->cq_lock);
@@ -758,8 +773,6 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	}
 	memset(tgt->lcq, 0, tgt->lcq_mem_size);
 
-	/* Arm CQ */
-	tgt->conn_db->cq_arm.lo = -1;
 	tgt->conn_db->rq_prod = 0x8000;
 
 	return 0;
@@ -787,6 +800,8 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
 		iounmap(tgt->ctx_base);
 		tgt->ctx_base = NULL;
 	}
+
+	spin_lock_bh(&tgt->cq_lock);
 	/* Free LCQ */
 	if (tgt->lcq) {
 		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
@@ -828,17 +843,16 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
 		tgt->rq = NULL;
 	}
 	/* Free CQ */
-	spin_lock_bh(&tgt->cq_lock);
 	if (tgt->cq) {
 		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
 				    tgt->cq, tgt->cq_dma);
 		tgt->cq = NULL;
 	}
-	spin_unlock_bh(&tgt->cq_lock);
 	/* Free SQ */
 	if (tgt->sq) {
 		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
 				    tgt->sq, tgt->sq_dma);
 		tgt->sq = NULL;
 	}
+	spin_unlock_bh(&tgt->cq_lock);
 }
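
Aside: two related changes above. The "Arm CQ" step moves from session resource allocation to the end of the offload path, and cq_lock in bnx2fc_free_session_resc() is widened from protecting only the CQ free to covering the LCQ, RQ, CQ and SQ frees, presumably so a CQ handler still running in BH context cannot touch any ring buffer mid-free. A userspace sketch of the widened critical section, with pthread_mutex standing in for spin_lock_bh and all names illustrative:

#include <pthread.h>
#include <stdlib.h>

struct tgt_rings {
	pthread_mutex_t cq_lock;  /* stand-in for tgt->cq_lock; assumed
				   * initialized with pthread_mutex_init() */
	void *lcq, *rq, *cq, *sq;
};

static void free_session_resc(struct tgt_rings *t)
{
	/* one critical section now covers every ring the CQ handler
	 * might still read, not just the CQ itself */
	pthread_mutex_lock(&t->cq_lock);
	free(t->lcq); t->lcq = NULL;
	free(t->rq);  t->rq  = NULL;
	free(t->cq);  t->cq  = NULL;
	free(t->sq);  t->sq  = NULL;
	pthread_mutex_unlock(&t->cq_lock);
}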

+ 8 - 4
drivers/scsi/bnx2i/57xx_iscsi_hsi.h

@@ -707,8 +707,10 @@ struct iscsi_kwqe_conn_update {
 #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
 #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
 #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4)
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6
 #elif defined(__LITTLE_ENDIAN)
 	u8 conn_flags;
 #define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
@@ -719,8 +721,10 @@ struct iscsi_kwqe_conn_update {
 #define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
 #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
 #define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4)
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6
 	u8 reserved2;
 	u8 max_outstanding_r2ts;
 	u8 session_error_recovery_level;
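
Aside: the HSI change above carves a 2-bit OOO (out-of-order) support mode field out of the former 4-bit reserved area of conn_flags, leaving 2 reserved bits, mirrored for both endiannesses. A sketch of accessing the new field; the mask and shift values are copied from the hunk, while the helpers themselves are illustrative:

#include <stdint.h>

#define OOO_SUPPORT_MODE       (0x3u << 4)  /* as defined in the hunk */
#define OOO_SUPPORT_MODE_SHIFT 4

static inline uint8_t get_ooo_mode(uint8_t conn_flags)
{
	return (uint8_t)((conn_flags & OOO_SUPPORT_MODE) >>
			 OOO_SUPPORT_MODE_SHIFT);
}

static inline uint8_t set_ooo_mode(uint8_t conn_flags, uint8_t mode)
{
	conn_flags &= (uint8_t)~OOO_SUPPORT_MODE;
	return (uint8_t)(conn_flags |
			 ((mode << OOO_SUPPORT_MODE_SHIFT) & OOO_SUPPORT_MODE));
}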

Some files were not shown because too many files changed in this diff