@@ -586,7 +586,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
 		      bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
 	/* lock the dmae channel */
-	mutex_lock(&bp->dmae_mutex);
+	spin_lock_bh(&bp->dmae_lock);
 
 	/* reset completion */
 	*wb_comp = 0;
@@ -617,7 +617,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
 		      bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
 unlock:
-	mutex_unlock(&bp->dmae_mutex);
+	spin_unlock_bh(&bp->dmae_lock);
 	return rc;
 }
 
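
With the two hunks above, the DMAE channel is serialized by a BH-disabling spinlock instead of a mutex, so a DMAE transaction can be issued from contexts that must not sleep; the completion word is then busy-polled under the lock. A standalone sketch of that polling pattern follows; the marker value, timeout budget and names are illustrative, not the driver's:

    #include <stdatomic.h>
    #include <stdio.h>

    #define COMP_VAL 0xD0AE0001u            /* hypothetical "done" marker */

    static _Atomic unsigned int wb_comp;    /* stands in for the DMA write-back word */

    static int poll_for_completion(int budget)
    {
            while (atomic_load(&wb_comp) != COMP_VAL) {
                    if (!budget--)
                            return -1;      /* timed out */
                    /* the driver does udelay(5) here; spinning is fine
                     * because the caller holds a non-sleeping lock */
            }
            return 0;
    }

    int main(void)
    {
            atomic_store(&wb_comp, COMP_VAL);       /* pretend the HW completed */
            printf("poll -> %d\n", poll_for_completion(1000));
            return 0;
    }
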
@@ -1397,7 +1397,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 	}
 
 	smp_mb__before_atomic_inc();
-	atomic_inc(&bp->spq_left);
+	atomic_inc(&bp->cq_spq_left);
 	/* push the change in fp->state and towards the memory */
 	smp_wmb();
 
@@ -2484,8 +2484,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
 	rxq_init->sge_map = fp->rx_sge_mapping;
 	rxq_init->rcq_map = fp->rx_comp_mapping;
 	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
-	rxq_init->mtu = bp->dev->mtu;
-	rxq_init->buf_sz = bp->rx_buf_size;
+
+	/* Always use mini-jumbo MTU for FCoE L2 ring */
+	if (IS_FCOE_FP(fp))
+		rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+	else
+		rxq_init->mtu = bp->dev->mtu;
+
+	rxq_init->buf_sz = fp->rx_buf_size;
 	rxq_init->cl_qzone_id = fp->cl_qzone_id;
 	rxq_init->cl_id = fp->cl_id;
 	rxq_init->spcl_id = fp->cl_id;
@@ -2737,11 +2743,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 
 	spin_lock_bh(&bp->spq_lock);
 
-	if (!atomic_read(&bp->spq_left)) {
-		BNX2X_ERR("BUG! SPQ ring full!\n");
-		spin_unlock_bh(&bp->spq_lock);
-		bnx2x_panic();
-		return -EBUSY;
+	if (common) {
+		if (!atomic_read(&bp->eq_spq_left)) {
+			BNX2X_ERR("BUG! EQ ring full!\n");
+			spin_unlock_bh(&bp->spq_lock);
+			bnx2x_panic();
+			return -EBUSY;
+		}
+	} else if (!atomic_read(&bp->cq_spq_left)) {
+		BNX2X_ERR("BUG! SPQ ring full!\n");
+		spin_unlock_bh(&bp->spq_lock);
+		bnx2x_panic();
+		return -EBUSY;
 	}
 
 	spe = bnx2x_sp_get_next(bp);
@@ -2772,20 +2785,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
 
 	/* stats ramrod has it's own slot on the spq */
-	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
 		/* It's ok if the actual decrement is issued towards the memory
 		 * somewhere between the spin_lock and spin_unlock. Thus no
 		 * more explict memory barrier is needed.
 		 */
-		atomic_dec(&bp->spq_left);
+		if (common)
+			atomic_dec(&bp->eq_spq_left);
+		else
+			atomic_dec(&bp->cq_spq_left);
+	}
+
 	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
 	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
-	   "type(0x%x) left %x\n",
+	   "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
 	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
 	   (u32)(U64_LO(bp->spq_mapping) +
 	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-	   HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
+	   HW_CID(bp, cid), data_hi, data_lo, type,
+	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
 
 	bnx2x_sp_prod_update(bp);
 	spin_unlock_bh(&bp->spq_lock);
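
The single spq_left credit is now split in two: ramrods completed on the fast-path CQ consume cq_spq_left, while COMMON ramrods completed on the event queue consume eq_spq_left, so one class can no longer starve the other. A runnable toy model of the two pools (MAX_SPQ_PENDING is an assumed value mirroring the driver macro):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_SPQ_PENDING 8   /* assumed value of the driver macro */

    static atomic_int cq_spq_left = MAX_SPQ_PENDING; /* ETH ramrods, completed on the CQ */
    static atomic_int eq_spq_left = MAX_SPQ_PENDING; /* COMMON ramrods, completed on the EQ */

    /* returns 0 on success, -1 when the chosen pool is exhausted */
    static int sp_post(int common)
    {
            atomic_int *left = common ? &eq_spq_left : &cq_spq_left;

            if (!atomic_load(left))
                    return -1;
            atomic_fetch_sub(left, 1);
            return 0;
    }

    int main(void)
    {
            for (int i = 0; i <= MAX_SPQ_PENDING; i++)
                    printf("common post %d -> %d\n", i, sp_post(1));
            /* the ETH pool is independent, so this still succeeds */
            printf("eth post -> %d\n", sp_post(0));
            return 0;
    }
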
@@ -3697,8 +3716,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 	sw_cons = bp->eq_cons;
 	sw_prod = bp->eq_prod;
 
-	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u  bp->spq_left %u\n",
-			hw_cons, sw_cons, atomic_read(&bp->spq_left));
+	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u  bp->eq_spq_left %u\n",
+			hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
 
 	for (; sw_cons != hw_cons;
 	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3763,13 +3782,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
 		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
 		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
 			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
-			bp->set_mac_pending = 0;
+			if (elem->message.data.set_mac_event.echo)
+				bp->set_mac_pending = 0;
 			break;
 
 		case (EVENT_RING_OPCODE_SET_MAC |
 		      BNX2X_STATE_CLOSING_WAIT4_HALT):
 			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
-			bp->set_mac_pending = 0;
+			if (elem->message.data.set_mac_event.echo)
+				bp->set_mac_pending = 0;
 			break;
 		default:
 			/* unknown event log error and continue */
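
Because list-configuration ramrods reuse the same SET_MAC completion opcode, the hdr.echo bit posted with the ramrod comes back in the event and tells the handler whether anyone is actually waiting on bp->set_mac_pending. A minimal runnable model of that handshake (names are illustrative):

    #include <stdio.h>

    /* Toy model: only ramrods posted with echo=1 complete the waiter flag. */
    static int set_mac_pending;

    static void post_set_mac(int echo) { if (echo) set_mac_pending = 1; }
    static void eq_complete(int echo)  { if (echo) set_mac_pending = 0; }

    int main(void)
    {
            post_set_mac(1);      /* single-MAC config: waits for completion */
            eq_complete(0);       /* a UC/MC list ramrod completes: no effect */
            printf("pending after list completion: %d\n", set_mac_pending);
            eq_complete(1);       /* the echo'd ramrod completes */
            printf("pending after echo completion: %d\n", set_mac_pending);
            return 0;
    }
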
@@ -3781,7 +3802,7 @@ next_spqe:
 	} /* for */
 
 	smp_mb__before_atomic_inc();
-	atomic_add(spqe_cnt, &bp->spq_left);
+	atomic_add(spqe_cnt, &bp->eq_spq_left);
 
 	bp->eq_cons = sw_cons;
 	bp->eq_prod = sw_prod;
@@ -4208,13 +4229,13 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
 
 	for_each_eth_queue(bp, i)
 		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
-					 bp->rx_ticks, bp->tx_ticks);
+					 bp->tx_ticks, bp->rx_ticks);
 }
 
 static void bnx2x_init_sp_ring(struct bnx2x *bp)
 {
 	spin_lock_init(&bp->spq_lock);
-	atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
+	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
 
 	bp->spq_prod_idx = 0;
 	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4239,9 +4260,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
 	bp->eq_cons = 0;
 	bp->eq_prod = NUM_EQ_DESC;
 	bp->eq_cons_sb = BNX2X_EQ_INDEX;
+	/* we want a warning message before it gets rough... */
+	atomic_set(&bp->eq_spq_left,
+		   min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 }
 
-static void bnx2x_init_ind_table(struct bnx2x *bp)
+void bnx2x_push_indir_table(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
 	int i;
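
The initial EQ credit is bounded by both the SPQ room left after reserving MAX_SPQ_PENDING slots for fast-path ramrods and the EQ size itself, minus one slot so the producer can never completely fill the ring. With assumed constant values the arithmetic looks like this:

    #include <stdio.h>

    /* Assumed values; the real macros live in bnx2x.h */
    #define MAX_SP_DESC_CNT 63
    #define MAX_SPQ_PENDING 8
    #define NUM_EQ_DESC     64

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            /* one slot stays free so the producer never catches the consumer */
            int eq_credit = MIN(MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1;
            printf("initial eq_spq_left = %d\n", eq_credit);  /* 54 with these values */
            return 0;
    }
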
@@ -4249,13 +4273,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
 	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
 		return;
 
-	DP(NETIF_MSG_IFUP,
-	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
 	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
 			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-			bp->fp->cl_id + (i % (bp->num_queues -
-					      NONE_ETH_CONTEXT_USE)));
+			bp->fp->cl_id + bp->rx_indir_table[i]);
+}
+
+static void bnx2x_init_ind_table(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+		bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
+
+	bnx2x_push_indir_table(bp);
 }
 
 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
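
bnx2x_init_ind_table() now only computes the default round-robin spread in bp->rx_indir_table and delegates the register writes to bnx2x_push_indir_table(), so a user-supplied table (for example via ethtool) can be pushed through the same path. A small standalone illustration of the table and how an RSS hash indexes it (table size and queue count are example values):

    #include <stdio.h>

    #define TABLE_SIZE 128   /* assumed TSTORM_INDIRECTION_TABLE_SIZE */

    int main(void)
    {
            unsigned char tbl[TABLE_SIZE];
            int num_eth_queues = 6;   /* example queue count */

            /* default spread: round-robin over the ETH queues only */
            for (int i = 0; i < TABLE_SIZE; i++)
                    tbl[i] = i % num_eth_queues;

            /* an RSS hash picks a queue by indexing the table */
            unsigned int rss_hash = 0xdeadbeef;
            printf("hash 0x%x -> queue %u\n", rss_hash, tbl[rss_hash % TABLE_SIZE]);
            return 0;
    }
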
@@ -5851,7 +5882,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 	   BP_ABS_FUNC(bp), load_code);
 
 	bp->dmae_ready = 0;
-	mutex_init(&bp->dmae_mutex);
+	spin_lock_init(&bp->dmae_lock);
 	rc = bnx2x_gunzip_init(bp);
 	if (rc)
 		return rc;
@@ -6003,6 +6034,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
 	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
 		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
 
+	BNX2X_FREE(bp->rx_indir_table);
+
 #undef BNX2X_PCI_FREE
 #undef BNX2X_KFREE
 }
@@ -6133,6 +6166,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
 	/* EQ */
 	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
 			BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+	BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
+		    TSTORM_INDIRECTION_TABLE_SIZE);
 	return 0;
 
 alloc_mem_err:
@@ -6186,12 +6222,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
 	int ramrod_flags = WAIT_RAMROD_COMMON;
 
 	bp->set_mac_pending = 1;
-	smp_wmb();
 
 	config->hdr.length = 1;
 	config->hdr.offset = cam_offset;
 	config->hdr.client_id = 0xff;
-	config->hdr.reserved1 = 0;
+	/* Mark this as a single MAC configuration ramrod (as opposed to a
+	 * UC/MC list configuration).
+	 */
+	config->hdr.echo = 1;
 
 	/* primary MAC */
 	config->config_table[0].msb_mac_addr =
@@ -6223,6 +6261,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
 	   config->config_table[0].middle_mac_addr,
 	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
 
+	mb();
+
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
 		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6287,20 +6327,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
 	if (CHIP_IS_E1H(bp))
 		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
 	else if (CHIP_MODE_IS_4_PORT(bp))
-		return BP_FUNC(bp) * 32 + rel_offset;
+		return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
 	else
-		return BP_VN(bp) * 32 + rel_offset;
+		return E2_FUNC_MAX * rel_offset + BP_VN(bp);
 }
 
 /**
  * LLH CAM line allocations: currently only iSCSI and ETH macs are
  * relevant. In addition, current implementation is tuned for a
  * single ETH MAC.
- *
- * When multiple unicast ETH MACs PF configuration in switch
- * independent mode is required (NetQ, multiple netdev MACs,
- * etc.), consider better utilisation of 16 per function MAC
- * entries in the LLH memory.
  */
 enum {
 	LLH_CAM_ISCSI_ETH_LINE = 0,
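
For E2 in both 4-port and 2-port modes the CAM offset now uses the same column-major layout as E1H: line rel_offset of every function is grouped together with a stride of the function count, instead of giving each function a private window of 32 lines. A sketch of the arithmetic (the *_FUNC_MAX values are assumed from the driver headers):

    #include <stdio.h>

    #define E1H_FUNC_MAX 8   /* assumed driver constant */
    #define E2_FUNC_MAX  4   /* assumed driver constant */

    /* column-major CAM layout: line 'rel_offset' of function 'func' */
    static int e1h_cam_offset(int rel_offset, int func)
    {
            return E1H_FUNC_MAX * rel_offset + func;
    }

    static int e2_cam_offset(int rel_offset, int func)
    {
            return E2_FUNC_MAX * rel_offset + func;
    }

    int main(void)
    {
            /* e.g. the second per-function line (rel_offset 1) of function 2 */
            printf("E1H: %d, E2: %d\n", e1h_cam_offset(1, 2), e2_cam_offset(1, 2));
            return 0;
    }
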
@@ -6375,14 +6410,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
 		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
 	}
 }
-static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
+
+static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
+{
+	return CHIP_REV_IS_SLOW(bp) ?
+		(BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
+		(BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
+}
+
+/* set mc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of a very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
 {
 	int i = 0, old;
 	struct net_device *dev = bp->dev;
+	u8 offset = bnx2x_e1_cam_mc_offset(bp);
 	struct netdev_hw_addr *ha;
 	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
 	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
 
+	if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
+		return -EINVAL;
+
 	netdev_for_each_mc_addr(ha, dev) {
 		/* copy mac */
 		config_cmd->config_table[i].msb_mac_addr =
@@ -6423,32 +6481,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
 		}
 	}
 
+	wmb();
+
 	config_cmd->hdr.length = i;
 	config_cmd->hdr.offset = offset;
 	config_cmd->hdr.client_id = 0xff;
-	config_cmd->hdr.reserved1 = 0;
+	/* Mark that this ramrod doesn't use bp->set_mac_pending for
+	 * synchronization.
+	 */
+	config_cmd->hdr.echo = 0;
 
-	bp->set_mac_pending = 1;
-	smp_wmb();
+	mb();
 
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 }
-static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
+
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
 {
 	int i;
 	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
 	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
 	int ramrod_flags = WAIT_RAMROD_COMMON;
+	u8 offset = bnx2x_e1_cam_mc_offset(bp);
 
-	bp->set_mac_pending = 1;
-	smp_wmb();
-
-	for (i = 0; i < config_cmd->hdr.length; i++)
+	for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
 		SET_FLAG(config_cmd->config_table[i].flags,
 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 			 T_ETH_MAC_COMMAND_INVALIDATE);
 
+	wmb();
+
+	config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
+	config_cmd->hdr.offset = offset;
+	config_cmd->hdr.client_id = 0xff;
+	/* We'll wait for a completion this time... */
+	config_cmd->hdr.echo = 1;
+
+	bp->set_mac_pending = 1;
+
+	mb();
+
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 
@@ -6458,6 +6531,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
 
 }
 
+/* Accept one or more multicasts */
+static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
+{
+	struct net_device *dev = bp->dev;
+	struct netdev_hw_addr *ha;
+	u32 mc_filter[MC_HASH_SIZE];
+	u32 crc, bit, regidx;
+	int i;
+
+	memset(mc_filter, 0, 4 * MC_HASH_SIZE);
+
+	netdev_for_each_mc_addr(ha, dev) {
+		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+		   bnx2x_mc_addr(ha));
+
+		crc = crc32c_le(0, bnx2x_mc_addr(ha),
+				ETH_ALEN);
+		bit = (crc >> 24) & 0xff;
+		regidx = bit >> 5;
+		bit &= 0x1f;
+		mc_filter[regidx] |= (1 << bit);
+	}
+
+	for (i = 0; i < MC_HASH_SIZE; i++)
+		REG_WR(bp, MC_HASH_OFFSET(bp, i),
+		       mc_filter[i]);
+
+	return 0;
+}
+
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < MC_HASH_SIZE; i++)
+		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+}
+
 #ifdef BCM_CNIC
 /**
  * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
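
The E1H multicast path is unchanged in substance, only moved into its own function: each address is hashed with CRC32C, the top eight bits of the CRC select one of 256 filter bins, and the bins live in eight 32-bit MC_HASH registers. A standalone version of the bin computation follows; whether the kernel's crc32c_le() applies extra pre/post conditioning is an assumption here, the point being how regidx and bit are derived:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* bitwise CRC32C (Castagnoli, reflected poly 0x82F63B78), seeded with 0
     * to mirror the driver's crc32c_le(0, addr, ETH_ALEN) call */
    static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
    {
            while (len--) {
                    crc ^= *p++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
            }
            return crc;
    }

    #define MC_HASH_SIZE 8   /* 8 x 32-bit registers = 256 filter bins */

    int main(void)
    {
            uint32_t mc_filter[MC_HASH_SIZE] = { 0 };
            const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

            uint32_t crc = crc32c(0, mac, sizeof(mac));
            unsigned int bit = (crc >> 24) & 0xff;   /* top byte picks 1 of 256 bins */
            unsigned int regidx = bit >> 5;          /* which 32-bit register */
            bit &= 0x1f;                             /* which bit inside it */

            mc_filter[regidx] |= 1u << bit;
            printf("crc=0x%08x reg=%u bit=%u\n", crc, regidx, bit);
            return 0;
    }
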
@@ -6476,12 +6587,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
 	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
 		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
 	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
+	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
 
 	/* Send a SET_MAC ramrod */
-	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
+	bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
 			       cam_offset, 0);
 
-	bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
+	bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
 
 	return 0;
 }
@@ -7123,20 +7235,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	if (CHIP_IS_E1(bp)) {
-		/* invalidate mc list,
-		 * wait and poll (interrupts are off)
-		 */
-		bnx2x_invlidate_e1_mc_list(bp);
-		bnx2x_set_eth_mac(bp, 0);
-
-	} else {
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+	bnx2x_set_eth_mac(bp, 0);
 
-		bnx2x_set_eth_mac(bp, 0);
+	bnx2x_invalidate_uc_list(bp);
 
-		for (i = 0; i < MC_HASH_SIZE; i++)
-			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+	if (CHIP_IS_E1(bp))
+		bnx2x_invalidate_e1_mc_list(bp);
+	else {
+		bnx2x_invalidate_e1h_mc_list(bp);
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 	}
 
 #ifdef BCM_CNIC
@@ -8405,11 +8512,47 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
 			       bp->common.shmem2_base);
 }
 
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+				drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
+	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+				drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
+
+	/* Get the number of maximum allowed iSCSI and FCoE connections */
+	bp->cnic_eth_dev.max_iscsi_conn =
+		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+	bp->cnic_eth_dev.max_fcoe_conn =
+		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+		       bp->cnic_eth_dev.max_iscsi_conn,
+		       bp->cnic_eth_dev.max_fcoe_conn);
+
+	/* If maximum allowed number of connections is zero -
+	 * disable the feature.
+	 */
+	if (!bp->cnic_eth_dev.max_iscsi_conn)
+		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+	if (!bp->cnic_eth_dev.max_fcoe_conn)
+		bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
 	u32 val, val2;
 	int func = BP_ABS_FUNC(bp);
 	int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+	u8 *fip_mac = bp->fip_mac;
+#endif
 
 	if (BP_NOMCP(bp)) {
 		BNX2X_ERROR("warning: random MAC workaround active\n");
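
The license words in shmem are XOR-obfuscated with FW_ENCODE_32BIT_PATTERN before the connection-count field is masked out; a zero count disables the corresponding feature. A toy decode with assumed constant values (the real pattern, mask and shift come from the firmware headers):

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed values for illustration only */
    #define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
    #define CONN_MASK  0x0000ffffu
    #define CONN_SHIFT 0

    int main(void)
    {
            uint32_t shmem_word = 0x1e1e1e1e ^ 64; /* pretend 64 licensed conns */

            uint32_t decoded = FW_ENCODE_32BIT_PATTERN ^ shmem_word;
            uint32_t max_conn = (decoded & CONN_MASK) >> CONN_SHIFT;

            printf("max connections = %u%s\n", max_conn,
                   max_conn ? "" : " (feature disabled)");
            return 0;
    }
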
@@ -8422,7 +8565,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
 #ifdef BCM_CNIC
-	/* iSCSI NPAR MAC */
+	/* iSCSI and FCoE NPAR MACs: if there is no iSCSI or
+	 * FCoE MAC then the appropriate feature should be disabled.
+	 */
 	if (IS_MF_SI(bp)) {
 		u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
 		if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8430,8 +8575,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 					     iscsi_mac_addr_upper);
 			val = MF_CFG_RD(bp, func_ext_config[func].
 					iscsi_mac_addr_lower);
-			bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
-		}
+			BNX2X_DEV_INFO("Read iSCSI MAC: "
+				       "0x%x:0x%04x\n", val2, val);
+			bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+			/* Disable iSCSI OOO if MAC configuration is
+			 * invalid.
+			 */
+			if (!is_valid_ether_addr(iscsi_mac)) {
+				bp->flags |= NO_ISCSI_OOO_FLAG |
+					     NO_ISCSI_FLAG;
+				memset(iscsi_mac, 0, ETH_ALEN);
+			}
+		} else
+			bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+		if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+			val2 = MF_CFG_RD(bp, func_ext_config[func].
+					 fcoe_mac_addr_upper);
+			val = MF_CFG_RD(bp, func_ext_config[func].
+					fcoe_mac_addr_lower);
+			BNX2X_DEV_INFO("Read FCoE MAC: "
+				       "0x%x:0x%04x\n", val2, val);
+			bnx2x_set_mac_buf(fip_mac, val, val2);
+
+			/* Disable FCoE if MAC configuration is
+			 * invalid.
+			 */
+			if (!is_valid_ether_addr(fip_mac)) {
+				bp->flags |= NO_FCOE_FLAG;
+				memset(bp->fip_mac, 0, ETH_ALEN);
+			}
+		} else
+			bp->flags |= NO_FCOE_FLAG;
 	}
 #endif
 	} else {
@@ -8445,7 +8621,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 						iscsi_mac_upper);
 		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
 			       iscsi_mac_lower);
-		bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+		bnx2x_set_mac_buf(iscsi_mac, val, val2);
 #endif
 	}
 
@@ -8453,14 +8629,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 
 #ifdef BCM_CNIC
-	/* Inform the upper layers about FCoE MAC */
+	/* Set the FCoE MAC in modes other than MF_SI */
 	if (!CHIP_IS_E1x(bp)) {
 		if (IS_MF_SD(bp))
-			memcpy(bp->fip_mac, bp->dev->dev_addr,
-			       sizeof(bp->fip_mac));
-		else
-			memcpy(bp->fip_mac, bp->iscsi_mac,
-			       sizeof(bp->fip_mac));
+			memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+		else if (!IS_MF(bp))
+			memcpy(fip_mac, iscsi_mac, ETH_ALEN);
 	}
 #endif
 }
@@ -8623,6 +8797,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 	/* Get MAC addresses */
 	bnx2x_get_mac_hwinfo(bp);
 
+#ifdef BCM_CNIC
+	bnx2x_get_cnic_info(bp);
+#endif
+
 	return rc;
 }
 
@@ -8837,12 +9015,197 @@ static int bnx2x_close(struct net_device *dev)
 	return 0;
 }
 
+#define E1_MAX_UC_LIST	29
+#define E1H_MAX_UC_LIST	30
+#define E2_MAX_UC_LIST	14
+static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+{
+	if (CHIP_IS_E1(bp))
+		return E1_MAX_UC_LIST;
+	else if (CHIP_IS_E1H(bp))
+		return E1H_MAX_UC_LIST;
+	else
+		return E2_MAX_UC_LIST;
+}
+
+
+static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
+{
+	if (CHIP_IS_E1(bp))
+		/* CAM Entries for Port0:
+		 *      0 - prim ETH MAC
+		 *      1 - BCAST MAC
+		 *      2 - iSCSI L2 ring ETH MAC
+		 *      3-31 - UC MACs
+		 *
+		 * Port1 entries are allocated the same way starting from
+		 * entry 32.
+		 */
+		return 3 + 32 * BP_PORT(bp);
+	else if (CHIP_IS_E1H(bp)) {
+		/* CAM Entries:
+		 *      0-7  - prim ETH MAC for each function
+		 *      8-15 - iSCSI L2 ring ETH MAC for each function
+		 *      16 till 255 UC MAC lists for each function
+		 *
+		 * Remark: There is no FCoE support for E1H, thus FCoE related
+		 *         MACs are not considered.
+		 */
+		return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
+			bnx2x_max_uc_list(bp) * BP_FUNC(bp);
+	} else {
+		/* CAM Entries (there is a separate CAM per engine):
+		 *      0-4  - prim ETH MAC for each function
+		 *      4-7 - iSCSI L2 ring ETH MAC for each function
+		 *      8-11 - FIP ucast L2 MAC for each function
+		 *      12-15 - ALL_ENODE_MACS mcast MAC for each function
+		 *      16 till 71 UC MAC lists for each function
+		 */
+		u8 func_idx =
+			(CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
+
+		return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
+			bnx2x_max_uc_list(bp) * func_idx;
+	}
+}
+
+/* set uc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+	int i = 0, old;
+	struct net_device *dev = bp->dev;
+	u8 offset = bnx2x_uc_list_cam_offset(bp);
+	struct netdev_hw_addr *ha;
+	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+
+	if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
+		return -EINVAL;
+
+	netdev_for_each_uc_addr(ha, dev) {
+		/* copy mac */
+		config_cmd->config_table[i].msb_mac_addr =
+			swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
+		config_cmd->config_table[i].middle_mac_addr =
+			swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
+		config_cmd->config_table[i].lsb_mac_addr =
+			swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
+
+		config_cmd->config_table[i].vlan_id = 0;
+		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
+		config_cmd->config_table[i].clients_bit_vector =
+			cpu_to_le32(1 << BP_L_ID(bp));
+
+		SET_FLAG(config_cmd->config_table[i].flags,
+			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_SET);
+
+		DP(NETIF_MSG_IFUP,
+		   "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
+		   config_cmd->config_table[i].msb_mac_addr,
+		   config_cmd->config_table[i].middle_mac_addr,
+		   config_cmd->config_table[i].lsb_mac_addr);
+
+		i++;
+
+		/* Set uc MAC in NIG */
+		bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
+				     LLH_CAM_ETH_LINE + i);
+	}
+	old = config_cmd->hdr.length;
+	if (old > i) {
+		for (; i < old; i++) {
+			if (CAM_IS_INVALID(config_cmd->
+					   config_table[i])) {
+				/* already invalidated */
+				break;
+			}
+			/* invalidate */
+			SET_FLAG(config_cmd->config_table[i].flags,
+				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+				 T_ETH_MAC_COMMAND_INVALIDATE);
+		}
+	}
+
+	wmb();
+
+	config_cmd->hdr.length = i;
+	config_cmd->hdr.offset = offset;
+	config_cmd->hdr.client_id = 0xff;
+	/* Mark that this ramrod doesn't use bp->set_mac_pending for
+	 * synchronization.
+	 */
+	config_cmd->hdr.echo = 0;
+
+	mb();
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+}
+
+void bnx2x_invalidate_uc_list(struct bnx2x *bp)
+{
+	int i;
+	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+	int ramrod_flags = WAIT_RAMROD_COMMON;
+	u8 offset = bnx2x_uc_list_cam_offset(bp);
+	u8 max_list_size = bnx2x_max_uc_list(bp);
+
+	for (i = 0; i < max_list_size; i++) {
+		SET_FLAG(config_cmd->config_table[i].flags,
+			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_INVALIDATE);
+		bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
+	}
+
+	wmb();
+
+	config_cmd->hdr.length = max_list_size;
+	config_cmd->hdr.offset = offset;
+	config_cmd->hdr.client_id = 0xff;
+	/* We'll wait for a completion this time... */
+	config_cmd->hdr.echo = 1;
+
+	bp->set_mac_pending = 1;
+
+	mb();
+
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+	/* Wait for a completion */
+	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
+			  ramrod_flags);
+
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+	/* some multicasts */
+	if (CHIP_IS_E1(bp)) {
+		return bnx2x_set_e1_mc_list(bp);
+	} else { /* E1H and newer */
+		return bnx2x_set_e1h_mc_list(bp);
+	}
+}
+
 /* called with netif_tx_lock from dev_mcast.c */
 void bnx2x_set_rx_mode(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
-	int port = BP_PORT(bp);
 
 	if (bp->state != BNX2X_STATE_OPEN) {
 		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
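
Each CAM entry stores the six MAC bytes as three 16-bit words in wire order, which is what swab16() of the raw 16-bit loads produces on a little-endian host. A runnable illustration (the printed values assume a little-endian host):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint16_t swab16(uint16_t x)
    {
            return (uint16_t)((x << 8) | (x >> 8));
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
            uint16_t w;

            /* each 16-bit CAM word holds two MAC bytes in wire order */
            memcpy(&w, &mac[0], 2);
            printf("msb    = %04x\n", swab16(w)); /* 0010 */
            memcpy(&w, &mac[2], 2);
            printf("middle = %04x\n", swab16(w)); /* 18ab */
            memcpy(&w, &mac[4], 2);
            printf("lsb    = %04x\n", swab16(w)); /* cdef */
            return 0;
    }
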
@@ -8853,47 +9216,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
 
 	if (dev->flags & IFF_PROMISC)
 		rx_mode = BNX2X_RX_MODE_PROMISC;
-	else if ((dev->flags & IFF_ALLMULTI) ||
-		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
-		  CHIP_IS_E1(bp)))
+	else if (dev->flags & IFF_ALLMULTI)
 		rx_mode = BNX2X_RX_MODE_ALLMULTI;
-	else { /* some multicasts */
-		if (CHIP_IS_E1(bp)) {
-			/*
-			 * set mc list, do not wait as wait implies sleep
-			 * and set_rx_mode can be invoked from non-sleepable
-			 * context
-			 */
-			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
-				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
-				     BNX2X_MAX_MULTICAST*(1 + port));
+	else {
+		/* some multicasts */
+		if (bnx2x_set_mc_list(bp))
+			rx_mode = BNX2X_RX_MODE_ALLMULTI;
 
-			bnx2x_set_e1_mc_list(bp, offset);
-		} else { /* E1H */
-			/* Accept one or more multicasts */
-			struct netdev_hw_addr *ha;
-			u32 mc_filter[MC_HASH_SIZE];
-			u32 crc, bit, regidx;
-			int i;
-
-			memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
-			netdev_for_each_mc_addr(ha, dev) {
-				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
-				   bnx2x_mc_addr(ha));
-
-				crc = crc32c_le(0, bnx2x_mc_addr(ha),
-						ETH_ALEN);
-				bit = (crc >> 24) & 0xff;
-				regidx = bit >> 5;
-				bit &= 0x1f;
-				mc_filter[regidx] |= (1 << bit);
-			}
-
-			for (i = 0; i < MC_HASH_SIZE; i++)
-				REG_WR(bp, MC_HASH_OFFSET(bp, i),
-				       mc_filter[i]);
-		}
+		/* some unicasts */
+		if (bnx2x_set_uc_list(bp))
+			rx_mode = BNX2X_RX_MODE_PROMISC;
 	}
 
 	bp->rx_mode = rx_mode;
@@ -8974,7 +9306,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 	.ndo_stop		= bnx2x_close,
 	.ndo_start_xmit		= bnx2x_start_xmit,
 	.ndo_select_queue	= bnx2x_select_queue,
-	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
+	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
 	.ndo_set_mac_address	= bnx2x_change_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= bnx2x_ioctl,
@@ -9120,7 +9452,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
 	dev->vlan_features |= NETIF_F_TSO6;
 
-#ifdef BCM_DCB
+#ifdef BCM_DCBNL
 	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
 #endif
 
@@ -9527,6 +9859,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 	}
 #endif
 
+#ifdef BCM_DCBNL
+	/* Delete app tlvs from dcbnl */
+	bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
 	unregister_netdev(dev);
 
 	/* Delete all NAPI objects */
@@ -9800,15 +10137,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
 				HW_CID(bp, BNX2X_ISCSI_ETH_CID));
 		}
 
-		/* There may be not more than 8 L2 and COMMON SPEs and not more
-		 * than 8 L5 SPEs in the air.
+		/* There may be no more than 8 L2 and no more than 8 L5 SPEs.
+		 * We also check that the number of outstanding
+		 * COMMON ramrods is not more than the EQ and SPQ can
+		 * accommodate.
 		 */
-		if ((type == NONE_CONNECTION_TYPE) ||
-		    (type == ETH_CONNECTION_TYPE)) {
-			if (!atomic_read(&bp->spq_left))
+		if (type == ETH_CONNECTION_TYPE) {
+			if (!atomic_read(&bp->cq_spq_left))
 				break;
 			else
-				atomic_dec(&bp->spq_left);
+				atomic_dec(&bp->cq_spq_left);
+		} else if (type == NONE_CONNECTION_TYPE) {
+			if (!atomic_read(&bp->eq_spq_left))
+				break;
+			else
+				atomic_dec(&bp->eq_spq_left);
 		} else if ((type == ISCSI_CONNECTION_TYPE) ||
 			   (type == FCOE_CONNECTION_TYPE)) {
 			if (bp->cnic_spq_pending >=
@@ -9886,7 +10229,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
 	int rc = 0;
 
 	mutex_lock(&bp->cnic_mutex);
-	c_ops = bp->cnic_ops;
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_mutex));
 	if (c_ops)
 		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
 	mutex_unlock(&bp->cnic_mutex);
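
rcu_dereference_protected() is the update-side counterpart of rcu_dereference(): it skips the read-side barrier work and instead lets lockdep assert that the stated lock really is held. The contrast, as a kernel-style sketch rather than a standalone program:

    /* reader: no mutex, inside an RCU read-side critical section */
    rcu_read_lock();
    c_ops = rcu_dereference(bp->cnic_ops);
    if (c_ops)
            c_ops->cnic_ctl(bp->cnic_data, ctl);
    rcu_read_unlock();

    /* updater: cnic_mutex held; the condition is checked by lockdep */
    c_ops = rcu_dereference_protected(bp->cnic_ops,
                                      lockdep_is_held(&bp->cnic_mutex));
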
@@ -10000,7 +10344,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 		int count = ctl->data.credit.credit_count;
 
 		smp_mb__before_atomic_inc();
-		atomic_add(count, &bp->spq_left);
+		atomic_add(count, &bp->cq_spq_left);
 		smp_mb__after_atomic_inc();
 		break;
 	}
@@ -10096,6 +10440,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
 	struct bnx2x *bp = netdev_priv(dev);
 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
+	/* If both iSCSI and FCoE are disabled - return NULL in
+	 * order to indicate to CNIC that it should not try to work
+	 * with this device.
+	 */
+	if (NO_ISCSI(bp) && NO_FCOE(bp))
+		return NULL;
+
 	cp->drv_owner = THIS_MODULE;
 	cp->chip_id = CHIP_ID(bp);
 	cp->pdev = bp->pdev;
@@ -10116,6 +10467,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
 		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
 	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
 
+	if (NO_ISCSI_OOO(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+	if (NO_ISCSI(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+	if (NO_FCOE(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
 	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
 			 "starting cid %d\n",
 	   cp->ctx_blk_size,