@@ -19,7 +19,36 @@
  */
 #include "bnx2x.h"
 #include "bnx2x_init.h"
+#include "bnx2x_cmn.h"
 #include "bnx2x_sriov.h"
+
+/* General service functions */
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+				  u16 pf_id)
+{
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+}
+
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+				 u8 enable)
+{
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+}
+
 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
 {
 	int idx;
@@ -272,6 +301,376 @@ failed:
 	__bnx2x_iov_free_vfdb(bp);
 	return err;
 }
+/* VF enable primitives
+ * when pretend is required the caller is responsible
+ * for calling pretend prior to calling these routines
+ */
+
+/* called only on E1H or E2.
+ * When pretending to be PF, the pretend value is the function number 0...7.
+ * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID
+ * combination.
+ */
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
+{
+	u32 pretend_reg;
+
+	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
+		return -1;
+
+	/* get my own pretend register */
+	pretend_reg = bnx2x_get_pretend_reg(bp);
+	REG_WR(bp, pretend_reg, pretend_func_val);
+	REG_RD(bp, pretend_reg);
+	return 0;
+}
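+
+/* The REG_RD() read-back above presumably ensures the posted pretend
+ * write has reached the chip before any access that depends on it.
+ * Pretend calls are bracketed; e.g. (mirroring the usage in
+ * bnx2x_vf_enable_access() below):
+ *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
+ *	bnx2x_vf_enable_internal(bp, true);
+ *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));	restore own id
+ */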
+
+/* internal vf enable - until vf is enabled internally all transactions
+ * are blocked. this routine should always be called last with pretend.
+ */
+static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
+{
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
+}
+
+/* clears vf error in all semi blocks */
+static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
+	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
+	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
+	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
+}
+
+static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
+	u32 was_err_reg = 0;
+
+	switch (was_err_group) {
+	case 0:
+		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
+		break;
+	case 1:
+		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
+		break;
+	case 2:
+		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
+		break;
+	case 3:
+		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
+		break;
+	}
+	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
+}
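+
+/* Each WAS_ERROR register above covers 32 VFs; on path 1 the VF id is
+ * offset by 2 before grouping.  Worked example on path 0, abs_vfid 70:
+ * group = 70 >> 5 = 2, so VF_95_64_CLR is written with
+ * bit 70 & 0x1f = 6.
+ */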
+
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
+{
+	/* set the VF-PF association in the FW */
+	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
+	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+
+	/* clear vf errors */
+	bnx2x_vf_semi_clear_err(bp, abs_vfid);
+	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
+
+	/* internal vf-enable - pretend */
+	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
+	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
+	bnx2x_vf_enable_internal(bp, true);
+	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
+{
+	struct pci_dev *dev;
+	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+	if (!vf)
+		goto unknown_dev;
+
+	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
+	if (dev)
+		return bnx2x_is_pcie_pending(dev);
+
+unknown_dev:
+	BNX2X_ERR("Unknown device\n");
+	return false;
+}
+
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
+{
+	/* Wait 100ms */
+	msleep(100);
+
+	/* Verify no pending pci transactions */
+	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
+		BNX2X_ERR("PCIE Transactions still pending\n");
+
+	return 0;
+}
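+
+/* The 100ms sleep matches the settle time PCIe mandates after an FLR
+ * before the function may be accessed again; the pending-transaction
+ * check is a sanity check only - a failure is logged but not treated
+ * as fatal (the function still returns 0).
+ */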
+
+/* must be called after the number of PF queues and the number of VFs
+ * are both known
+ */
+static void
+bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+{
+	u16 vlan_count = 0;
+
+	/* will be set only during VF-ACQUIRE */
+	resc->num_rxqs = 0;
+	resc->num_txqs = 0;
+
+	/* no credit calculations for macs (just yet) */
+	resc->num_mac_filters = 1;
+
+	/* divvy up vlan rules */
+	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
+	vlan_count = 1 << ilog2(vlan_count);
+	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
+
+	/* no real limitation */
+	resc->num_mc_filters = 0;
+
+	/* num_sbs already set */
+}
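+
+/* Worked example (illustrative numbers): a vlans pool of 100 credits
+ * is rounded down to a power of two, 1 << ilog2(100) = 64; with 16
+ * VFs each one is provisioned 64 / 16 = 4 vlan filters.
+ */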
+
+/* IOV global initialization routines */
+void bnx2x_iov_init_dq(struct bnx2x *bp)
+{
+	if (!IS_SRIOV(bp))
+		return;
+
+	/* Set the DQ such that the CID reflects the abs_vfid */
+	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
+	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
+
+	/* Set the VF starting CID. If it's > 0 the preceding CIDs belong
+	 * to the PF L2 queues
+	 */
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
+
+	/* The VF window size is the log2 of the max number of CIDs per VF */
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
+
+	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
+	 * the PF doorbell size, although the two are independent.
+	 */
+	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
+	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+
+	/* No security checks for now -
+	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
+	 * CID range 0 - 0x1ffff
+	 */
+	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
+	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
+	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
+	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
+
+	/* set the number of VF allowed doorbells to the full DQ range */
+	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
+
+	/* set the VF doorbell threshold */
+	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
+}
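+
+/* With VF_BASE = 0 and a window of BNX2X_VF_CID_WND bits, a doorbell
+ * from abs_vfid n presumably maps to
+ *	CID = BNX2X_FIRST_VF_CID + (n << BNX2X_VF_CID_WND) + cid-in-vf
+ * consistent with base_vf_cid in bnx2x_iov_nic_init() below, assuming
+ * BNX2X_CIDS_PER_VF == 1 << BNX2X_VF_CID_WND.
+ */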
+
+void bnx2x_iov_init_dmae(struct bnx2x *bp)
+{
+	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
+	if (!IS_SRIOV(bp))
+		return;
+
+	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+}
+
+static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
+{
+	struct pci_dev *dev = bp->pdev;
+	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+	return dev->bus->number + ((dev->devfn + iov->offset +
+				    iov->stride * vfid) >> 8);
+}
+
+static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
+{
+	struct pci_dev *dev = bp->pdev;
+	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
+}
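+
+/* Standard SR-IOV routing-ID arithmetic: VF n's RID is
+ * PF-RID + offset + stride * n, with bits 15:8 the bus and bits 7:0
+ * the devfn.  E.g. (made-up values): PF at bus 4, devfn 0, offset 16,
+ * stride 2 puts VF 3 at RID 0x400 + 16 + 6 = 0x416, i.e. bus 4,
+ * devfn 0x16.
+ */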
+
+static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int i, n;
+	struct pci_dev *dev = bp->pdev;
+	struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
+		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
+		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
+
+		size /= iov->total;
+		vf->bars[n].bar = start + size * vf->abs_vfid;
+		vf->bars[n].size = size;
+	}
+}
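+
+/* The step of 2 walks BAR pairs, assuming 64-bit VF BARs (six
+ * resources, three logical BARs).  Each VF BAR resource is split into
+ * iov->total equal slices; e.g. (illustrative) a 64MB resource with
+ * TotalVFs = 64 gives each VF a 1MB slice at start + 1MB * abs_vfid.
+ */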
+
+void bnx2x_iov_free_mem(struct bnx2x *bp)
+{
+	int i;
+
+	if (!IS_SRIOV(bp))
+		return;
+
+	/* free vfs hw contexts */
+	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+		struct hw_dma *cxt = &bp->vfdb->context[i];
+		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
+	}
+
+	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
+		       BP_VFDB(bp)->sp_dma.mapping,
+		       BP_VFDB(bp)->sp_dma.size);
+
+	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
+		       BP_VF_MBX_DMA(bp)->mapping,
+		       BP_VF_MBX_DMA(bp)->size);
+}
+
+int bnx2x_iov_alloc_mem(struct bnx2x *bp)
+{
+	size_t tot_size;
+	int i, rc = 0;
+
+	if (!IS_SRIOV(bp))
+		return rc;
+
+	/* allocate vfs hw contexts */
+	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
+		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
+
+	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
+		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
+
+		if (cxt->size) {
+			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
+		} else {
+			cxt->addr = NULL;
+			cxt->mapping = 0;
+		}
+		tot_size -= cxt->size;
+	}
+
+	/* allocate vfs ramrods dma memory - client_init and set_mac */
+	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
+	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
+			tot_size);
+	BP_VFDB(bp)->sp_dma.size = tot_size;
+
+	/* allocate mailboxes */
+	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
+	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
+			tot_size);
+	BP_VF_MBX_DMA(bp)->size = tot_size;
+
+	return 0;
+
+alloc_mem_err:
+	return -ENOMEM;
+}
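+
+/* BNX2X_PCI_ALLOC is assumed to 'goto alloc_mem_err' on failure, which
+ * is why the label frees nothing here - the caller presumably pairs a
+ * failed bnx2x_iov_alloc_mem() with bnx2x_iov_free_mem().  Context
+ * sizing example (illustrative): first_vf_in_pf = 0 and 64 VFs need
+ * 64 * BNX2X_CIDS_PER_VF cdu contexts, carved into CDU_ILT_PAGE_SZ
+ * chunks; once tot_size reaches 0, the remaining pages get size 0 and
+ * no allocation.
+ */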
+
+/* called by bnx2x_nic_load */
+int bnx2x_iov_nic_init(struct bnx2x *bp)
+{
+	int vfid, qcount, i;
+
+	if (!IS_SRIOV(bp)) {
+		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
+		return 0;
+	}
+
+	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
+
+	/* initialize vf database */
+	for_each_vf(bp, vfid) {
+		struct bnx2x_virtf *vf = BP_VF(bp, vfid);
+
+		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
+			BNX2X_CIDS_PER_VF;
+
+		union cdu_context *base_cxt = (union cdu_context *)
+			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+			(base_vf_cid & (ILT_PAGE_CIDS-1));
+
+		DP(BNX2X_MSG_IOV,
+		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
+		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
+		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
+
+		/* init statically provisioned resources */
+		bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+
+		/* queues are initialized during VF-ACQUIRE */
+
+		/* reserve the vf vlan credit */
+		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
+
+		vf->filter_state = 0;
+		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+		/* init mcast object - This object will be re-initialized
+		 * during VF-ACQUIRE with the proper cl_id and cid.
+		 * It needs to be initialized here so that it can be safely
+		 * handled by a subsequent FLR flow.
+		 */
+		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
+				     0xFF, 0xFF, 0xFF,
+				     bnx2x_vf_sp(bp, vf, mcast_rdata),
+				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+				     BNX2X_FILTER_MCAST_PENDING,
+				     &vf->filter_state,
+				     BNX2X_OBJ_TYPE_RX_TX);
+
+		/* set the mailbox message addresses */
+		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
+			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
+			 MBX_MSG_ALIGNED_SIZE);
+
+		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
+			vfid * MBX_MSG_ALIGNED_SIZE;
+
+		/* Enable vf mailbox */
+		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+	}
+
+	/* Final VF init */
+	qcount = 0;
+	for_each_vf(bp, i) {
+		struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+		/* fill in the BDF and bars */
+		vf->bus = bnx2x_vf_bus(bp, i);
+		vf->devfn = bnx2x_vf_devfn(bp, i);
+		bnx2x_vf_set_bars(bp, vf);
+
+		DP(BNX2X_MSG_IOV,
+		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
+		   vf->abs_vfid, vf->bus, vf->devfn,
+		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
+		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
+		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
+
+		/* set local queue arrays */
+		vf->vfqs = &bp->vfdb->vfqs[qcount];
+		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
+	}
+
+	return 0;
+}
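+
+/* Mailbox layout implied above: one MBX_MSG_ALIGNED_SIZE slot per VF
+ * in a single DMA block, so VF n's request/response lives at
+ * addr + n * MBX_MSG_ALIGNED_SIZE, with msg_mapping holding the
+ * matching bus address for the chip side.
+ */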
 
 /* called by bnx2x_init_hw_func, returns the next ilt line */
 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)