@@ -29,7 +29,6 @@
 char vmxnet3_driver_name[] = "vmxnet3";
 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
 
-
 /*
  * PCI Device ID Table
  * Last entry must be all 0s
@@ -151,11 +150,10 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter)
 	}
 }
 
-
 static void
 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 {
-	u32 events = adapter->shared->ecr;
+	u32 events = le32_to_cpu(adapter->shared->ecr);
 	if (!events)
 		return;
 
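This first conversion sets the pattern for the rest of the patch: every field
in memory shared with the device is little endian on the wire, so reads go
through le32_to_cpu() and writes through cpu_to_le32(); both compile to no-ops
on little-endian CPUs and to byte swaps on big-endian ones. A minimal
userspace sketch of what such a read does (sketch_le32_to_cpu() is a
hand-rolled stand-in for the kernel helper, not its real implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for le32_to_cpu(): assemble the value from explicit byte
     * positions so the result is host-endianness independent. */
    static uint32_t sketch_le32_to_cpu(const uint8_t *le_bytes)
    {
        return (uint32_t)le_bytes[0] |
               ((uint32_t)le_bytes[1] << 8) |
               ((uint32_t)le_bytes[2] << 16) |
               ((uint32_t)le_bytes[3] << 24);
    }

    int main(void)
    {
        /* 0x12345678 as the device stores it: least significant byte first */
        uint8_t ecr[4] = { 0x78, 0x56, 0x34, 0x12 };

        printf("events = 0x%x\n", (unsigned)sketch_le32_to_cpu(ecr));
        return 0; /* prints events = 0x12345678 on any host */
    }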
@@ -173,7 +171,7 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 		if (adapter->tqd_start->status.stopped) {
 			printk(KERN_ERR "%s: tq error 0x%x\n",
 			       adapter->netdev->name,
-			       adapter->tqd_start->status.error);
+			       le32_to_cpu(adapter->tqd_start->status.error));
 		}
 		if (adapter->rqd_start->status.stopped) {
 			printk(KERN_ERR "%s: rq error 0x%x\n",
@@ -185,6 +183,106 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 	}
 }
 
+#ifdef __BIG_ENDIAN_BITFIELD
+/*
+ * The device expects the bitfields in shared structures to be written in
+ * little endian. When CPU is big endian, the following routines are used to
+ * correctly read and write into ABI.
+ * The general technique used here is: double word bitfields are defined in
+ * opposite order for big endian architecture. Then before reading them in
+ * driver the complete double word is translated using le32_to_cpu. Similarly,
+ * after the driver writes into bitfields, cpu_to_le32 is used to translate the
+ * double words into required format.
+ * In order to avoid touching bits in shared structure more than once, temporary
+ * descriptors are used. These are passed as srcDesc to following functions.
+ */
+static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
+				struct Vmxnet3_RxDesc *dstDesc)
+{
+	u32 *src = (u32 *)srcDesc + 2;
+	u32 *dst = (u32 *)dstDesc + 2;
+	dstDesc->addr = le64_to_cpu(srcDesc->addr);
+	*dst = le32_to_cpu(*src);
+	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
+}
+
+static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
+			       struct Vmxnet3_TxDesc *dstDesc)
+{
+	int i;
+	u32 *src = (u32 *)(srcDesc + 1);
+	u32 *dst = (u32 *)(dstDesc + 1);
+
+	/* Working backwards so that the gen bit is set at the end. */
+	for (i = 2; i > 0; i--) {
+		src--;
+		dst--;
+		*dst = cpu_to_le32(*src);
+	}
+}
+
+
+static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
+				struct Vmxnet3_RxCompDesc *dstDesc)
+{
+	int i = 0;
+	u32 *src = (u32 *)srcDesc;
+	u32 *dst = (u32 *)dstDesc;
+	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
+		*dst = le32_to_cpu(*src);
+		src++;
+		dst++;
+	}
+}
+
+
+/* Used to read bitfield values from double words. */
+static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
+{
+	u32 temp = le32_to_cpu(*bitfield);
+	u32 mask = ((1 << size) - 1) << pos;
+	temp &= mask;
+	temp >>= pos;
+	return temp;
+}
+
+
+
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+
+#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
+			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
+			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
+#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
+			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
+			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
+#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
+			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
+			VMXNET3_TCD_GEN_SIZE)
+#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
+			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
+#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
+			(dstrcd) = (tmp); \
+			vmxnet3_RxCompToCPU((rcd), (tmp)); \
+		} while (0)
+#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
+			(dstrxd) = (tmp); \
+			vmxnet3_RxDescToCPU((rxd), (tmp)); \
+		} while (0)
+
+#else
+
+#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
+#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
+#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
+#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
+#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
+#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
+
+#endif /* __BIG_ENDIAN_BITFIELD  */
+
 
 static void
 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
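The helper block above deliberately avoids assuming how the compiler packs
bitfields: the whole double word is byte-swapped once, and individual fields
are then carved out by mask and shift in get_bitfield32(). A worked example of
that logic under a hypothetical field layout (the pos/size values are
illustrative, not the real VMXNET3_TXD_* constants):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same mask-and-shift logic as get_bitfield32(), operating on a dword
     * that is assumed to have already been converted to CPU order. */
    static uint32_t sketch_get_bitfield32(uint32_t dword, uint32_t pos,
                                          uint32_t size)
    {
        uint32_t mask = ((1u << size) - 1) << pos;

        return (dword & mask) >> pos;
    }

    int main(void)
    {
        /* Illustrative layout: bits 0-13 length, bit 14 gen, bit 15 eop. */
        uint32_t dword = (1u << 15) | (1u << 14) | 1514;

        assert(sketch_get_bitfield32(dword, 0, 14) == 1514); /* length */
        assert(sketch_get_bitfield32(dword, 14, 1) == 1);    /* gen    */
        assert(sketch_get_bitfield32(dword, 15, 1) == 1);    /* eop    */
        printf("bitfield reads OK\n");
        return 0;
    }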
@@ -212,7 +310,7 @@ vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
 
 	/* no out of order completion */
 	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
-	BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);
+	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
 
 	skb = tq->buf_info[eop_idx].skb;
 	BUG_ON(skb == NULL);
@@ -246,9 +344,10 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
 	union Vmxnet3_GenericDesc *gdesc;
 
 	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
-	while (gdesc->tcd.gen == tq->comp_ring.gen) {
-		completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
-					       adapter->pdev, adapter);
+	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
+		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
+					       &gdesc->tcd), tq, adapter->pdev,
+					       adapter);
 
 		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
 		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
@@ -472,9 +571,9 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 		}
 
 		BUG_ON(rbi->dma_addr == 0);
-		gd->rxd.addr = rbi->dma_addr;
-		gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
-				rbi->len;
+		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
+		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
+				| val | rbi->len);
 
 		num_allocated++;
 		vmxnet3_cmd_ring_adv_next2fill(ring);
@@ -531,10 +630,10 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 	/* no need to map the buffer if headers are copied */
 	if (ctx->copy_size) {
-		ctx->sop_txd->txd.addr = tq->data_ring.basePA +
+		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
 					tq->tx_ring.next2fill *
-					sizeof(struct Vmxnet3_TxDataDesc);
-		ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
+					sizeof(struct Vmxnet3_TxDataDesc));
+		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
 		ctx->sop_txd->dword[3] = 0;
 
 		tbi = tq->buf_info + tq->tx_ring.next2fill;
@@ -542,7 +641,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 		dev_dbg(&adapter->netdev->dev,
 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
-			tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
+			tq->tx_ring.next2fill,
+			le64_to_cpu(ctx->sop_txd->txd.addr),
 			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 
@@ -570,14 +670,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-		gdesc->txd.addr = tbi->dma_addr;
-		gdesc->dword[2] = dw2 | buf_size;
+		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+		gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
 		gdesc->dword[3] = 0;
 
 		dev_dbg(&adapter->netdev->dev,
 			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
-			tq->tx_ring.next2fill, gdesc->txd.addr,
-			gdesc->dword[2], gdesc->dword[3]);
+			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 
@@ -599,14 +699,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
 		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-		gdesc->txd.addr = tbi->dma_addr;
-		gdesc->dword[2] = dw2 | frag->size;
+		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+		gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
 		gdesc->dword[3] = 0;
 
 		dev_dbg(&adapter->netdev->dev,
 			"txd[%u]: 0x%llu %u %u\n",
-			tq->tx_ring.next2fill, gdesc->txd.addr,
-			gdesc->dword[2], gdesc->dword[3]);
+			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
 	}
@@ -751,6 +851,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	unsigned long flags;
 	struct vmxnet3_tx_ctx ctx;
 	union Vmxnet3_GenericDesc *gdesc;
+#ifdef __BIG_ENDIAN_BITFIELD
+	/* Use temporary descriptor to avoid touching bits multiple times */
+	union Vmxnet3_GenericDesc tempTxDesc;
+#endif
 
 	/* conservatively estimate # of descriptors to use */
 	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
@@ -827,16 +931,22 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
 
 	/* setup the EOP desc */
-	ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;
+	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
 
 	/* setup the SOP desc */
+#ifdef __BIG_ENDIAN_BITFIELD
+	gdesc = &tempTxDesc;
+	gdesc->dword[2] = ctx.sop_txd->dword[2];
+	gdesc->dword[3] = ctx.sop_txd->dword[3];
+#else
 	gdesc = ctx.sop_txd;
+#endif
 	if (ctx.mss) {
 		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
 		gdesc->txd.om = VMXNET3_OM_TSO;
 		gdesc->txd.msscof = ctx.mss;
-		tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
-					     ctx.mss - 1) / ctx.mss;
+		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
+			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
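le32_add_cpu(), which replaces the open-coded "+=" above, is the kernel's
read-modify-write helper for little-endian counters: load, convert to CPU
order, add, convert back, store. A self-contained sketch of the equivalent
operation, modeling the __le32 field as raw bytes so it runs correctly on a
host of either endianness:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t load_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    static void store_le32(uint8_t *p, uint32_t v)
    {
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = (v >> 16) & 0xff;
        p[3] = (v >> 24) & 0xff;
    }

    /* Equivalent of le32_add_cpu(field, add). */
    static void sketch_le32_add_cpu(uint8_t *field, uint32_t add)
    {
        store_le32(field, load_le32(field) + add);
    }

    int main(void)
    {
        uint8_t txNumDeferred[4] = { 5, 0, 0, 0 }; /* little-endian 5 */

        sketch_le32_add_cpu(txNumDeferred, 3);
        printf("deferred = %u\n", (unsigned)load_le32(txNumDeferred)); /* 8 */
        return 0;
    }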
@@ -847,7 +957,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			gdesc->txd.om = 0;
 			gdesc->txd.msscof = 0;
 		}
-		tq->shared->txNumDeferred++;
+		le32_add_cpu(&tq->shared->txNumDeferred, 1);
 	}
 
 	if (vlan_tx_tag_present(skb)) {
@@ -855,19 +965,27 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		gdesc->txd.tci = vlan_tx_tag_get(skb);
 	}
 
-	wmb();
-
-	/* finally flips the GEN bit of the SOP desc */
-	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
+	/* finally flips the GEN bit of the SOP desc. */
+	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+						  VMXNET3_TXD_GEN);
+#ifdef __BIG_ENDIAN_BITFIELD
+	/* Finished updating in bitfields of Tx Desc, so write them in original
+	 * place.
+	 */
+	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
+			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
+	gdesc = ctx.sop_txd;
+#endif
 	dev_dbg(&adapter->netdev->dev,
 		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
 		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
-		tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
-		gdesc->dword[3]);
+		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
+		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
 
 	spin_unlock_irqrestore(&tq->tx_lock, flags);
 
-	if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
+	if (le32_to_cpu(tq->shared->txNumDeferred) >=
+					le32_to_cpu(tq->shared->txThreshold)) {
 		tq->shared->txNumDeferred = 0;
 		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
 				       tq->tx_ring.next2fill);
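Flipping GEN via cpu_to_le32(le32_to_cpu(...) ^ VMXNET3_TXD_GEN) looks
redundant on x86, where both conversions compile away, but the round trip is
what lands the flag in the correct byte on big-endian machines: xoring the
CPU-order mask into the raw little-endian image would flip a bit three bytes
away. A sketch of the correct read-modify-write (SKETCH_TXD_GEN is an
illustrative mask, not the real constant):

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_TXD_GEN 0x00004000u /* illustrative gen-bit mask */

    int main(void)
    {
        /* dword[2] as stored in descriptor memory, LSB first; this is the
         * CPU-order value 0x00004000, i.e. gen bit currently set. */
        uint8_t dword2[4] = { 0x00, 0x40, 0x00, 0x00 };

        /* Correct RMW: to CPU order, xor, then back to LE on the way out. */
        uint32_t v = (uint32_t)dword2[0] | ((uint32_t)dword2[1] << 8) |
                     ((uint32_t)dword2[2] << 16) | ((uint32_t)dword2[3] << 24);
        v ^= SKETCH_TXD_GEN;

        printf("after flip: 0x%08x\n", (unsigned)v); /* 0x00000000 */
        return 0;
    }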
@@ -889,9 +1007,8 @@ static netdev_tx_t
 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-	struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
 
-	return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
+	return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
 }
 
 
@@ -902,7 +1019,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
 {
 	if (!gdesc->rcd.cnc && adapter->rxcsum) {
 		/* typical case: TCP/UDP over IP and both csums are correct */
-		if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
+		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
 				VMXNET3_RCD_CSUM_OK) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
@@ -957,8 +1074,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 	u32 num_rxd = 0;
 	struct Vmxnet3_RxCompDesc *rcd;
 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
-
-	rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
+#ifdef __BIG_ENDIAN_BITFIELD
+	struct Vmxnet3_RxDesc rxCmdDesc;
+	struct Vmxnet3_RxCompDesc rxComp;
+#endif
+	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
+			  &rxComp);
 	while (rcd->gen == rq->comp_ring.gen) {
 		struct vmxnet3_rx_buf_info *rbi;
 		struct sk_buff *skb;
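The vmxnet3_getRxComp()/vmxnet3_getRxDesc() calls above use the
temporary-descriptor technique from the helper block: on big-endian builds the
output pointer is aimed at an on-stack copy that gets byte-swapped exactly
once, while little-endian builds alias the ring entry directly at zero cost. A
stripped-down sketch of the same shape with hypothetical types (the byte swap
itself is elided):

    #include <stdio.h>
    #include <string.h>

    struct sketch_desc { unsigned int dword[4]; };

    /* Hypothetical swapping copy, standing in for vmxnet3_RxCompToCPU();
     * the per-dword swap is omitted to keep the sketch short. */
    static void sketch_desc_to_cpu(const struct sketch_desc *src,
                                   struct sketch_desc *dst)
    {
        memcpy(dst, src, sizeof(*dst));
    }

    #ifdef SKETCH_BIG_ENDIAN
    # define sketch_get_desc(out, ring_entry, tmp) do {      \
                (out) = (tmp);                               \
                sketch_desc_to_cpu((ring_entry), (tmp));     \
        } while (0)
    #else
    # define sketch_get_desc(out, ring_entry, tmp) (out) = (ring_entry)
    #endif

    int main(void)
    {
        struct sketch_desc ring[1] = { { { 1, 2, 3, 4 } } };
        struct sketch_desc tmp, *rcd;

        (void)tmp; /* only consumed on big-endian builds */
        sketch_get_desc(rcd, &ring[0], &tmp);
        printf("dword[0] = %u\n", rcd->dword[0]); /* 1 either way */
        return 0;
    }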
@@ -976,11 +1097,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 		idx = rcd->rxdIdx;
 		ring_idx = rcd->rqID == rq->qid ? 0 : 1;
-
-		rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
+		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
+				  &rxCmdDesc);
 		rbi = rq->buf_info[ring_idx] + idx;
 
-		BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);
+		BUG_ON(rxd->addr != rbi->dma_addr ||
+		       rxd->len != rbi->len);
 
 		if (unlikely(rcd->eop && rcd->err)) {
 			vmxnet3_rx_error(rq, rcd, ctx, adapter);
@@ -1078,7 +1200,8 @@ rcd_done:
 		}
 
 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
-		rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
+		vmxnet3_getRxComp(rcd,
+		     &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
 	}
 
 	return num_rxd;
@@ -1094,7 +1217,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
 
 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
-			rxd = &rq->rx_ring[ring_idx].base[i].rxd;
+#ifdef __BIG_ENDIAN_BITFIELD
+			struct Vmxnet3_RxDesc rxDesc;
+#endif
+			vmxnet3_getRxDesc(rxd,
+				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
 
 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
 					rq->buf_info[ring_idx][i].skb) {
@@ -1346,12 +1473,12 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
 		err = request_irq(adapter->intr.msix_entries[0].vector,
 				  vmxnet3_intr, 0, adapter->netdev->name,
 				  adapter->netdev);
-	} else
-#endif
-	if (adapter->intr.type == VMXNET3_IT_MSI) {
+	} else if (adapter->intr.type == VMXNET3_IT_MSI) {
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
 				  adapter->netdev->name, adapter->netdev);
-	} else {
+	} else
+#endif
+	{
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
 				  IRQF_SHARED, adapter->netdev->name,
 				  adapter->netdev);
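The subtlety here is the bare "} else" left inside the #ifdef: when
CONFIG_PCI_MSI is compiled out, the MSI-X and MSI branches vanish and the
final brace block survives as an ordinary compound statement, leaving INTx as
the only path without duplicating the request_irq() call. A compilable sketch
of that preprocessor shape (SKETCH_CONFIG_PCI_MSI and the intr_type encoding
are made up for illustration):

    #include <stdio.h>

    static int sketch_request_irq(int intr_type)
    {
        (void)intr_type; /* only examined when MSI support is compiled in */
    #ifdef SKETCH_CONFIG_PCI_MSI
        if (intr_type == 2) {        /* MSI-X */
            printf("requesting MSI-X vector\n");
        } else if (intr_type == 1) { /* MSI */
            printf("requesting MSI irq\n");
        } else
    #endif
        {
            printf("requesting shared INTx irq\n");
        }
        return 0;
    }

    int main(void)
    {
        return sketch_request_irq(0);
    }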
@@ -1412,6 +1539,22 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
 	}
 }
 
+inline void set_flag_le16(__le16 *data, u16 flag)
+{
+	*data = cpu_to_le16(le16_to_cpu(*data) | flag);
+}
+
+inline void set_flag_le64(__le64 *data, u64 flag)
+{
+	*data = cpu_to_le64(le64_to_cpu(*data) | flag);
+}
+
+inline void reset_flag_le64(__le64 *data, u64 flag)
+{
+	*data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
+}
+
+
 static void
 vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
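These three helpers centralize the convert, mask, convert-back sequence for
flag words of different widths, so the hunks that follow can set or clear a
feature bit in a single call. A quick self-contained check of the set/reset
logic, modeling __le64 as a plain uint64_t (as on a little-endian host, where
the conversions are identity; the flag values are illustrative, not the real
UPT1_F_* constants):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static void sketch_set_flag_le64(uint64_t *data, uint64_t flag)
    {
        *data |= flag; /* cpu_to_le64(le64_to_cpu(*data) | flag) on LE */
    }

    static void sketch_reset_flag_le64(uint64_t *data, uint64_t flag)
    {
        *data &= ~flag; /* cpu_to_le64(le64_to_cpu(*data) & ~flag) on LE */
    }

    int main(void)
    {
        const uint64_t RXVLAN = 0x8, LRO = 0x10; /* illustrative bits */
        uint64_t uptFeatures = 0;

        sketch_set_flag_le64(&uptFeatures, RXVLAN);
        sketch_set_flag_le64(&uptFeatures, LRO);
        sketch_reset_flag_le64(&uptFeatures, RXVLAN);
        assert(uptFeatures == LRO);
        printf("flag helpers behave as expected\n");
        return 0;
    }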
@@ -1427,7 +1570,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 			adapter->vlan_grp = grp;
 
 			/* update FEATURES to device */
-			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+			set_flag_le64(&devRead->misc.uptFeatures,
+				      UPT1_F_RXVLAN);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
 			/*
@@ -1450,7 +1594,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
 		adapter->vlan_grp = NULL;
 
-		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
+		if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
 			int i;
 
 			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1463,7 +1607,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
 
 			/* update FEATURES to device */
-			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
+			reset_flag_le64(&devRead->misc.uptFeatures,
+					UPT1_F_RXVLAN);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
 		}
@@ -1565,9 +1710,10 @@ vmxnet3_set_mc(struct net_device *netdev)
 		new_table = vmxnet3_copy_mc(netdev);
 		if (new_table) {
 			new_mode |= VMXNET3_RXM_MCAST;
-			rxConf->mfTableLen = netdev->mc_count *
-					     ETH_ALEN;
-			rxConf->mfTablePA = virt_to_phys(new_table);
+			rxConf->mfTableLen = cpu_to_le16(
+						netdev->mc_count * ETH_ALEN);
+			rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
+						new_table));
 		} else {
 			printk(KERN_INFO "%s: failed to copy mcast list"
 			       ", setting ALL_MULTI\n", netdev->name);
@@ -1582,7 +1728,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 	}
 
 	if (new_mode != rxConf->rxMode) {
-		rxConf->rxMode = new_mode;
+		rxConf->rxMode = cpu_to_le32(new_mode);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_RX_MODE);
 	}
@@ -1610,63 +1756,69 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 	memset(shared, 0, sizeof(*shared));
 
 	/* driver settings */
-	shared->magic = VMXNET3_REV1_MAGIC;
-	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
+	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
+	devRead->misc.driverInfo.version = cpu_to_le32(
+						VMXNET3_DRIVER_VERSION_NUM);
 	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
 				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
 	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
-	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
-	devRead->misc.driverInfo.uptVerSpt = 1;
+	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
+				*((u32 *)&devRead->misc.driverInfo.gos));
+	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
+	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
 
-	devRead->misc.ddPA = virt_to_phys(adapter);
-	devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);
+	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
+	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
 
 	/* set up feature flags */
 	if (adapter->rxcsum)
-		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
 
 	if (adapter->lro) {
-		devRead->misc.uptFeatures |= UPT1_F_LRO;
-		devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
 	}
 	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
 			&& adapter->vlan_grp) {
-		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
 	}
 
-	devRead->misc.mtu = adapter->netdev->mtu;
-	devRead->misc.queueDescPA = adapter->queue_desc_pa;
-	devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
-				     sizeof(struct Vmxnet3_RxQueueDesc);
+	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
+	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
+	devRead->misc.queueDescLen = cpu_to_le32(
+				     sizeof(struct Vmxnet3_TxQueueDesc) +
+				     sizeof(struct Vmxnet3_RxQueueDesc));
 
 	/* tx queue settings */
 	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
 
 	devRead->misc.numTxQueues = 1;
 	tqc = &adapter->tqd_start->conf;
-	tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA;
-	tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
-	tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
-	tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info);
-	tqc->txRingSize = adapter->tx_queue.tx_ring.size;
-	tqc->dataRingSize = adapter->tx_queue.data_ring.size;
-	tqc->compRingSize = adapter->tx_queue.comp_ring.size;
-	tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) *
-		     tqc->txRingSize;
+	tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
+	tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
+	tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
+	tqc->ddPA = cpu_to_le64(virt_to_phys(
+					adapter->tx_queue.buf_info));
+	tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size);
+	tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size);
+	tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size);
+	tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
+				 tqc->txRingSize);
 	tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
 
 	/* rx queue settings */
 	devRead->misc.numRxQueues = 1;
 	rqc = &adapter->rqd_start->conf;
-	rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
-	rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
-	rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA;
-	rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info);
-	rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size;
-	rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size;
-	rqc->compRingSize = adapter->rx_queue.comp_ring.size;
-	rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) *
-		     (rqc->rxRingSize[0] + rqc->rxRingSize[1]);
+	rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
+	rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
+	rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
+	rqc->ddPA = cpu_to_le64(virt_to_phys(
+					adapter->rx_queue.buf_info));
+	rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
+	rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
+	rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size);
+	rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
+				 (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
 	rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
 
 	/* intr settings */
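The *((u32 *)&devRead->misc.driverInfo.gos) assignment above deals with a
struct made entirely of sub-dword bitfields: the members are filled in CPU
order first, then the containing 32-bit image is swapped once as a unit, so no
per-field conversion is needed. A sketch of the idea with a hypothetical
two-field layout (assumes the compiler packs the bitfields into one 32-bit
word, and uses the gcc/clang builtin __builtin_bswap32):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct sketch_gos {
        uint32_t bits : 8;  /* e.g. 32 or 64 */
        uint32_t type : 8;  /* e.g. an OS type code */
        uint32_t pad  : 16;
    };

    /* Stand-in for cpu_to_le32(): identity on LE hosts, swap on BE. */
    static uint32_t sketch_cpu_to_le32(uint32_t v)
    {
        const uint16_t probe = 1;
        uint8_t first;

        memcpy(&first, &probe, 1);
        return first ? v : __builtin_bswap32(v);
    }

    int main(void)
    {
        struct sketch_gos gos = { .bits = 64, .type = 4 };
        uint32_t image;

        memcpy(&image, &gos, sizeof(image));
        image = sketch_cpu_to_le32(image); /* one swap covers all fields */
        printf("wire image: 0x%08x\n", (unsigned)image);
        return 0;
    }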
@@ -1715,11 +1867,10 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 
 	vmxnet3_setup_driver_shared(adapter);
 
-	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
-			       VMXNET3_GET_ADDR_LO(adapter->shared_pa));
-	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
-			       VMXNET3_GET_ADDR_HI(adapter->shared_pa));
-
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
+			       adapter->shared_pa));
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
+			       adapter->shared_pa));
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_ACTIVATE_DEV);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -2425,7 +2576,7 @@ vmxnet3_suspend(struct device *device)
 		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
 		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
 
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
 		i++;
 	}
 
@@ -2467,19 +2618,21 @@ vmxnet3_suspend(struct device *device)
 		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
 		in_dev_put(in_dev);
 
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
 		i++;
 	}
 
 skip_arp:
 	if (adapter->wol & WAKE_MAGIC)
-		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
+		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
 
 	pmConf->numFilters = i;
 
-	adapter->shared->devRead.pmConfDesc.confVer = 1;
-	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
-	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
+	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
+	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
+								*pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
+								pmConf));
 
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
@@ -2510,9 +2663,11 @@ vmxnet3_resume(struct device *device)
 	pmConf = adapter->pm_conf;
 	memset(pmConf, 0, sizeof(*pmConf));
 
-	adapter->shared->devRead.pmConfDesc.confVer = 1;
-	adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
-	adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
+	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
+	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
+								*pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
+								pmConf));
 
 	netif_device_attach(netdev);
 	pci_set_power_state(pdev, PCI_D0);