@@ -548,26 +548,23 @@ map_skb:
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
+		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+			/*
+			 * Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch. (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64).
+			 */
+			wmb();
+			writel(i, adapter->hw.hw_addr + rx_ring->tail);
+		}
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 	}
 
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-		if (i-- == 0)
-			i = (rx_ring->count - 1);
-
-		/*
-		 * Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch. (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64).
-		 */
-		wmb();
-		writel(i, adapter->hw.hw_addr + rx_ring->tail);
-	}
+	rx_ring->next_to_use = i;
 }
 
 /**
@@ -649,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
 		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
+		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+			/*
+			 * Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch. (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64).
+			 */
+			wmb();
+			writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+		}
+
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
@@ -656,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 	}
 
 no_buffers:
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-
-		if (!(i--))
-			i = (rx_ring->count - 1);
-
-		/*
-		 * Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch. (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64).
-		 */
-		wmb();
-		/*
-		 * Hardware increments by 16 bytes, but packet split
-		 * descriptors are 32 bytes...so we increment tail
-		 * twice as much.
-		 */
-		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
-	}
+	rx_ring->next_to_use = i;
 }
 
 /**
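A note on the arithmetic in the hunks above: the new in-loop test !(i & (E1000_RX_BUFFER_WRITE - 1)) is the usual power-of-two modulus trick, true exactly when i is a multiple of E1000_RX_BUFFER_WRITE, so the tail register is now bumped in E1000_RX_BUFFER_WRITE-sized bunches as the loop runs rather than only once after it finishes. The i<<1 in the packet-split variant preserves the doubling that the deleted comment explained: hardware steps the tail in 16-byte units while packet-split descriptors are 32 bytes. The standalone sketch below is not driver code; RING_COUNT and BUFFER_WRITE are made-up stand-ins for rx_ring->count and E1000_RX_BUFFER_WRITE, chosen only to show how often the batched write fires as the ring index wraps.

	/*
	 * Userspace sketch of the batched tail-write test, for illustration
	 * only.  Both constants must be powers of two or the mask tests break.
	 */
	#include <stdio.h>

	#define RING_COUNT	64	/* stand-in for rx_ring->count */
	#define BUFFER_WRITE	16	/* stand-in for E1000_RX_BUFFER_WRITE */

	int main(void)
	{
		unsigned int i = 0;	/* ring index, as in the driver loop */
		unsigned int n;

		for (n = 0; n < 2 * RING_COUNT; n++) {
			/* ...descriptor i would be filled and mapped here... */

			/*
			 * True once every BUFFER_WRITE descriptors; this is
			 * the point where the driver issues wmb() and then
			 * writel()s the tail (doubled via i<<1 in the
			 * packet-split path).
			 */
			if (!(i & (BUFFER_WRITE - 1)))
				printf("alloc %3u: tail write, i = %u\n", n, i);

			/* same wrap logic as i++; if (i == count) i = 0; */
			if (++i == RING_COUNT)
				i = 0;
		}
		return 0;
	}

Run as-is, this prints a tail write every 16th allocation (i = 0, 16, 32, ...): the wmb() barrier and the MMIO write are amortized over a batch of buffers, while freshly allocated descriptors are still handed to the hardware during the loop instead of only at the end.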