@@ -1025,6 +1025,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
+	/* The powerpc-specific eieio() is used, as wmb() has too strong
+	 * semantics (it requires synchronization between cacheable and
+	 * uncacheable mappings, which eieio doesn't provide and which we
+	 * don't need), thus requiring a more expensive sync instruction. At
+	 * some point, the set of architecture-independent barrier functions
+	 * should be expanded to include weaker barriers.
+	 */
+
+	eieio();
 	txbdp->status = status;
 
 	/* If this was the last BD in the ring, the next one */
@@ -1301,6 +1310,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
 	bdp->length = 0;
 
 	/* Mark the buffer empty */
+	eieio();
 	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
 
 	return skb;
@@ -1484,6 +1494,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	bdp = priv->cur_rx;
 
 	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
+		rmb();
 		skb = priv->rx_skbuff[priv->skb_currx];
 
 		if (!(bdp->status &