@@ -1642,6 +1642,14 @@ static int netdev_close(struct net_device *dev)
 	struct sk_buff *skb;
 	int i;
 
+	/* Wait and kill tasklet */
+	tasklet_kill(&np->rx_tasklet);
+	tasklet_kill(&np->tx_tasklet);
+	np->cur_tx = 0;
+	np->dirty_tx = 0;
+	np->cur_task = 0;
+	np->last_tx = 0;
+
 	netif_stop_queue(dev);
 
 	if (netif_msg_ifdown(np)) {
@@ -1662,9 +1670,20 @@ static int netdev_close(struct net_device *dev)
 	/* Stop the chip's Tx and Rx processes. */
 	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
 
-	/* Wait and kill tasklet */
-	tasklet_kill(&np->rx_tasklet);
-	tasklet_kill(&np->tx_tasklet);
+	for (i = 2000; i > 0; i--) {
+		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
+			break;
+		mdelay(1);
+	}
+
+	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
+			ioaddr + ASICCtrl + 2);
+
+	for (i = 2000; i > 0; i--) {
+		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+			break;
+		mdelay(1);
+	}
 
 #ifdef __i386__
 	if (netif_msg_hw(np)) {
@@ -1702,6 +1721,7 @@ static int netdev_close(struct net_device *dev)
 		}
 	}
 	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_ring[i].next_desc = 0;
 		skb = np->tx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pci_dev,