@@ -1682,20 +1682,21 @@ ath5k_tasklet_rx(unsigned long data)
 	struct ath5k_rx_status rs = {};
 	struct sk_buff *skb;
 	struct ath5k_softc *sc = (void *)data;
-	struct ath5k_buf *bf;
+	struct ath5k_buf *bf, *bf_last;
 	struct ath5k_desc *ds;
 	int ret;
 	int hdrlen;
 	int pad;
 
 	spin_lock(&sc->rxbuflock);
+	if (list_empty(&sc->rxbuf)) {
+		ATH5K_WARN(sc, "empty rx buf pool\n");
+		goto unlock;
+	}
+	bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list);
 	do {
 		rxs.flag = 0;
 
-		if (unlikely(list_empty(&sc->rxbuf))) {
-			ATH5K_WARN(sc, "empty rx buf pool\n");
-			break;
-		}
 		bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
 		BUG_ON(bf->skb == NULL);
 		skb = bf->skb;
@@ -1705,8 +1706,24 @@ ath5k_tasklet_rx(unsigned long data)
 		pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr,
 				sc->desc_len, PCI_DMA_FROMDEVICE);
 
-		if (unlikely(ds->ds_link == bf->daddr)) /* this is the end */
-			break;
+		/*
+		 * last buffer must not be freed to ensure proper hardware
+		 * function. When the hardware finishes also a packet next to
+		 * it, we are sure, it doesn't use it anymore and we can go on.
+		 */
+		if (bf_last == bf)
+			bf->flags |= 1;
+		if (bf->flags) {
+			struct ath5k_buf *bf_next = list_entry(bf->list.next,
+					struct ath5k_buf, list);
+			ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc,
+					&rs);
+			if (ret)
+				break;
+			bf->flags &= ~1;
+			/* skip the overwritten one (even status is martian) */
+			goto next;
+		}
 
 		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
 		if (unlikely(ret == -EINPROGRESS))
@@ -1816,6 +1833,7 @@ accept:
 next:
 		list_move_tail(&bf->list, &sc->rxbuf);
 	} while (ath5k_rxbuf_setup(sc, bf) == 0);
+unlock:
 	spin_unlock(&sc->rxbuflock);
 }
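For readers not familiar with the ath5k RX ring: the last descriptor is self-linked so the hardware never runs off the end of the list, which is why the patch keeps that last buffer out of circulation (bf->flags |= 1) until the hardware has also completed the buffer linked after it, and then skips its possibly overwritten status. The standalone C sketch below only illustrates that rule under a simplified ring model; the buf struct, ring[], hw_done and rx_poll() names are hypothetical and are not part of the driver.

/* Userspace illustration only -- the ring model and all names here are
 * assumptions, not driver code. Build with: cc -std=c99 -o rxring rxring.c */
#include <stdbool.h>
#include <stdio.h>

#define NBUF		4
#define BUF_HELD	1	/* mirrors the literal "1" used for bf->flags */

struct buf {
	int flags;
	bool hw_done;	/* stand-in for ah_proc_rx_desc() reporting completion */
};

static struct buf ring[NBUF];

/* Walk the ring once, stopping at the first buffer the hardware still owns. */
static void rx_poll(int last)
{
	for (int i = 0; i < NBUF; i++) {
		struct buf *bf = &ring[i];

		if (i == last)
			bf->flags |= BUF_HELD;	/* never hand the last buffer back yet */

		if (bf->flags & BUF_HELD) {
			struct buf *next = &ring[(i + 1) % NBUF];

			/* Release the held buffer only once the hardware has
			 * also finished the buffer linked after it, and skip
			 * its status, which may have been overwritten. */
			if (!next->hw_done)
				break;
			bf->flags &= ~BUF_HELD;
			printf("buf %d: released and skipped\n", i);
			continue;
		}

		if (!bf->hw_done)
			break;
		printf("buf %d: delivered\n", i);
	}
}

int main(void)
{
	for (int i = 0; i < NBUF - 1; i++)
		ring[i].hw_done = true;	/* hardware completed buffers 0..2 */
	rx_poll(NBUF - 1);		/* buffer 3 is the self-linked last one */
	return 0;
}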