@@ -244,25 +244,31 @@ void iwl_rx_allocate(struct iwl_priv *priv)
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
 	unsigned long flags;
-	spin_lock_irqsave(&rxq->lock, flags);
-	while (!list_empty(&rxq->rx_used)) {
+
+	while (1) {
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			return;
+		}
 		element = rxq->rx_used.next;
 		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+		list_del(element);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		/* Alloc a new receive buffer */
 		rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
-				__GFP_NOWARN | GFP_ATOMIC);
+				GFP_KERNEL);
 		if (!rxb->skb) {
-			if (net_ratelimit())
-				printk(KERN_CRIT DRV_NAME
-				       ": Can not allocate SKB buffers\n");
+			printk(KERN_CRIT DRV_NAME
+			       ": Can not allocate SKB buffers\n");
 			/* We don't reschedule replenish work here -- we will
 			 * call the restock method and if it still needs
 			 * more buffers it will schedule replenish */
 			break;
 		}
-		priv->alloc_rxb_skb++;
-		list_del(element);
 
 		/* Get physical address of RB/SKB */
 		rxb->real_dma_addr = pci_map_single(
@@ -276,12 +282,15 @@ void iwl_rx_allocate(struct iwl_priv *priv)
 		rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
 		skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
 
+		spin_lock_irqsave(&rxq->lock, flags);
+
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
+		priv->alloc_rxb_skb++;
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
 }
-EXPORT_SYMBOL(iwl_rx_allocate);
 
 void iwl_rx_replenish(struct iwl_priv *priv)
 {
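
For context outside the driver tree: the point of this patch is that Rx SKBs are now allocated with GFP_KERNEL, which may sleep, so the allocation can no longer sit inside the spinlock-protected region. The rewritten loop pops a buffer off rx_used under the lock, drops the lock across alloc_skb() and the DMA mapping, and retakes it only to publish the buffer on rx_free. Below is a minimal userspace sketch of the same discipline, using a pthread mutex in place of the spinlock; all names in it are illustrative assumptions, not taken from iwlwifi.

/*
 * Illustrative userspace analogue only -- names and structure are
 * assumptions, not from the driver. The rule being demonstrated:
 * never hold the list lock across an allocation that may block.
 */
#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	void *buf;
};

static struct node *used_list;		/* nodes waiting for a buffer */
static struct node *free_list;		/* nodes ready for use */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void replenish(size_t buf_size)
{
	for (;;) {
		pthread_mutex_lock(&list_lock);
		struct node *n = used_list;
		if (!n) {			/* nothing left to refill */
			pthread_mutex_unlock(&list_lock);
			return;
		}
		used_list = n->next;		/* pop under the lock... */
		pthread_mutex_unlock(&list_lock);

		n->buf = malloc(buf_size);	/* ...allocate unlocked; may block */
		if (!n->buf) {
			/* put the node back so a later pass can retry */
			pthread_mutex_lock(&list_lock);
			n->next = used_list;
			used_list = n;
			pthread_mutex_unlock(&list_lock);
			return;
		}

		pthread_mutex_lock(&list_lock);	/* retake only to publish */
		n->next = free_list;
		free_list = n;
		pthread_mutex_unlock(&list_lock);
	}
}

Unlike the driver, the sketch re-queues the node when allocation fails; iwl_rx_allocate can simply break out of the loop because the restock path will schedule replenish again if more buffers are still needed.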