|
@@ -145,12 +145,14 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
|
|
|
goto exit_unlock;
|
|
|
}
|
|
|
|
|
|
- iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write & ~0x7);
|
|
|
+ q->write_actual = (q->write & ~0x7);
|
|
|
+ iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
|
|
|
|
|
|
/* Else device is assumed to be awake */
|
|
|
} else {
|
|
|
/* Device expects a multiple of 8 */
|
|
|
- iwl_write32(priv, rx_wrt_ptr_reg, q->write & ~0x7);
|
|
|
+ q->write_actual = (q->write & ~0x7);
|
|
|
+ iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
|
|
|
}
|
|
|
|
|
|
q->need_update = 0;
|
|
@@ -212,7 +214,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
|
|
|
|
|
|
/* If we've added more space for the firmware to place data, tell it.
|
|
|
* Increment device's write pointer in multiples of 8. */
|
|
|
- if (write != (rxq->write & ~0x7)) {
|
|
|
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
|
|
|
spin_lock_irqsave(&rxq->lock, flags);
|
|
|
rxq->need_update = 1;
|
|
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
|
@@ -232,7 +234,7 @@ EXPORT_SYMBOL(iwl_rx_queue_restock);
|
|
|
* Also restock the Rx queue via iwl_rx_queue_restock.
|
|
|
* This is called as a scheduled work item (except for during initialization)
|
|
|
*/
|
|
|
-void iwl_rx_allocate(struct iwl_priv *priv)
|
|
|
+void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
|
|
{
|
|
|
struct iwl_rx_queue *rxq = &priv->rxq;
|
|
|
struct list_head *element;
|
|
@@ -254,7 +256,8 @@ void iwl_rx_allocate(struct iwl_priv *priv)
|
|
|
|
|
|
/* Alloc a new receive buffer */
|
|
|
rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
|
|
|
- GFP_KERNEL);
|
|
|
+ priority);
|
|
|
+
|
|
|
if (!rxb->skb) {
|
|
|
IWL_CRIT(priv, "Can not allocate SKB buffers\n");
|
|
|
/* We don't reschedule replenish work here -- we will
|
|
@@ -289,7 +292,7 @@ void iwl_rx_replenish(struct iwl_priv *priv)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
|
|
|
- iwl_rx_allocate(priv);
|
|
|
+ iwl_rx_allocate(priv, GFP_KERNEL);
|
|
|
|
|
|
spin_lock_irqsave(&priv->lock, flags);
|
|
|
iwl_rx_queue_restock(priv);
|
|
@@ -297,6 +300,14 @@ void iwl_rx_replenish(struct iwl_priv *priv)
|
|
|
}
|
|
|
EXPORT_SYMBOL(iwl_rx_replenish);
|
|
|
|
|
|
+void iwl_rx_replenish_now(struct iwl_priv *priv)
|
|
|
+{
|
|
|
+ iwl_rx_allocate(priv, GFP_ATOMIC);
|
|
|
+
|
|
|
+ iwl_rx_queue_restock(priv);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL(iwl_rx_replenish_now);
|
|
|
+
|
|
|
|
|
|
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
|
|
|
* If an SKB has been detached, the POOL needs to have its SKB set to NULL
|
|
@@ -352,6 +363,7 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
|
|
|
/* Set us so that we have processed and used all buffers, but have
|
|
|
* not restocked the Rx queue with fresh buffers */
|
|
|
rxq->read = rxq->write = 0;
|
|
|
+ rxq->write_actual = 0;
|
|
|
rxq->free_count = 0;
|
|
|
rxq->need_update = 0;
|
|
|
return 0;
|
|
@@ -390,6 +402,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
|
|
|
/* Set us so that we have processed and used all buffers, but have
|
|
|
* not restocked the Rx queue with fresh buffers */
|
|
|
rxq->read = rxq->write = 0;
|
|
|
+ rxq->write_actual = 0;
|
|
|
rxq->free_count = 0;
|
|
|
spin_unlock_irqrestore(&rxq->lock, flags);
|
|
|
}
|