@@ -60,7 +60,8 @@ static const u16 default_tid_to_tx_fifo[] = {
 static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
                                     struct iwl_dma_ptr *ptr, size_t size)
 {
-        ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
+        ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+                                       GFP_KERNEL);
         if (!ptr->addr)
                 return -ENOMEM;
         ptr->size = size;
@@ -73,21 +74,20 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
         if (unlikely(!ptr->addr))
                 return;
 
-        pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
+        dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
         memset(ptr, 0, sizeof(*ptr));
 }
 
 /**
  * iwl_txq_update_write_ptr - Send new write index to hardware
  */
-int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
         u32 reg = 0;
-        int ret = 0;
         int txq_id = txq->q.id;
 
         if (txq->need_update == 0)
-                return ret;
+                return;
 
         /* if we're trying to save power */
         if (test_bit(STATUS_POWER_PMI, &priv->status)) {
@@ -101,7 +101,7 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
                                         txq_id, reg);
                         iwl_set_bit(priv, CSR_GP_CNTRL,
                                     CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                        return ret;
+                        return;
                 }
 
                 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
@@ -114,8 +114,6 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
                                 txq->q.write_ptr | (txq_id << 8));
 
         txq->need_update = 0;
-
-        return ret;
 }
 EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 
@@ -132,7 +130,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 {
         struct iwl_tx_queue *txq = &priv->txq[txq_id];
         struct iwl_queue *q = &txq->q;
-        struct pci_dev *dev = priv->pci_dev;
+        struct device *dev = &priv->pci_dev->dev;
         int i;
 
         if (q->n_bd == 0)
@@ -149,8 +147,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 
         /* De-alloc circular buffer of TFDs */
         if (txq->q.n_bd)
-                pci_free_consistent(dev, priv->hw_params.tfd_size *
-                                    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+                dma_free_coherent(dev, priv->hw_params.tfd_size *
+                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 
         /* De-alloc array of per-TFD driver data */
         kfree(txq->txb);
@@ -179,7 +177,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 {
         struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
         struct iwl_queue *q = &txq->q;
-        struct pci_dev *dev = priv->pci_dev;
+        struct device *dev = &priv->pci_dev->dev;
         int i;
 
         if (q->n_bd == 0)
@@ -191,8 +189,8 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 
         /* De-alloc circular buffer of TFDs */
         if (txq->q.n_bd)
-                pci_free_consistent(dev, priv->hw_params.tfd_size *
-                                    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+                dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+                                  txq->tfds, txq->q.dma_addr);
 
         /* deallocate arrays */
         kfree(txq->cmd);
@@ -283,7 +281,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
 static int iwl_tx_queue_alloc(struct iwl_priv *priv,
                               struct iwl_tx_queue *txq, u32 id)
 {
-        struct pci_dev *dev = priv->pci_dev;
+        struct device *dev = &priv->pci_dev->dev;
         size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
 
         /* Driver private data, only for Tx (not command) queues,
@@ -302,8 +300,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 
         /* Circular buffer of transmit frame descriptors (TFDs),
          * shared with device */
-        txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
-
+        txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
+                                       GFP_KERNEL);
         if (!txq->tfds) {
                 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
                 goto error;
@@ -731,7 +729,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
         u8 tid = 0;
         u8 *qc = NULL;
         unsigned long flags;
-        int ret;
 
         spin_lock_irqsave(&priv->lock, flags);
         if (iwl_is_rfkill(priv)) {
|
@@ -806,8 +803,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
|
|
hdr->seq_ctrl |= cpu_to_le16(seq_number);
|
|
|
seq_number += 0x10;
|
|
|
/* aggregation is on for this <sta,tid> */
|
|
|
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
|
|
|
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
|
|
|
+ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
|
|
|
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
txq = &priv->txq[txq_id];
|
|
@@ -949,7 +948,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
         /* Tell device the write index *just past* this latest filled TFD */
         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-        ret = iwl_txq_update_write_ptr(priv, txq);
+        iwl_txq_update_write_ptr(priv, txq);
         spin_unlock_irqrestore(&priv->lock, flags);
 
         /*
@@ -963,9 +962,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
         if (sta_priv && sta_priv->client)
                 atomic_inc(&sta_priv->pending_frames);
 
-        if (ret)
-                return ret;
-
         if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
                 if (wait_write_ptr) {
                         spin_lock_irqsave(&priv->lock, flags);
@@ -1004,7 +1000,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
         struct iwl_cmd_meta *out_meta;
         dma_addr_t phys_addr;
         unsigned long flags;
-        int len, ret;
+        int len;
         u32 idx;
         u16 fix_size;
 
@@ -1101,10 +1097,10 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
         /* Increment and update queue's write index */
         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-        ret = iwl_txq_update_write_ptr(priv, txq);
+        iwl_txq_update_write_ptr(priv, txq);
 
         spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-        return ret ? ret : idx;
+        return idx;
 }
 
 static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
@@ -1328,7 +1324,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 {
         int tx_fifo_id, txq_id, sta_id, ssn = -1;
         struct iwl_tid_data *tid_data;
-        int ret, write_ptr, read_ptr;
+        int write_ptr, read_ptr;
         unsigned long flags;
 
         if (!ra) {
@@ -1380,13 +1376,17 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
         priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
 
         spin_lock_irqsave(&priv->lock, flags);
-        ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+        /*
+         * The only reason this call can fail is a queue number out of range,
+         * which can happen if uCode is reloaded and all the station
+         * information is lost. If it is outside the range, there is no need
+         * to deactivate the uCode queue; just return "success" to allow
+         * mac80211 to clean up its own data.
+         */
+        priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
                                                    tx_fifo_id);
         spin_unlock_irqrestore(&priv->lock, flags);
 
-        if (ret)
-                return ret;
-
         ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
 
         return 0;
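
For context on the conversion pattern applied throughout this patch:
pci_alloc_consistent() is only a thin compatibility wrapper around
dma_alloc_coherent(). The sketch below paraphrases the wrapper from
include/asm-generic/pci-dma-compat.h (the hwdev == NULL handling in the
real wrapper is elided here for brevity):

static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                     dma_addr_t *dma_handle)
{
        /* the compat wrapper is hard-wired to GFP_ATOMIC */
        return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}

Calling dma_alloc_coherent() directly therefore does more than drop the
wrapper: the explicit GFP_KERNEL in the hunks above relaxes the allocation
from the wrapper's implicit GFP_ATOMIC, which is safe here because these
queue allocations run in process context during device setup.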