@@ -62,6 +62,10 @@
 *****************************************************************************/
 #include "iwl-dev.h"
 #include "iwl-trans.h"
+#include "iwl-core.h"
+#include "iwl-helpers.h"
+/* TODO: remove unneeded includes once the transport layer tx_free moves here */
+#include "iwl-agn.h"
 
 static int iwl_trans_rx_alloc(struct iwl_priv *priv)
 {
@@ -184,9 +188,233 @@ static void iwl_trans_rx_free(struct iwl_priv *priv)
 	rxq->rb_stts = NULL;
 }
 
+/* TODO: remove this code duplication */
+static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
+				       struct iwl_dma_ptr *ptr, size_t size)
+{
+	if (WARN_ON(ptr->addr))
+		return -EINVAL;
+
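+	/* dma_alloc_coherent() memory stays coherent between CPU and
+	 * device, so no explicit cache syncs are needed */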
+	ptr->addr = dma_alloc_coherent(priv->bus.dev, size,
+				       &ptr->dma, GFP_KERNEL);
+	if (!ptr->addr)
+		return -ENOMEM;
+	ptr->size = size;
+	return 0;
+}
+
+static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+			       int slots_num, u32 txq_id)
+{
+	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+	int i;
+
+	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
+		return -EINVAL;
+
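+	/* host-side bookkeeping: one meta entry and one command buffer
+	 * per queue slot */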
+	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
+			    GFP_KERNEL);
+	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
+			   GFP_KERNEL);
+
+	if (!txq->meta || !txq->cmd)
+		goto error;
+
+	for (i = 0; i < slots_num; i++) {
+		txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
+				      GFP_KERNEL);
+		if (!txq->cmd[i])
+			goto error;
+	}
+
+	/* Alloc driver data array and TFD circular buffer */
+	/* Driver private data, only for Tx (not command) queues,
+	 * not shared with device. */
+	if (txq_id != priv->cmd_queue) {
+		txq->txb = kzalloc(sizeof(txq->txb[0]) *
+				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
+		if (!txq->txb) {
+			IWL_ERR(priv, "kzalloc for auxiliary BD "
+				"structures failed\n");
+			goto error;
+		}
+	} else {
+		txq->txb = NULL;
+	}
+
+	/* Circular buffer of transmit frame descriptors (TFDs),
+	 * shared with device */
+	txq->tfds = dma_alloc_coherent(priv->bus.dev, tfd_sz, &txq->q.dma_addr,
+				       GFP_KERNEL);
+	if (!txq->tfds) {
+		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+		goto error;
+	}
+	txq->q.id = txq_id;
+
+	return 0;
+error:
+	kfree(txq->txb);
+	txq->txb = NULL;
+	/* since txq->cmd has been zeroed,
+	 * all non-allocated cmd[i] will be NULL */
+	if (txq->cmd)
+		for (i = 0; i < slots_num; i++)
+			kfree(txq->cmd[i]);
+	kfree(txq->meta);
+	kfree(txq->cmd);
+	txq->meta = NULL;
+	txq->cmd = NULL;
+
+	return -ENOMEM;
+}
+
+static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+			      int slots_num, u32 txq_id)
+{
+	int ret;
+
+	txq->need_update = 0;
+	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
+
+	/*
+	 * For the default queues 0-3, set up the swq_id
+	 * already -- all others need to get one later
+	 * (if they need one at all).
+	 */
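+	/* (on iwlagn the default queues 0-3 back the four mac80211 ACs) */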
+	if (txq_id < 4)
+		iwl_set_swq_id(txq, txq_id, txq_id);
+
+	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
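+	/* (a power-of-two size lets the wrap helpers advance indexes with
+	 *  a simple mask, e.g. (index + 1) & (TFD_QUEUE_SIZE_MAX - 1),
+	 *  instead of a modulo) */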
+
+	/* Initialize queue's high/low-water marks, and head/tail indexes */
+	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
+			     txq_id);
+	if (ret)
+		return ret;
+
+	/*
+	 * Tell the NIC where to find the circular buffer of Tx Frame
+	 * Descriptors for the given Tx queue, and enable the DMA channel
+	 * used for that queue: write the circular buffer's (TFD queue in
+	 * DRAM) physical base address.
+	 */
+	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+			   txq->q.dma_addr >> 8);
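+	/* (the register holds the base address shifted right by 8, which
+	 *  implies the TFD ring must be 256-byte aligned) */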
+
+	return 0;
+}
+
+/**
+ * iwl_trans_tx_alloc - allocate TX context
+ * @priv: driver private data
+ *
+ * Allocate all Tx DMA structures and initialize them.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int iwl_trans_tx_alloc(struct iwl_priv *priv)
+{
+	int ret;
+	int txq_id, slots_num;
+
+	/* It is not allowed to alloc twice, so warn when this happens.
+	 * We cannot rely on the previous allocation, so free and fail */
+	if (WARN_ON(priv->txq)) {
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+				   priv->hw_params.scd_bc_tbls_size);
+	if (ret) {
+		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+		goto error;
+	}
+
+	/* Alloc keep-warm buffer */
+	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+	if (ret) {
+		IWL_ERR(priv, "Keep Warm allocation failed\n");
+		goto error;
+	}
+
+	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
+			    priv->cfg->base_params->num_of_queues, GFP_KERNEL);
+	if (!priv->txq) {
+		IWL_ERR(priv, "Not enough memory for txq\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+		slots_num = (txq_id == priv->cmd_queue) ?
+			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
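+		/* (the command queue needs far fewer slots than a data
+		 *  queue, hence the smaller TFD_CMD_SLOTS) */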
+		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
+					  txq_id);
+		if (ret) {
+			IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	iwlagn_hw_txq_ctx_free(priv);
+
+	return ret;
+}
+
+static int iwl_trans_tx_init(struct iwl_priv *priv)
+{
+	int ret;
+	int txq_id, slots_num;
+	unsigned long flags;
+	bool alloc = false;
+
+	if (!priv->txq) {
+		ret = iwl_trans_tx_alloc(priv);
+		if (ret)
+			goto error;
+		alloc = true;
+	}
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	iwl_write_prph(priv, IWLAGN_SCD_TXFACT, 0);
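+	/* (each SCD_TXFACT bit activates one Tx FIFO; writing 0 disables
+	 *  them all) */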
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
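+	/* (the register takes the buffer address in 16-byte units, hence
+	 *  the shift by 4) */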
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+		slots_num = (txq_id == priv->cmd_queue) ?
+			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
+					 txq_id);
+		if (ret) {
+			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return 0;
+error:
+	/* Upon error, free only if we allocated something */
+	if (alloc)
+		iwlagn_hw_txq_ctx_free(priv);
+	return ret;
+}
+
 static const struct iwl_trans_ops trans_ops = {
 	.rx_init = iwl_trans_rx_init,
 	.rx_free = iwl_trans_rx_free,
+
+	.tx_init = iwl_trans_tx_init,
 };
 
 void iwl_trans_register(struct iwl_trans *trans)