@@ -70,15 +70,12 @@

 #include "iwl-drv.h"
 #include "iwl-trans.h"
-#include "iwl-trans-pcie-int.h"
 #include "iwl-csr.h"
 #include "iwl-prph.h"
-#include "iwl-eeprom.h"
 #include "iwl-agn-hw.h"
+#include "internal.h"
 /* FIXME: need to abstract out TX command (once we know what it looks like) */
-#include "iwl-commands.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+#include "dvm/commands.h"

 #define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
 	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
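[Note] IWL_MASK() is still used further down in this patch (iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7))), so after the removal above the macro presumably comes from a shared iwlwifi header rather than this file. As a standalone sanity check of its inclusive bit-range semantics, here is a minimal sketch using the definition exactly as removed above:

#include <assert.h>

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

int main(void)
{
	assert(IWL_MASK(0, 7) == 0xff);	/* bits 0..7 set: the eight Tx DMA/FIFO channels */
	assert(IWL_MASK(4, 7) == 0xf0);	/* bits 4..7 set */
	return 0;
}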
@@ -86,8 +83,7 @@
 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
 	struct device *dev = trans->dev;
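[Note] Many hunks in this patch merely join this two-line trans_pcie initializer onto one line. For readers new to the driver: struct iwl_trans_pcie is not allocated separately; iwl_trans_pcie_alloc() later in this file does one kzalloc of sizeof(struct iwl_trans) + sizeof(struct iwl_trans_pcie), and IWL_TRANS_GET_PCIE_TRANS() evidently resolves to the transport-private area co-allocated behind the generic struct (the macro body is outside this diff). A minimal sketch of that pattern, with hypothetical names (my_trans, my_trans_pcie, MY_GET_PCIE) rather than the driver's real definitions:

#include <stdlib.h>

struct my_trans {
	int common_state;
	/* transport-private data is laid out directly after this struct */
	char trans_specific[];
};

struct my_trans_pcie {
	int pcie_state;
};

#define MY_GET_PCIE(_t) ((struct my_trans_pcie *)((_t)->trans_specific))

static struct my_trans *my_trans_alloc(void)
{
	/* one zeroed allocation covers both layers */
	return calloc(1, sizeof(struct my_trans) +
			 sizeof(struct my_trans_pcie));
}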
@@ -114,7 +110,7 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans)
 err_rb_stts:
 	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
-			rxq->bd, rxq->bd_dma);
+			  rxq->bd, rxq->bd_dma);
 	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
 	rxq->bd = NULL;
 err_bd:
@@ -123,8 +119,7 @@ err_bd:
 static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
 	int i;
@@ -134,8 +129,8 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
 	 * to an SKB, so we need to unmap and free potential storage */
 		if (rxq->pool[i].page != NULL) {
 			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
-				PAGE_SIZE << trans_pcie->rx_page_order,
-				DMA_FROM_DEVICE);
+				       PAGE_SIZE << trans_pcie->rx_page_order,
+				       DMA_FROM_DEVICE);
 			__free_pages(rxq->pool[i].page,
 				     trans_pcie->rx_page_order);
 			rxq->pool[i].page = NULL;
@@ -193,8 +188,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
 static int iwl_rx_init(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

 	int i, err;
@@ -236,10 +230,8 @@ static int iwl_rx_init(struct iwl_trans *trans)
 static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-
 	unsigned long flags;

 	/*if rxq->bd is NULL, it means that nothing has been allocated,
@@ -274,11 +266,11 @@ static int iwl_trans_rx_stop(struct iwl_trans *trans)
 	/* stop Rx DMA */
 	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
-			FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
 }

-static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
-				       struct iwl_dma_ptr *ptr, size_t size)
+static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
+				struct iwl_dma_ptr *ptr, size_t size)
 {
 	if (WARN_ON(ptr->addr))
 		return -EINVAL;
@@ -291,8 +283,8 @@ static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
 	return 0;
 }

-static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
-				       struct iwl_dma_ptr *ptr)
+static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
+				struct iwl_dma_ptr *ptr)
 {
 	if (unlikely(!ptr->addr))
 		return;
@@ -329,12 +321,12 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
 }

 static int iwl_trans_txq_alloc(struct iwl_trans *trans,
-			struct iwl_tx_queue *txq, int slots_num,
-			u32 txq_id)
+			       struct iwl_tx_queue *txq, int slots_num,
+			       u32 txq_id)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
 	int i;
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

 	if (WARN_ON(txq->entries || txq->tfds))
 		return -EINVAL;
@@ -435,7 +427,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 	spin_lock_bh(&txq->lock);
 	while (q->write_ptr != q->read_ptr) {
-		iwlagn_txq_free_tfd(trans, txq, dma_dir);
+		iwl_txq_free_tfd(trans, txq, dma_dir);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
 	spin_unlock_bh(&txq->lock);
@@ -455,6 +447,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
 	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
 	struct device *dev = trans->dev;
 	int i;
+
 	if (WARN_ON(!txq))
 		return;
@@ -574,11 +567,11 @@ error:
 }
 static int iwl_tx_init(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;
 	int txq_id, slots_num;
 	unsigned long flags;
 	bool alloc = false;
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

 	if (!trans_pcie->txq) {
 		ret = iwl_trans_tx_alloc(trans);
@@ -643,10 +636,9 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
 static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int pos;
 	u16 pci_lnk_ctl;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct pci_dev *pci_dev = trans_pcie->pci_dev;
@@ -700,14 +692,14 @@ static int iwl_apm_init(struct iwl_trans *trans)
 	/* Disable L0S exit timer (platform NMI Work/Around) */
 	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
-		CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

 	/*
 	 * Disable L0s without affecting L1;
 	 *  don't wait for ICH L0s (ICH bug W/A)
 	 */
 	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
-		CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
 	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
@@ -717,7 +709,7 @@ static int iwl_apm_init(struct iwl_trans *trans)
 	 * wake device's PCI Express link L1a -> L0s
 	 */
 	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
-		CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

 	iwl_apm_config(trans);
@@ -738,8 +730,8 @@ static int iwl_apm_init(struct iwl_trans *trans)
 	 * and accesses to uCode SRAM.
 	 */
 	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
-			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
 	if (ret < 0) {
 		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
 		goto out;
@@ -773,8 +765,8 @@ static int iwl_apm_stop_master(struct iwl_trans *trans)
 	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

 	ret = iwl_poll_bit(trans, CSR_RESET,
-			CSR_RESET_REG_FLAG_MASTER_DISABLED,
-			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
+			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
 	if (ret)
 		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
@@ -816,8 +808,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
 	iwl_apm_init(trans);

 	/* Set interrupt coalescing calibration timer to default (512 usecs) */
-	iwl_write8(trans, CSR_INT_COALESCING,
-		IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
@@ -836,8 +827,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
 	if (trans->cfg->base_params->shadow_reg_enable) {
 		/* enable shadow regs in HW */
-		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
-			0x800FFFFF);
+		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
 	}

 	return 0;
@@ -851,13 +842,13 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
 	int ret;

 	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
-		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

 	/* See if we got it */
 	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
-			CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
-			CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
-			HW_READY_TIMEOUT);
+			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+			   HW_READY_TIMEOUT);

 	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
 	return ret;
@@ -877,11 +868,11 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
 	/* If HW is not ready, prepare the conditions to check again */
 	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
-			CSR_HW_IF_CONFIG_REG_PREPARE);
+		    CSR_HW_IF_CONFIG_REG_PREPARE);

 	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
-			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
-			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

 	if (ret < 0)
 		return ret;
@@ -908,32 +899,33 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
 	trans_pcie->ucode_write_complete = false;

 	iwl_write_direct32(trans,
-		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
-		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

 	iwl_write_direct32(trans,
-		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
+			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+			   dst_addr);

 	iwl_write_direct32(trans,
 			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
 			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

 	iwl_write_direct32(trans,
-		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
-		(iwl_get_dma_hi_addr(phy_addr)
-			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+			   (iwl_get_dma_hi_addr(phy_addr)
+				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

 	iwl_write_direct32(trans,
-		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
-		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
-		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
-		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

 	iwl_write_direct32(trans,
-		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
-		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
-		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
-		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

 	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
 		     section_num);
@@ -1038,6 +1030,10 @@ static void iwl_tx_start(struct iwl_trans *trans)
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

+	/* make sure all queue are not stopped/used */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
 	trans_pcie->scd_base_addr =
 		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
 	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
@@ -1058,64 +1054,33 @@ static void iwl_tx_start(struct iwl_trans *trans)
 	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
 		       trans_pcie->scd_bc_tbls.dma >> 10);

+	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
+		int fifo = trans_pcie->setup_q_to_fifo[i];
+
+		__iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
+					    IWL_TID_NON_QOS,
+					    SCD_FRAME_LIMIT, 0);
+	}
+
+	/* Activate all Tx DMA/FIFO channels */
+	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
+
 	/* Enable DMA channel */
 	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
 		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
-				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
-				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

 	/* Update FH chicken bits */
 	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
 	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
 			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

-	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
-		SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
-	iwl_write_prph(trans, SCD_AGGR_SEL, 0);
-
-	/* initiate the queues */
-	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
-		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
-		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
-		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
-				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
-		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
-				SCD_CONTEXT_QUEUE_OFFSET(i) +
-				sizeof(u32),
-				((SCD_WIN_SIZE <<
-				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
-				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
-				((SCD_FRAME_LIMIT <<
-				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
-				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
-	}
-
-	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
-			IWL_MASK(0, trans->cfg->base_params->num_of_queues));
-
-	/* Activate all Tx DMA/FIFO channels */
-	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
-
-	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
-
-	/* make sure all queue are not stopped/used */
-	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
-	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
-
-	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
-		int fifo = trans_pcie->setup_q_to_fifo[i];
-
-		set_bit(i, trans_pcie->queue_used);
-
-		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
-					      fifo, true);
-	}
-
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

 	/* Enable L1-Active */
 	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
-			APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 }

 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
@@ -1129,9 +1094,9 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
  */
 static int iwl_trans_tx_stop(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ch, txq_id, ret;
 	unsigned long flags;
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

 	/* Turn off all Tx DMA fifos */
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1143,13 +1108,13 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
 		iwl_write_direct32(trans,
 				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
 		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
-				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
-				1000);
+			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
 		if (ret < 0)
-			IWL_ERR(trans, "Failing on timeout while stopping"
-				" DMA channel %d [0x%08x]", ch,
-				iwl_read_direct32(trans,
-						  FH_TSSR_TX_STATUS_REG));
+			IWL_ERR(trans,
+				"Failing on timeout while stopping DMA channel %d [0x%08x]",
+				ch,
+				iwl_read_direct32(trans,
+						  FH_TSSR_TX_STATUS_REG));
 	}
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
@@ -1168,8 +1133,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
 static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 {
-	unsigned long flags;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;

 	/* tell the device to stop sending interrupts */
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1199,7 +1164,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	/* Make sure (redundant) we've released our request to stay awake */
 	iwl_clear_bit(trans, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

 	/* Stop the device, and put it in low power state */
 	iwl_apm_stop(trans);
@@ -1273,8 +1238,9 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	txq->entries[q->write_ptr].cmd = dev_cmd;

 	dev_cmd->hdr.cmd = REPLY_TX;
-	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-			INDEX_TO_SEQ(q->write_ptr)));
+	dev_cmd->hdr.sequence =
+		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+			    INDEX_TO_SEQ(q->write_ptr)));

 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_meta = &txq->entries[q->write_ptr].meta;
@@ -1339,7 +1305,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	/* take back ownership of DMA buffer to enable update */
 	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
-			DMA_BIDIRECTIONAL);
+				DMA_BIDIRECTIONAL);
 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
@@ -1351,7 +1317,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

 	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
-			DMA_BIDIRECTIONAL);
+				   DMA_BIDIRECTIONAL);

 	trace_iwlwifi_dev_tx(trans->dev,
 			     &txq->tfds[txq->q.write_ptr],
@@ -1390,8 +1356,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int err;
 	bool hw_rfkill;
@@ -1404,7 +1369,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 		iwl_alloc_isr_ict(trans);

 		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
-				DRV_NAME, trans);
+				  DRV_NAME, trans);
 		if (err) {
 			IWL_ERR(trans, "Error allocating IRQ %d\n",
 				trans_pcie->irq);
@@ -1442,9 +1407,9 @@ error:
 static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
 				   bool op_mode_leaving)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill;
 	unsigned long flags;
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

 	iwl_apm_stop(trans);
@@ -1548,8 +1513,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 void iwl_trans_pcie_free(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

 	iwl_trans_pcie_tx_free(trans);
 #ifndef CONFIG_IWLWIFI_IDI
@@ -1564,6 +1528,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 	iounmap(trans_pcie->hw_base);
 	pci_release_regions(trans_pcie->pci_dev);
 	pci_disable_device(trans_pcie->pci_dev);
+	kmem_cache_destroy(trans->dev_cmd_pool);

 	kfree(trans);
 }
@@ -1811,8 +1776,8 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
 };

 static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos)
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
 {
 	struct iwl_trans *trans = file->private_data;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1848,11 +1813,11 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
 }

 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos) {
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
 	struct iwl_trans *trans = file->private_data;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
 	char buf[256];
 	int pos = 0;
@@ -1876,11 +1841,10 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
 					char __user *user_buf,
-					size_t count, loff_t *ppos) {
-
+					size_t count, loff_t *ppos)
+{
 	struct iwl_trans *trans = file->private_data;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

 	int pos = 0;
@@ -1938,8 +1902,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
 					 size_t count, loff_t *ppos)
 {
 	struct iwl_trans *trans = file->private_data;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

 	char buf[8];
@@ -1959,8 +1922,8 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
 }

 static ssize_t iwl_dbgfs_csr_write(struct file *file,
-				const char __user *user_buf,
-				size_t count, loff_t *ppos)
+				   const char __user *user_buf,
+				   size_t count, loff_t *ppos)
 {
 	struct iwl_trans *trans = file->private_data;
 	char buf[8];
@@ -1980,8 +1943,8 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
 }

 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
-				  char __user *user_buf,
-				  size_t count, loff_t *ppos)
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
 {
 	struct iwl_trans *trans = file->private_data;
 	char *buf;
@@ -2024,7 +1987,7 @@ DEBUGFS_WRITE_FILE_OPS(fw_restart);
 *
 */
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
-					struct dentry *dir)
+					 struct dentry *dir)
 {
 	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
 	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
@@ -2036,9 +1999,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
 }
 #else
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
-					struct dentry *dir)
-{ return 0; }
-
+					 struct dentry *dir)
+{
+	return 0;
+}
 #endif /*CONFIG_IWLWIFI_DEBUGFS */

 static const struct iwl_trans_ops trans_ops_pcie = {
@@ -2055,8 +2019,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
 	.tx = iwl_trans_pcie_tx,
 	.reclaim = iwl_trans_pcie_reclaim,

-	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
-	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
+	.txq_disable = iwl_trans_pcie_txq_disable,
+	.txq_enable = iwl_trans_pcie_txq_enable,

 	.dbgfs_register = iwl_trans_pcie_dbgfs_register,
@@ -2079,11 +2043,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 {
 	struct iwl_trans_pcie *trans_pcie;
 	struct iwl_trans *trans;
+	char cmd_pool_name[100];
 	u16 pci_cmd;
 	int err;

 	trans = kzalloc(sizeof(struct iwl_trans) +
-		sizeof(struct iwl_trans_pcie), GFP_KERNEL);
+			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
 	if (WARN_ON(!trans))
 		return NULL;
@@ -2099,7 +2064,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	/* W/A - seems to solve weird behavior. We need to remove this if we
 	 * don't want to stay in L1 all the time. This wastes a lot of power */
 	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-				PCIE_LINK_STATE_CLKPM);
+			       PCIE_LINK_STATE_CLKPM);

 	if (pci_enable_device(pdev)) {
 		err = -ENODEV;
@@ -2115,7 +2080,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (!err)
 			err = pci_set_consistent_dma_mask(pdev,
-							DMA_BIT_MASK(32));
+							  DMA_BIT_MASK(32));
 		/* both attempts failed: */
 		if (err) {
 			dev_printk(KERN_ERR, &pdev->dev,
@@ -2138,13 +2103,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	}

 	dev_printk(KERN_INFO, &pdev->dev,
-		"pci_resource_len = 0x%08llx\n",
-		(unsigned long long) pci_resource_len(pdev, 0));
+		   "pci_resource_len = 0x%08llx\n",
+		   (unsigned long long) pci_resource_len(pdev, 0));
 	dev_printk(KERN_INFO, &pdev->dev,
-		"pci_resource_base = %p\n", trans_pcie->hw_base);
+		   "pci_resource_base = %p\n", trans_pcie->hw_base);

 	dev_printk(KERN_INFO, &pdev->dev,
-		"HW Revision ID = 0x%X\n", pdev->revision);
+		   "HW Revision ID = 0x%X\n", pdev->revision);

 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
 	 * PCI Tx retries from interfering with C3 CPU state */
@@ -2153,7 +2118,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	err = pci_enable_msi(pdev);
 	if (err)
 		dev_printk(KERN_ERR, &pdev->dev,
-			"pci_enable_msi failed(0X%x)", err);
+			   "pci_enable_msi failed(0X%x)", err);

 	trans->dev = &pdev->dev;
 	trans_pcie->irq = pdev->irq;
@@ -2175,8 +2140,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	init_waitqueue_head(&trans->wait_command_queue);
 	spin_lock_init(&trans->reg_lock);

+	snprintf(cmd_pool_name, sizeof(cmd_pool_name), "iwl_cmd_pool:%s",
+		 dev_name(trans->dev));
+
+	trans->dev_cmd_headroom = 0;
+	trans->dev_cmd_pool =
+		kmem_cache_create(cmd_pool_name,
+				  sizeof(struct iwl_device_cmd)
+				  + trans->dev_cmd_headroom,
+				  sizeof(void *),
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
+
+	if (!trans->dev_cmd_pool)
+		goto out_pci_disable_msi;
+
 	return trans;

+out_pci_disable_msi:
+	pci_disable_msi(pdev);
 out_pci_release_regions:
 	pci_release_regions(pdev);
 out_pci_disable_device:
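[Note] The hunk above pairs with the kmem_cache_destroy(trans->dev_cmd_pool) call added to iwl_trans_pcie_free() earlier in this patch: each PCIe transport now owns a named slab cache for its TX device commands, created last in iwl_trans_pcie_alloc() and cleaned up through the new out_pci_disable_msi error label if creation fails. The alloc/free sites for pool objects are outside this section, so the middle of this sketch is an assumption about how the pool is meant to be used:

#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *cmd_pool;

static int cmd_pool_setup(const char *name, size_t obj_size)
{
	cmd_pool = kmem_cache_create(name, obj_size, sizeof(void *),
				     SLAB_HWCACHE_ALIGN, NULL);
	return cmd_pool ? 0 : -ENOMEM;
}

static void cmd_pool_cycle(void)
{
	/* assumed usage: one object per in-flight TX command */
	void *cmd = kmem_cache_alloc(cmd_pool, GFP_KERNEL);

	if (cmd)
		kmem_cache_free(cmd_pool, cmd);
}

static void cmd_pool_teardown(void)
{
	/* every object must have been freed back before this */
	kmem_cache_destroy(cmd_pool);
}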
@@ -2185,4 +2167,3 @@ out_no_pci:
 	kfree(trans);
 	return NULL;
 }
-