- /******************************************************************************
- *
- * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * James P. Ketrenos <ipw2100-admin@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
- #include <net/mac80211.h>
- #include "iwl-eeprom.h"
- #include "iwl-dev.h"
- #include "iwl-core.h"
- #include "iwl-sta.h"
- #include "iwl-io.h"
- #include "iwl-calib.h"
- #include "iwl-helpers.h"
- /************************** RX-FUNCTIONS ****************************/
- /*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which points to a Receive Buffer to be filled by the NIC. These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC. The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot in which the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets the READ index to the first position
- * in the queue, and the WRITE index to the last (READ - 1, wrapped).
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled), if 'processed' != 'read', then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set, it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_rx_queue_alloc() Allocates rx_free
- * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx() Detaches iwl_rx_mem_buffers from the pool up to the
- * READ INDEX, detaching each SKB from its pool entry.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
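- /*
- * A worked example of the index arithmetic above, assuming the default
- * RX_QUEUE_SIZE of 256 entries: with read = 10 and write = 8,
- * iwl_rx_queue_space() below reports (10 - 8) - 2 = 0 free slots, so the
- * queue is treated as full; with read = 10 and write = 10 it reports
- * (0 + 256) - 2 = 254 free slots. The "- 2" headroom keeps WRITE from ever
- * catching up with READ, so a completely full queue is never mistaken for
- * an empty one.
- */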
- /**
- * iwl_rx_queue_space - Return number of free slots available in queue.
- */
- int iwl_rx_queue_space(const struct iwl_rx_queue *q)
- {
- int s = q->read - q->write;
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some headroom so a full queue is never confused with an empty one */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
- }
- EXPORT_SYMBOL(iwl_rx_queue_space);
- /**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
- int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
- {
- u32 reg = 0;
- int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&q->lock, flags);
- if (q->need_update == 0)
- goto exit_unlock;
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- iwl_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
- ret = iwl_grab_nic_access(priv);
- if (ret)
- goto exit_unlock;
- /* Device expects a multiple of 8 */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
- q->write & ~0x7);
- iwl_release_nic_access(priv);
- /* Else device is assumed to be awake */
- } else
- /* Device expects a multiple of 8 */
- iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
- q->need_update = 0;
- exit_unlock:
- spin_unlock_irqrestore(&q->lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
- /**
- * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
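- *
- * The RBD holds only the upper bits of the buffer's DMA address (the low
- * eight bits are dropped by the shift), so receive buffers are assumed to
- * be aligned on a 256-byte boundary.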
- */
- static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
- {
- return cpu_to_le32((u32)(dma_addr >> 8));
- }
- /**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
- int iwl_rx_queue_restock(struct iwl_priv *priv)
- {
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
- int write;
- int ret = 0;
- spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if ((write != (rxq->write & ~0x7))
- || (abs(rxq->write - rxq->read) > 7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- ret = iwl_rx_queue_update_write_ptr(priv, rxq);
- }
- return ret;
- }
- EXPORT_SYMBOL(iwl_rx_queue_restock);
- /**
- * iwl_rx_allocate - Move all used buffers from rx_used to rx_free
- *
- * An SKB is allocated for each buffer as it is moved to rx_free.
- *
- * iwl_rx_replenish() below calls this and then restocks the Rx queue via
- * iwl_rx_queue_restock; it is called as a scheduled work item (except
- * during initialization).
- */
- void iwl_rx_allocate(struct iwl_priv *priv)
- {
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
- spin_lock_irqsave(&rxq->lock, flags);
- while (!list_empty(&rxq->rx_used)) {
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- /* Alloc a new receive buffer */
- rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
- __GFP_NOWARN | GFP_ATOMIC);
- if (!rxb->skb) {
- if (net_ratelimit())
- printk(KERN_CRIT DRV_NAME
- ": Can not allocate SKB buffers\n");
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- break;
- }
- priv->alloc_rxb_skb++;
- list_del(element);
- /* Get physical address of RB/SKB */
- rxb->dma_addr =
- pci_map_single(priv->pci_dev, rxb->skb->data,
- priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
- EXPORT_SYMBOL(iwl_rx_allocate);
- void iwl_rx_replenish(struct iwl_priv *priv)
- {
- unsigned long flags;
- iwl_rx_allocate(priv);
- spin_lock_irqsave(&priv->lock, flags);
- iwl_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
- EXPORT_SYMBOL(iwl_rx_replenish);
- /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the pool entry must have its skb set to NULL.
- * This free routine walks the list of pool entries; any entry whose skb is
- * still non-NULL is unmapped and freed.
- */
- void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
- {
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].skb != NULL) {
- pci_unmap_single(priv->pci_dev,
- rxq->pool[i].dma_addr,
- priv->hw_params.rx_buf_size,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(rxq->pool[i].skb);
- }
- }
- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
- rxq->bd = NULL;
- }
- EXPORT_SYMBOL(iwl_rx_queue_free);
- int iwl_rx_queue_alloc(struct iwl_priv *priv)
- {
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct pci_dev *dev = priv->pci_dev;
- int i;
- spin_lock_init(&rxq->lock);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
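- /* Each RBD is a single __le32 (the DMA address >> 8), hence the
- * 4 bytes per entry in the allocation below */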
- rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
- if (!rxq->bd)
- return -ENOMEM;
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- /* Set the driver state as if all buffers have been processed and used, but
- * the Rx queue has not yet been restocked with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->free_count = 0;
- rxq->need_update = 0;
- return 0;
- }
- EXPORT_SYMBOL(iwl_rx_queue_alloc);
- void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
- {
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* At reset time a pool entry may still have an SKB attached from an
- * earlier allocation, so unmap and free any such storage */
- if (rxq->pool[i].skb != NULL) {
- pci_unmap_single(priv->pci_dev,
- rxq->pool[i].dma_addr,
- priv->hw_params.rx_buf_size,
- PCI_DMA_FROMDEVICE);
- priv->alloc_rxb_skb--;
- dev_kfree_skb(rxq->pool[i].skb);
- rxq->pool[i].skb = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
- /* Set the driver state as if all buffers have been processed and used, but
- * the Rx queue has not yet been restocked with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
- EXPORT_SYMBOL(iwl_rx_queue_reset);
- int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
- {
- int ret;
- unsigned long flags;
- unsigned int rb_size;
- spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (ret) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
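- /* Use 8K receive buffers when 8K A-MSDU reception is enabled via the
- * module parameter, otherwise the default 4K */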
- if (priv->cfg->mod_params->amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
- /* Stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- /* Reset driver's Rx queue write index */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- rxq->dma_addr >> 8);
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- (priv->shared_phys + priv->rb_closed_offset) >> 4);
- /* Enable Rx DMA, enable host interrupt, Rx buffer size (4K or 8K), 256 RBDs */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- rb_size |
- /* 0x10 << 4 | */
- (RX_QUEUE_SIZE_LOG <<
- FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
- /*
- * iwl_write32(priv,CSR_INT_COAL_REG,0);
- */
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
- }
- int iwl_rxq_stop(struct iwl_priv *priv)
- {
- int ret;
- unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- ret = iwl_grab_nic_access(priv);
- if (unlikely(ret)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
- /* stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
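- /* Wait for the Rx DMA channel to go idle (bit 24 of the Rx status
- * register) before releasing NIC access */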
- ret = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- (1 << 24), 1000);
- if (ret < 0)
- IWL_ERROR("Can't stop Rx DMA.\n");
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
- }
- EXPORT_SYMBOL(iwl_rxq_stop);
- void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
- {
- #ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
- struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
- struct iwl4965_missed_beacon_notif *missed_beacon;
- missed_beacon = &pkt->u.missed_beacon;
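- /* More than five consecutive missed beacons suggests the sensitivity
- * calibration may be stale; redo it unless a scan is in progress */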
- if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
- IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consequtive_missed_beacons),
- le32_to_cpu(missed_beacon->total_missed_becons),
- le32_to_cpu(missed_beacon->num_recvd_beacons),
- le32_to_cpu(missed_beacon->num_expected_beacons));
- if (!test_bit(STATUS_SCANNING, &priv->status))
- iwl_init_sensitivity(priv);
- }
- #endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
- }
- EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);