@@ -745,7 +745,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 				   u8 type)
 {
 	struct iwl_spectrum_cmd spectrum;
-	struct iwl_rx_packet *res;
+	struct iwl_rx_packet *pkt;
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_SPECTRUM_MEASUREMENT_CMD,
 		.data = (void *)&spectrum,
@@ -790,18 +790,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 	if (rc)
 		return rc;
 
-	res = (struct iwl_rx_packet *)cmd.reply_skb->data;
-	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
+	pkt = (struct iwl_rx_packet *)cmd.reply_page;
+	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
 		IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
 		rc = -EIO;
 	}
 
-	spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
+	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
 	switch (spectrum_resp_status) {
 	case 0:		/* Command will be handled */
-		if (res->u.spectrum.id != 0xff) {
+		if (pkt->u.spectrum.id != 0xff) {
 			IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
-				       res->u.spectrum.id);
+				       pkt->u.spectrum.id);
 			priv->measurement_status &= ~MEASUREMENT_READY;
 		}
 		priv->measurement_status |= MEASUREMENT_ACTIVE;
@@ -813,7 +813,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 		break;
 	}
 
-	dev_kfree_skb_any(cmd.reply_skb);
+	free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
 
 	return rc;
 }
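Note on the two hunks above: with paged Rx, a synchronous host command returns its response in cmd.reply_page instead of an skb, and ownership of that page passes to the caller, which releases it with free_pages() once the packet has been parsed. Since the same value is cast to a packet pointer in one place and handed to free_pages() in the other, reply_page is presumably stored as the page's kernel virtual address in an unsigned long (its declaration is not part of this patch); a sketch of the assumed shape:

	/* Assumed, not shown in this patch: free_pages() takes an
	 * unsigned long address, so reply_page is most naturally the
	 * kernel virtual address of the response page. */
	struct iwl_host_cmd {
		/* ... */
		unsigned long reply_page;	/* response page, owned by caller */
	};
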
@@ -822,7 +822,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
 static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
 				   struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_alive_resp *palive;
 	struct delayed_work *pwork;
 
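This and the following hunks replace the repeated (void *)rxb->skb->data pattern with the rxb_addr() accessor. Its definition is not part of this hunk; a plausible page-based equivalent, assuming the rxb->page field introduced later in this patch, would be:

	/* Hypothetical definition: kernel virtual address of the rxb's page. */
	static inline void *rxb_addr(struct iwl_rx_mem_buffer *rxb)
	{
		return page_address(rxb->page);
	}
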
@@ -859,7 +859,7 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
 				     struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 #endif
 
 	IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
@@ -895,7 +895,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
 				    struct iwl_rx_mem_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
 	u8 rate = beacon->beacon_notify_hdr.rate;
 
@@ -918,7 +918,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
 static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
 					struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
 	unsigned long status = priv->status;
 
@@ -1082,7 +1082,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
 		list_del(element);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr);
+		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
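The restock path is unchanged apart from feeding the page DMA address into the RBD: iwl3945_dma_addr2rbd_ptr() converts a dma_addr_t into the little-endian 32-bit receive-buffer-descriptor pointer the 3945 hardware expects. A sketch of what such a conversion amounts to (the real helper lives elsewhere in this file):

	/* Sketch: 3945 RBDs hold a 32-bit little-endian DMA address. */
	static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
						      dma_addr_t dma_addr)
	{
		return cpu_to_le32((u32)dma_addr);
	}
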
@@ -1122,7 +1122,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
-	struct sk_buff *skb;
+	struct page *page;
 	unsigned long flags;
 
 	while (1) {
@@ -1136,9 +1136,13 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
 			priority |= __GFP_NOWARN;
+
+		if (priv->hw_params.rx_page_order > 0)
+			priority |= __GFP_COMP;
+
 		/* Alloc a new receive buffer */
-		skb = alloc_skb(priv->hw_params.rx_buf_size, priority);
-		if (!skb) {
+		page = alloc_pages(priority, priv->hw_params.rx_page_order);
+		if (!page) {
 			if (net_ratelimit())
 				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
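Two allocation details in the hunk above: __GFP_NOWARN suppresses allocation-failure warnings while the queue still has plenty of buffers, and __GFP_COMP is added whenever rx_page_order > 0 so a multi-page buffer is allocated as a single compound page, letting it be mapped and freed as one unit. A condensed, commented sketch of the flag assembly:

	/* priority arrives as GFP_KERNEL (replenish) or GFP_ATOMIC (IRQ path) */
	if (rxq->free_count > RX_LOW_WATERMARK)
		priority |= __GFP_NOWARN;	/* not starving yet: fail quietly */
	if (priv->hw_params.rx_page_order > 0)
		priority |= __GFP_COMP;		/* multi-page buffer: compound page */

	page = alloc_pages(priority, priv->hw_params.rx_page_order);
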
@@ -1155,7 +1159,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		spin_lock_irqsave(&rxq->lock, flags);
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			dev_kfree_skb_any(skb);
+			__free_pages(page, priv->hw_params.rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
@@ -1163,26 +1167,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		list_del(element);
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
-		rxb->skb = skb;
-
-		/* If radiotap head is required, reserve some headroom here.
-		 * The physical head count is a variable rx_stats->phy_count.
-		 * We reserve 4 bytes here. Plus these extra bytes, the
-		 * headroom of the physical head should be enough for the
-		 * radiotap head that iwl3945 supported. See iwl3945_rt.
-		 */
-		skb_reserve(rxb->skb, 4);
-
+		rxb->page = page;
 		/* Get physical address of RB/SKB */
-		rxb->real_dma_addr = pci_map_single(priv->pci_dev,
-						    rxb->skb->data,
-						    priv->hw_params.rx_buf_size,
-						    PCI_DMA_FROMDEVICE);
+		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
 
 		spin_lock_irqsave(&rxq->lock, flags);
+
 		list_add_tail(&rxb->list, &rxq->rx_free);
-		priv->alloc_rxb_skb++;
 		rxq->free_count++;
+		priv->alloc_rxb_page++;
+
 		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
 }
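The mapping above must be paired: the device owns the buffer between pci_map_page() and pci_unmap_page(), both called with the full buffer length PAGE_SIZE << rx_page_order, and the CPU may only parse or free the page after unmapping. A sketch of the lifecycle, using the field names from this patch:

	size_t len = PAGE_SIZE << priv->hw_params.rx_page_order;

	/* Hand the page to the device for DMA. */
	rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, 0, len,
				     PCI_DMA_FROMDEVICE);

	/* ... hardware fills the buffer, Rx interrupt fires ... */

	/* Reclaim it before the CPU reads or frees the page. */
	pci_unmap_page(priv->pci_dev, rxb->page_dma, len, PCI_DMA_FROMDEVICE);
	pkt = rxb_addr(rxb);
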
@@ -1198,14 +1194,14 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		/* In the reset function, these buffers may have been allocated
 		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size,
-					 PCI_DMA_FROMDEVICE);
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb(rxq->pool[i].skb);
-			rxq->pool[i].skb = NULL;
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			priv->alloc_rxb_page--;
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
 		}
 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
 	}
@@ -1213,8 +1209,8 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	/* Set us so that we have processed and used all buffers, but have
 	 * not restocked the Rx queue with fresh buffers */
 	rxq->read = rxq->write = 0;
-	rxq->free_count = 0;
 	rxq->write_actual = 0;
+	rxq->free_count = 0;
 	spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
@@ -1247,12 +1243,14 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 {
 	int i;
 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(rxq->pool[i].skb);
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
+			priv->alloc_rxb_page--;
 		}
 	}
 
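This teardown is now nearly identical in iwl3945_rx_queue_reset() above and iwl3945_rx_queue_free() here (only the alloc_rxb_page accounting order differs). If it grows again it could be factored out; a hypothetical helper, not part of this patch:

	/* Hypothetical: release one mapped pool entry. */
	static void iwl3945_free_rxb_page(struct iwl_priv *priv,
					  struct iwl_rx_mem_buffer *rxb)
	{
		pci_unmap_page(priv->pci_dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		__free_pages(rxb->page, priv->hw_params.rx_page_order);
		rxb->page = NULL;
		priv->alloc_rxb_page--;
	}
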
@@ -1388,10 +1386,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 
 		rxq->queue[i] = NULL;
 
-		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
-				 priv->hw_params.rx_buf_size,
-				 PCI_DMA_FROMDEVICE);
-		pkt = (struct iwl_rx_packet *)rxb->skb->data;
+		pci_unmap_page(priv->pci_dev, rxb->page_dma,
+			       PAGE_SIZE << priv->hw_params.rx_page_order,
+			       PCI_DMA_FROMDEVICE);
+		pkt = rxb_addr(rxb);
 
 		trace_iwlwifi_dev_rx(priv, pkt,
 			le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
@@ -1416,16 +1414,17 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
 		} else {
 			/* No handling needed */
-			IWL_DEBUG_RX(priv, "r %d i %d No handler needed for %s, 0x%02x\n",
+			IWL_DEBUG_RX(priv,
+				"r %d i %d No handler needed for %s, 0x%02x\n",
 				r, i, get_cmd_string(pkt->hdr.cmd),
 				pkt->hdr.cmd);
 		}
 
 		if (reclaim) {
-			/* Invoke any callbacks, transfer the skb to caller, and
-			 * fire off the (possibly) blocking iwl_send_cmd()
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking iwl_send_cmd()
 			 * as we reclaim the driver command queue */
-			if (rxb && rxb->skb)
+			if (rxb && rxb->page)
 				iwl_tx_cmd_complete(priv, rxb);
 			else
 				IWL_WARN(priv, "Claim null rxb?\n");
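The rxb->page test before iwl_tx_cmd_complete() matters because the command-completion path can take ownership of the page, which is also why the cleanup hunk below re-checks rxb->page before freeing. Roughly, abbreviated from the companion change elsewhere in this series (not this file):

	/* Inside iwl_tx_cmd_complete(), abbreviated: a command that wants
	 * the response takes the page, leaving rxb->page NULL so the
	 * caller's cleanup path skips the free. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	}
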
@@ -1434,10 +1433,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
 		/* For now we just don't re-use anything.  We can tweak this
 		 * later to try and re-use notification packets and SKBs that
 		 * fail to Rx correctly */
-		if (rxb->skb != NULL) {
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb_any(rxb->skb);
-			rxb->skb = NULL;
+		if (rxb->page != NULL) {
+			priv->alloc_rxb_page--;
+			__free_pages(rxb->page, priv->hw_params.rx_page_order);
+			rxb->page = NULL;
 		}
 
 		spin_lock_irqsave(&rxq->lock, flags);
@@ -1678,6 +1677,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 	}
 #endif
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
 	 * atomic, make sure that inta covers all the interrupts that
 	 * we've discovered, even if FH interrupt came in just after
@@ -1699,8 +1700,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
-		spin_unlock_irqrestore(&priv->lock, flags);
-
 		return;
 	}
 
@@ -1792,7 +1791,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
 	}
 #endif
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
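The last three hunks rework the tasklet's locking: priv->lock is now dropped at a single point, right after the interrupt status has been read and acknowledged, instead of on each exit path; the two later spin_unlock_irqrestore() calls are deleted so the long Rx/Tx handling runs unlocked and the lock cannot be released twice. The resulting shape, sketched with the body elided:

	static void iwl3945_irq_tasklet(struct iwl_priv *priv)
	{
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		/* ... read and ack CSR_INT / CSR_FH_INT_STATUS under the lock ... */
		spin_unlock_irqrestore(&priv->lock, flags);	/* single unlock point */

		/* HW-error path, Rx handling, etc. all run unlocked from here,
		 * returning directly without touching the lock again. */
	}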