@@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 			dma_map_page(trans->dev, page, 0,
 				     PAGE_SIZE << trans_pcie->rx_page_order,
 				     DMA_FROM_DEVICE);
+		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+			rxb->page = NULL;
+			spin_lock_irqsave(&rxq->lock, flags);
+			list_add(&rxb->list, &rxq->rx_used);
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			__free_pages(page, trans_pcie->rx_page_order);
+			return;
+		}
 		/* dma address must be no more than 36 bits */
 		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
 		/* and also 256 byte aligned! */
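Both hunks apply the standard DMA-API rule: the address returned by dma_map_page()
is not guaranteed to be valid and must be checked with dma_mapping_error() before
it is handed to hardware. A minimal standalone sketch of the pattern (the helper
name and error convention below are illustrative, not taken from this patch):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	/* Illustrative helper: map one RX page and validate the mapping. */
	static int map_rx_page(struct device *dev, struct page *page,
			       unsigned int order, dma_addr_t *addr)
	{
		*addr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *addr)) {
			/* Mapping failed; release the page so it is not leaked. */
			__free_pages(page, order);
			return -ENOMEM;
		}
		return 0;
	}
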
@@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 			dma_map_page(trans->dev, rxb->page, 0,
 				     PAGE_SIZE << trans_pcie->rx_page_order,
 				     DMA_FROM_DEVICE);
-		list_add_tail(&rxb->list, &rxq->rx_free);
-		rxq->free_count++;
+		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+			/*
+			 * free the page(s) as well to not break
+			 * the invariant that the items on the used
+			 * list have no page(s)
+			 */
+			__free_pages(rxb->page, trans_pcie->rx_page_order);
+			rxb->page = NULL;
+			list_add_tail(&rxb->list, &rxq->rx_used);
+		} else {
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		}
 	} else
 		list_add_tail(&rxb->list, &rxq->rx_used);
 	spin_unlock_irqrestore(&rxq->lock, flags);
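The in-diff comment points at the list invariant the second hunk preserves:
buffers queued on rxq->rx_used own no page (the allocator attaches and maps a
fresh one later), while buffers on rxq->rx_free are mapped and ready for the
hardware. A hedged sketch of that recycling step, using a hypothetical helper
built from the names visible in the diff (struct types assumed from the
surrounding driver code; caller assumed to hold rxq->lock):

	#include <linux/list.h>
	#include <linux/gfp.h>

	/*
	 * Hypothetical helper: return a buffer to the used list.
	 * Invariant: rx_used entries own no page, so any attached
	 * page must be released first.
	 */
	static void rxb_move_to_used(struct iwl_rx_queue *rxq,
				     struct iwl_rx_mem_buffer *rxb,
				     unsigned int order)
	{
		if (rxb->page) {
			__free_pages(rxb->page, order);
			rxb->page = NULL;
		}
		list_add_tail(&rxb->list, &rxq->rx_used);
	}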