@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
 static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
 					     struct efx_rx_buffer *buf)
 {
-	/* Offset is always within one page, so we don't need to consider
-	 * the page order.
-	 */
-	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
-		efx->type->rx_buffer_hash_size;
+	return buf->page_offset + efx->type->rx_buffer_hash_size;
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_rx_buffer *rx_buf;
 	struct page *page;
+	unsigned int page_offset;
 	struct efx_rx_page_state *state;
 	dma_addr_t dma_addr;
 	unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		state->dma_addr = dma_addr;
 
 		dma_addr += sizeof(struct efx_rx_page_state);
+		page_offset = sizeof(struct efx_rx_page_state);
 
 	split:
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
 		rx_buf->u.page = page;
+		rx_buf->page_offset = page_offset;
 		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
 		rx_buf->flags = EFX_RX_BUF_PAGE;
 		++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 			/* Use the second half of the page */
 			get_page(page);
 			dma_addr += (PAGE_SIZE >> 1);
+			page_offset += (PAGE_SIZE >> 1);
 			++count;
 			goto split;
 		}