@@ -4066,7 +4066,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 		/* errors is only valid for DD + EOP descriptors */
 		if (unlikely((status & E1000_RXD_STAT_EOP) &&
 		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
-			u8 last_byte = *(skb->data + length - 1);
+			u8 *mapped;
+			u8 last_byte;
+
+			mapped = page_address(buffer_info->page);
+			last_byte = *(mapped + length - 1);
 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
 				       last_byte)) {
 				spin_lock_irqsave(&adapter->stats_lock,
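
Editorial note (not part of the patch): on the jumbo receive path the hardware DMAs the frame into buffer_info->page; the skb linear area does not hold the received data at this point, so the old read through skb->data inspected the wrong buffer. The replacement reads the frame's trailing byte through the page's kernel mapping, roughly:

	/* minimal sketch, assuming buffer_info->page is a lowmem page so
	 * page_address() returns a valid direct mapping without kmap()
	 */
	u8 *mapped = page_address(buffer_info->page);
	u8 last_byte = *(mapped + length - 1);	/* trailing byte of the frame */

TBI_ACCEPT() then examines that trailing byte to decide whether a frame flagged with a frame error may still be accepted under the TBI compatibility workaround.
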
@@ -4391,30 +4395,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			break;
 		}
 
-		/* Fix for errata 23, can't cross 64kB boundary */
-		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
-			struct sk_buff *oldskb = skb;
-			e_err(rx_err, "skb align check failed: %u bytes at "
-			      "%p\n", bufsz, skb->data);
-			/* Try again, without freeing the previous */
-			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
-			/* Failed allocation, critical failure */
-			if (!skb) {
-				dev_kfree_skb(oldskb);
-				adapter->alloc_rx_buff_failed++;
-				break;
-			}
-
-			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
-				/* give up */
-				dev_kfree_skb(skb);
-				dev_kfree_skb(oldskb);
-				break; /* while (cleaned_count--) */
-			}
-
-			/* Use new allocation */
-			dev_kfree_skb(oldskb);
-		}
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 check_page:
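
Editorial note (not part of the patch): errata 23 affects first-revision 82545/82546 parts, which must not have the hardware write across a 64 KB boundary. On the jumbo path the device writes into buffer_info->page, which is DMA-mapped separately; the skb allocated here is never handed to the hardware, so the alignment retry loop appears to guard nothing, which is presumably why it can be dropped. For reference, the boundary test reduces to checking whether the first and last bytes of the buffer fall in the same 64 KB region; the helper below only illustrates that arithmetic and is not the driver's e1000_check_64k_bound():

	/* sketch of the 64 KB-crossing test; helper name is hypothetical */
	static bool crosses_64k(void *start, unsigned long len)
	{
		unsigned long begin = (unsigned long)start;
		unsigned long end = begin + len - 1;	/* last byte of buffer */

		return (begin >> 16) != (end >> 16);	/* straddles a boundary? */
	}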