@@ -219,9 +219,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
     if (netdev) {
         dev_info(&adapter->pdev->dev, "Net device Info\n");
         pr_info("Device Name state trans_start last_rx\n");
-        pr_info("%-15s %016lX %016lX %016lX\n",
-                netdev->name, netdev->state, netdev->trans_start,
-                netdev->last_rx);
+        pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+                netdev->state, netdev->trans_start, netdev->last_rx);
     }

     /* Print Registers */
@@ -755,8 +754,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
                 cpu_to_le64(ps_page->dma);
         }

-        skb = __netdev_alloc_skb_ip_align(netdev,
-                                          adapter->rx_ps_bsize0,
+        skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
                                           gfp);

         if (!skb) {
@@ -850,8 +848,8 @@ check_page:

         if (!buffer_info->dma)
             buffer_info->dma = dma_map_page(&pdev->dev,
-                                            buffer_info->page, 0,
-                                            PAGE_SIZE,
+                                            buffer_info->page, 0,
+                                            PAGE_SIZE,
                                             DMA_FROM_DEVICE);

         rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
@@ -937,10 +935,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,

         cleaned = true;
         cleaned_count++;
-        dma_unmap_single(&pdev->dev,
-                         buffer_info->dma,
-                         adapter->rx_buffer_len,
-                         DMA_FROM_DEVICE);
+        dma_unmap_single(&pdev->dev, buffer_info->dma,
+                         adapter->rx_buffer_len, DMA_FROM_DEVICE);
         buffer_info->dma = 0;

         length = le16_to_cpu(rx_desc->wb.upper.length);
@@ -1068,8 +1064,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring,
 static void e1000_print_hw_hang(struct work_struct *work)
 {
     struct e1000_adapter *adapter = container_of(work,
-                                                 struct e1000_adapter,
-                                                 print_hang_task);
+                                                 struct e1000_adapter,
+                                                 print_hang_task);
     struct net_device *netdev = adapter->netdev;
     struct e1000_ring *tx_ring = adapter->tx_ring;
     unsigned int i = tx_ring->next_to_clean;
@@ -1082,8 +1078,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
     if (test_bit(__E1000_DOWN, &adapter->state))
         return;

-    if (!adapter->tx_hang_recheck &&
-        (adapter->flags2 & FLAG2_DMA_BURST)) {
+    if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
         /* May be block on write-back, flush and detect again
          * flush pending descriptor writebacks to memory
          */
@@ -1125,19 +1120,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
           "PHY 1000BASE-T Status <%x>\n"
           "PHY Extended Status <%x>\n"
           "PCI Status <%x>\n",
-          readl(tx_ring->head),
-          readl(tx_ring->tail),
-          tx_ring->next_to_use,
-          tx_ring->next_to_clean,
-          tx_ring->buffer_info[eop].time_stamp,
-          eop,
-          jiffies,
-          eop_desc->upper.fields.status,
-          er32(STATUS),
-          phy_status,
-          phy_1000t_status,
-          phy_ext_status,
-          pci_status);
+          readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
+          tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
+          eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
+          phy_status, phy_1000t_status, phy_ext_status, pci_status);

     /* Suggest workaround for known h/w issue */
     if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
@@ -1430,7 +1416,7 @@ copydone:
     e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

     if (rx_desc->wb.upper.header_status &
-        cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
+        cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
         adapter->rx_hdr_split++;

     e1000_receive_skb(adapter, netdev, skb, staterr,
@@ -1468,7 +1454,7 @@ next_desc:
  * e1000_consume_page - helper function
  **/
 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
-                               u16 length)
+                               u16 length)
 {
     bi->page = NULL;
     skb->len += length;
@@ -1495,7 +1481,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
     unsigned int i;
     int cleaned_count = 0;
     bool cleaned = false;
-    unsigned int total_rx_bytes=0, total_rx_packets=0;
+    unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+    struct skb_shared_info *shinfo;

     i = rx_ring->next_to_clean;
     rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
@@ -1541,7 +1528,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
             rx_ring->rx_skb_top = NULL;
             goto next_desc;
         }
-
 #define rxtop (rx_ring->rx_skb_top)
         if (!(staterr & E1000_RXD_STAT_EOP)) {
             /* this descriptor is only the beginning (or middle) */
@@ -1549,12 +1535,13 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                 /* this is the beginning of a chain */
                 rxtop = skb;
                 skb_fill_page_desc(rxtop, 0, buffer_info->page,
-                                   0, length);
+                                   0, length);
             } else {
                 /* this is the middle of a chain */
-                skb_fill_page_desc(rxtop,
-                                   skb_shinfo(rxtop)->nr_frags,
-                                   buffer_info->page, 0, length);
+                shinfo = skb_shinfo(rxtop);
+                skb_fill_page_desc(rxtop, shinfo->nr_frags,
+                                   buffer_info->page, 0,
+                                   length);
                 /* re-use the skb, only consumed the page */
                 buffer_info->skb = skb;
             }
@@ -1563,9 +1550,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
         } else {
             if (rxtop) {
                 /* end of the chain */
-                skb_fill_page_desc(rxtop,
-                                   skb_shinfo(rxtop)->nr_frags,
-                                   buffer_info->page, 0, length);
+                shinfo = skb_shinfo(rxtop);
+                skb_fill_page_desc(rxtop, shinfo->nr_frags,
+                                   buffer_info->page, 0,
+                                   length);
                 /* re-use the current skb, we only consumed the
                  * page
                  */
@@ -1590,10 +1578,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                     skb_put(skb, length);
                 } else {
                     skb_fill_page_desc(skb, 0,
-                                       buffer_info->page, 0,
-                                       length);
+                                       buffer_info->page, 0,
+                                       length);
                     e1000_consume_page(buffer_info, skb,
-                                       length);
+                                       length);
                 }
             }
         }
@@ -1666,8 +1654,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
                              DMA_FROM_DEVICE);
         else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
             dma_unmap_page(&pdev->dev, buffer_info->dma,
-                           PAGE_SIZE,
-                           DMA_FROM_DEVICE);
+                           PAGE_SIZE, DMA_FROM_DEVICE);
         else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
             dma_unmap_single(&pdev->dev, buffer_info->dma,
                              adapter->rx_ps_bsize0,
@@ -1720,7 +1707,8 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
 static void e1000e_downshift_workaround(struct work_struct *work)
 {
     struct e1000_adapter *adapter = container_of(work,
-                                                 struct e1000_adapter, downshift_task);
+                                                 struct e1000_adapter,
+                                                 downshift_task);

     if (test_bit(__E1000_DOWN, &adapter->state))
         return;
@@ -1913,7 +1901,6 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
     struct e1000_hw *hw = &adapter->hw;
     struct e1000_ring *tx_ring = adapter->tx_ring;

-
     adapter->total_tx_bytes = 0;
     adapter->total_tx_packets = 0;

@@ -1970,7 +1957,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
         ew32(RFCTL, rfctl);
     }

-#define E1000_IVAR_INT_ALLOC_VALID 0x8
     /* Configure Rx vector */
     rx_ring->ims_val = E1000_IMS_RXQ0;
     adapter->eiac_mask |= rx_ring->ims_val;
@@ -2045,8 +2031,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
     if (adapter->flags & FLAG_HAS_MSIX) {
         adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
         adapter->msix_entries = kcalloc(adapter->num_vectors,
-                                        sizeof(struct msix_entry),
-                                        GFP_KERNEL);
+                                        sizeof(struct
+                                               msix_entry),
+                                        GFP_KERNEL);
         if (adapter->msix_entries) {
             for (i = 0; i < adapter->num_vectors; i++)
                 adapter->msix_entries[i].entry = i;
@@ -2490,7 +2477,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
     switch (itr_setting) {
     case lowest_latency:
         /* handle TSO and jumbo frames */
-        if (bytes/packets > 8000)
+        if (bytes / packets > 8000)
             retval = bulk_latency;
         else if ((packets < 5) && (bytes > 512))
             retval = low_latency;
@@ -2498,13 +2485,13 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
     case low_latency: /* 50 usec aka 20000 ints/s */
         if (bytes > 10000) {
             /* this if handles the TSO accounting */
-            if (bytes/packets > 8000)
+            if (bytes / packets > 8000)
                 retval = bulk_latency;
-            else if ((packets < 10) || ((bytes/packets) > 1200))
+            else if ((packets < 10) || ((bytes / packets) > 1200))
                 retval = bulk_latency;
             else if ((packets > 35))
                 retval = lowest_latency;
-        } else if (bytes/packets > 2000) {
+        } else if (bytes / packets > 2000) {
             retval = bulk_latency;
         } else if (packets <= 2 && bytes < 512) {
             retval = lowest_latency;
@@ -2556,8 +2543,8 @@ static void e1000_set_itr(struct e1000_adapter *adapter)

     current_itr = max(adapter->rx_itr, adapter->tx_itr);

-    switch (current_itr) {
     /* counts and packets in update_itr are dependent on these numbers */
+    switch (current_itr) {
     case lowest_latency:
         new_itr = 70000;
         break;
@@ -2578,8 +2565,7 @@ set_itr_now:
          * increasing
          */
         new_itr = new_itr > adapter->itr ?
-            min(adapter->itr + (new_itr >> 2), new_itr) :
-            new_itr;
+            min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
         adapter->itr = new_itr;
         adapter->rx_ring->itr_val = new_itr;
         if (adapter->msix_entries)
@@ -2810,8 +2796,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
     u16 vid = adapter->hw.mng_cookie.vlan_id;
     u16 old_vid = adapter->mng_vlan_id;

-    if (adapter->hw.mng_cookie.status &
-        E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+    if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
         e1000_vlan_rx_add_vid(netdev, vid);
         adapter->mng_vlan_id = vid;
     }
@@ -2827,7 +2812,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
     e1000_vlan_rx_add_vid(adapter->netdev, 0);

     for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
-        e1000_vlan_rx_add_vid(adapter->netdev, vid);
+        e1000_vlan_rx_add_vid(adapter->netdev, vid);
 }

 static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
@@ -3002,8 +2987,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
     rctl = er32(RCTL);
     rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
     rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
-            E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
-            (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+            E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+            (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

     /* Do not Store bad packets */
     rctl &= ~E1000_RCTL_SBP;
@@ -3089,19 +3074,17 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
         /* Enable Packet split descriptors */
         rctl |= E1000_RCTL_DTYP_PS;

-        psrctl |= adapter->rx_ps_bsize0 >>
-                  E1000_PSRCTL_BSIZE0_SHIFT;
+        psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;

         switch (adapter->rx_ps_pages) {
         case 3:
-            psrctl |= PAGE_SIZE <<
-                      E1000_PSRCTL_BSIZE3_SHIFT;
+            psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
+            /* fall-through */
         case 2:
-            psrctl |= PAGE_SIZE <<
-                      E1000_PSRCTL_BSIZE2_SHIFT;
+            psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
+            /* fall-through */
         case 1:
-            psrctl |= PAGE_SIZE >>
-                      E1000_PSRCTL_BSIZE1_SHIFT;
+            psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
             break;
         }

@@ -3275,7 +3258,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev)
     /* update_mc_addr_list expects a packed array of only addresses. */
     i = 0;
     netdev_for_each_mc_addr(ha, netdev)
-        memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+        memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

     hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
     kfree(mta_list);
@@ -3752,8 +3735,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
      * but don't include ethernet FCS because hardware appends it
      */
     min_tx_space = (adapter->max_frame_size +
-                    sizeof(struct e1000_tx_desc) -
-                    ETH_FCS_LEN) * 2;
+                    sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
     min_tx_space = ALIGN(min_tx_space, 1024);
     min_tx_space >>= 10;
     /* software strips receive CRC, so leave room for it */
@@ -3856,13 +3838,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
     if ((adapter->max_frame_size * 2) > (pba << 10)) {
         if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
             dev_info(&adapter->pdev->dev,
-                     "Interrupt Throttle Rate turned off\n");
+                     "Interrupt Throttle Rate off\n");
             adapter->flags2 |= FLAG2_DISABLE_AIM;
             e1000e_write_itr(adapter, 0);
         }
     } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
         dev_info(&adapter->pdev->dev,
-                 "Interrupt Throttle Rate turned on\n");
+                 "Interrupt Throttle Rate on\n");
         adapter->flags2 &= ~FLAG2_DISABLE_AIM;
         adapter->itr = 20000;
         e1000e_write_itr(adapter, adapter->itr);
@@ -4261,8 +4243,7 @@ static int e1000_open(struct net_device *netdev)
     e1000e_power_up_phy(adapter);

     adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-    if ((adapter->hw.mng_cookie.status &
-         E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
+    if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
         e1000_update_mng_vlan(adapter);

     /* DMA latency requirement to workaround jumbo issue */
@@ -4364,8 +4345,7 @@ static int e1000_close(struct net_device *netdev)
     /* kill manageability vlan ID if supported, but not if a vlan with
      * the same ID is registered on the host OS (let 8021q kill it)
      */
-    if (adapter->hw.mng_cookie.status &
-        E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
+    if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
         e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

     /* If AMT is enabled, let the firmware know that the network
@@ -4381,6 +4361,7 @@ static int e1000_close(struct net_device *netdev)

     return 0;
 }
+
 /**
  * e1000_set_mac - Change the Ethernet Address of the NIC
  * @netdev: network interface device structure
@@ -4431,7 +4412,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 static void e1000e_update_phy_task(struct work_struct *work)
 {
     struct e1000_adapter *adapter = container_of(work,
-                                                 struct e1000_adapter, update_phy_task);
+                                                 struct e1000_adapter,
+                                                 update_phy_task);

     if (test_bit(__E1000_DOWN, &adapter->state))
         return;
@@ -4448,7 +4430,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
  **/
 static void e1000_update_phy_info(unsigned long data)
 {
-    struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+    struct e1000_adapter *adapter = (struct e1000_adapter *)data;

     if (test_bit(__E1000_DOWN, &adapter->state))
         return;
@@ -4615,18 +4597,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
      * our own version based on RUC and ROC
      */
     netdev->stats.rx_errors = adapter->stats.rxerrc +
-        adapter->stats.crcerrs + adapter->stats.algnerrc +
-        adapter->stats.ruc + adapter->stats.roc +
-        adapter->stats.cexterr;
+        adapter->stats.crcerrs + adapter->stats.algnerrc +
+        adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
     netdev->stats.rx_length_errors = adapter->stats.ruc +
-        adapter->stats.roc;
+        adapter->stats.roc;
     netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
     netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
     netdev->stats.rx_missed_errors = adapter->stats.mpc;

     /* Tx Errors */
-    netdev->stats.tx_errors = adapter->stats.ecol +
-        adapter->stats.latecol;
+    netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
     netdev->stats.tx_aborted_errors = adapter->stats.ecol;
     netdev->stats.tx_window_errors = adapter->stats.latecol;
     netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
@@ -4782,7 +4762,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
  **/
 static void e1000_watchdog(unsigned long data)
 {
-    struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+    struct e1000_adapter *adapter = (struct e1000_adapter *)data;

     /* Do the rest outside of interrupt context */
     schedule_work(&adapter->watchdog_task);
@@ -4793,7 +4773,8 @@ static void e1000_watchdog(unsigned long data)
 static void e1000_watchdog_task(struct work_struct *work)
 {
     struct e1000_adapter *adapter = container_of(work,
-                                                 struct e1000_adapter, watchdog_task);
+                                                 struct e1000_adapter,
+                                                 watchdog_task);
     struct net_device *netdev = adapter->netdev;
     struct e1000_mac_info *mac = &adapter->hw.mac;
     struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -4827,8 +4808,8 @@ static void e1000_watchdog_task(struct work_struct *work)
             /* update snapshot of PHY registers on LSC */
             e1000_phy_read_status(adapter);
             mac->ops.get_link_up_info(&adapter->hw,
-                                      &adapter->link_speed,
-                                      &adapter->link_duplex);
+                                      &adapter->link_speed,
+                                      &adapter->link_duplex);
             e1000_print_link_info(adapter);

             /* check if SmartSpeed worked */
@@ -4941,7 +4922,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                     adapter->flags |= FLAG_RESTART_NOW;
                 else
                     pm_schedule_suspend(netdev->dev.parent,
-                                        LINK_TIMEOUT);
+                                        LINK_TIMEOUT);
             }
         }

@@ -4976,8 +4957,8 @@ link_up:
          */
         u32 goc = (adapter->gotc + adapter->gorc) / 10000;
         u32 dif = (adapter->gotc > adapter->gorc ?
-                   adapter->gotc - adapter->gorc :
-                   adapter->gorc - adapter->gotc) / 10000;
+                   adapter->gotc - adapter->gorc :
+                   adapter->gorc - adapter->gotc) / 10000;
         u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

         e1000e_write_itr(adapter, itr);
@@ -5056,14 +5037,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
         iph->tot_len = 0;
         iph->check = 0;
         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                 0, IPPROTO_TCP, 0);
+                                                 0, IPPROTO_TCP, 0);
         cmd_length = E1000_TXD_CMD_IP;
         ipcse = skb_transport_offset(skb) - 1;
     } else if (skb_is_gso_v6(skb)) {
         ipv6_hdr(skb)->payload_len = 0;
         tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                               &ipv6_hdr(skb)->daddr,
-                                               0, IPPROTO_TCP, 0);
+                                               &ipv6_hdr(skb)->daddr,
+                                               0, IPPROTO_TCP, 0);
         ipcse = 0;
     }
     ipcss = skb_network_offset(skb);
@@ -5072,7 +5053,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
     tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;

     cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
-                   E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+                   E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

     i = tx_ring->next_to_use;
     context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
@@ -5142,8 +5123,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)

     context_desc->lower_setup.ip_config = 0;
     context_desc->upper_setup.tcp_fields.tucss = css;
-    context_desc->upper_setup.tcp_fields.tucso =
-        css + skb->csum_offset;
+    context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
     context_desc->upper_setup.tcp_fields.tucse = 0;
     context_desc->tcp_seg_setup.data = 0;
     context_desc->cmd_and_length = cpu_to_le32(cmd_len);
@@ -5216,7 +5196,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
         buffer_info->time_stamp = jiffies;
         buffer_info->next_to_watch = i;
         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
-                                            offset, size, DMA_TO_DEVICE);
+                                            offset, size,
+                                            DMA_TO_DEVICE);
         buffer_info->mapped_as_page = true;
         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
             goto dma_error;
@@ -5265,7 +5246,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)

     if (tx_flags & E1000_TX_FLAGS_TSO) {
         txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
-                     E1000_TXD_CMD_TSE;
+                     E1000_TXD_CMD_TSE;
         txd_upper |= E1000_TXD_POPTS_TXSM << 8;

         if (tx_flags & E1000_TX_FLAGS_IPV4)
@@ -5296,8 +5277,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
         buffer_info = &tx_ring->buffer_info[i];
         tx_desc = E1000_TX_DESC(*tx_ring, i);
         tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
-        tx_desc->lower.data =
-            cpu_to_le32(txd_lower | buffer_info->length);
+        tx_desc->lower.data = cpu_to_le32(txd_lower |
+                                          buffer_info->length);
         tx_desc->upper.data = cpu_to_le32(txd_upper);

         i++;
@@ -5347,11 +5328,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
     if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
         return 0;

-    if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
+    if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
         return 0;

     {
-        const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
+        const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
         struct udphdr *udp;

         if (ip->protocol != IPPROTO_UDP)
@@ -5576,7 +5557,7 @@ static void e1000_reset_task(struct work_struct *work)
  * Returns the address of the device statistics structure.
  **/
 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
-                                             struct rtnl_link_stats64 *stats)
+                                             struct rtnl_link_stats64 *stats)
 {
     struct e1000_adapter *adapter = netdev_priv(netdev);

@@ -5597,18 +5578,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
      * our own version based on RUC and ROC
      */
     stats->rx_errors = adapter->stats.rxerrc +
-        adapter->stats.crcerrs + adapter->stats.algnerrc +
-        adapter->stats.ruc + adapter->stats.roc +
-        adapter->stats.cexterr;
-    stats->rx_length_errors = adapter->stats.ruc +
-        adapter->stats.roc;
+        adapter->stats.crcerrs + adapter->stats.algnerrc +
+        adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
+    stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
     stats->rx_crc_errors = adapter->stats.crcerrs;
     stats->rx_frame_errors = adapter->stats.algnerrc;
     stats->rx_missed_errors = adapter->stats.mpc;

     /* Tx Errors */
-    stats->tx_errors = adapter->stats.ecol +
-        adapter->stats.latecol;
+    stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
     stats->tx_aborted_errors = adapter->stats.ecol;
     stats->tx_window_errors = adapter->stats.latecol;
     stats->tx_carrier_errors = adapter->stats.tncrs;
@@ -5677,9 +5655,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)

     /* adjust allocation if LPE protects us, and we aren't using SBP */
     if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-        (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+        (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
         adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
-            + ETH_FCS_LEN;
+            + ETH_FCS_LEN;

     if (netif_running(netdev))
         e1000e_up(adapter);
@@ -5858,7 +5836,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
     phy_reg &= ~(BM_RCTL_MO_MASK);
     if (mac_reg & E1000_RCTL_MO_3)
         phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
-                    << BM_RCTL_MO_SHIFT);
+                    << BM_RCTL_MO_SHIFT);
     if (mac_reg & E1000_RCTL_BAM)
         phy_reg |= BM_RCTL_BAM;
     if (mac_reg & E1000_RCTL_PMCF)
@@ -5932,10 +5910,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
     }

     ctrl = er32(CTRL);
-    /* advertise wake from D3Cold */
-    #define E1000_CTRL_ADVD3WUC 0x00100000
-    /* phy power management enable */
-    #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
     ctrl |= E1000_CTRL_ADVD3WUC;
     if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
         ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
@@ -6002,8 +5976,7 @@ static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
         pci_set_power_state(pdev, PCI_D3hot);
 }

-static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
-                                    bool wake)
+static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, bool wake)
 {
     struct net_device *netdev = pci_get_drvdata(pdev);
     struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6107,24 +6080,24 @@ static int __e1000_resume(struct pci_dev *pdev)
         e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
         if (phy_data) {
             e_info("PHY Wakeup cause - %s\n",
-                   phy_data & E1000_WUS_EX ? "Unicast Packet" :
-                   phy_data & E1000_WUS_MC ? "Multicast Packet" :
-                   phy_data & E1000_WUS_BC ? "Broadcast Packet" :
-                   phy_data & E1000_WUS_MAG ? "Magic Packet" :
-                   phy_data & E1000_WUS_LNKC ?
-                   "Link Status Change" : "other");
+                   phy_data & E1000_WUS_EX ? "Unicast Packet" :
+                   phy_data & E1000_WUS_MC ? "Multicast Packet" :
+                   phy_data & E1000_WUS_BC ? "Broadcast Packet" :
+                   phy_data & E1000_WUS_MAG ? "Magic Packet" :
+                   phy_data & E1000_WUS_LNKC ?
+                   "Link Status Change" : "other");
         }
         e1e_wphy(&adapter->hw, BM_WUS, ~0);
     } else {
         u32 wus = er32(WUS);
         if (wus) {
             e_info("MAC Wakeup cause - %s\n",
-                   wus & E1000_WUS_EX ? "Unicast Packet" :
-                   wus & E1000_WUS_MC ? "Multicast Packet" :
-                   wus & E1000_WUS_BC ? "Broadcast Packet" :
-                   wus & E1000_WUS_MAG ? "Magic Packet" :
-                   wus & E1000_WUS_LNKC ? "Link Status Change" :
-                   "other");
+                   wus & E1000_WUS_EX ? "Unicast Packet" :
+                   wus & E1000_WUS_MC ? "Multicast Packet" :
+                   wus & E1000_WUS_BC ? "Broadcast Packet" :
+                   wus & E1000_WUS_MAG ? "Magic Packet" :
+                   wus & E1000_WUS_LNKC ? "Link Status Change" :
+                   "other");
         }
         ew32(WUS, ~0);
     }
@@ -6413,7 +6386,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
     e_info("(PCI Express:2.5GT/s:%s) %pM\n",
            /* bus width */
            ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
-            "Width x1"),
+            "Width x1"),
            /* MAC address */
            netdev->dev_addr);
     e_info("Intel(R) PRO/%s Network Connection\n",
@@ -6523,7 +6496,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     resource_size_t flash_start, flash_len;
     static int cards_found;
     u16 aspm_disable_flag = 0;
-    int i, err, pci_using_dac;
+    int bars, i, err, pci_using_dac;
     u16 eeprom_data = 0;
     u16 eeprom_apme_mask = E1000_EEPROM_APME;

@@ -6550,15 +6523,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
             err = dma_set_coherent_mask(&pdev->dev,
                                         DMA_BIT_MASK(32));
             if (err) {
-                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+                dev_err(&pdev->dev,
+                        "No usable DMA configuration, aborting\n");
                 goto err_dma;
             }
         }
     }

-    err = pci_request_selected_regions_exclusive(pdev,
-                                                 pci_select_bars(pdev, IORESOURCE_MEM),
-                                                 e1000e_driver_name);
+    bars = pci_select_bars(pdev, IORESOURCE_MEM);
+    err = pci_request_selected_regions_exclusive(pdev, bars,
+                                                 e1000e_driver_name);
     if (err)
         goto err_pci_reg;

@@ -6727,11 +6701,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

     init_timer(&adapter->watchdog_timer);
     adapter->watchdog_timer.function = e1000_watchdog;
-    adapter->watchdog_timer.data = (unsigned long) adapter;
+    adapter->watchdog_timer.data = (unsigned long)adapter;

     init_timer(&adapter->phy_info_timer);
     adapter->phy_info_timer.function = e1000_update_phy_info;
-    adapter->phy_info_timer.data = (unsigned long) adapter;
+    adapter->phy_info_timer.data = (unsigned long)adapter;

     INIT_WORK(&adapter->reset_task, e1000_reset_task);
     INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
@@ -6835,7 +6809,7 @@ err_ioremap:
     free_netdev(netdev);
 err_alloc_etherdev:
     pci_release_selected_regions(pdev,
-                                 pci_select_bars(pdev, IORESOURCE_MEM));
+                                 pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
     pci_disable_device(pdev);
@@ -6905,7 +6879,7 @@ static void e1000_remove(struct pci_dev *pdev)
     if (adapter->hw.flash_address)
         iounmap(adapter->hw.flash_address);
     pci_release_selected_regions(pdev,
-                                 pci_select_bars(pdev, IORESOURCE_MEM));
+                                 pci_select_bars(pdev, IORESOURCE_MEM));

     free_netdev(netdev);

@@ -6926,7 +6900,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
-    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
+    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
+      board_82571 },
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
@@ -7002,8 +6977,8 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 #ifdef CONFIG_PM
 static const struct dev_pm_ops e1000_pm_ops = {
     SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
-    SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
-                       e1000_runtime_resume, e1000_idle)
+    SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
+                       e1000_idle)
 };
 #endif