@@ -81,7 +81,7 @@ typedef enum {
 } PHY_DEVICE_et;

 typedef struct {
-	PHY_DEVICE_et phyDevice;
+	PHY_DEVICE_et phyDevice;
 	u32 phyIdOUI;
 	u16 phyIdModel;
 	char *name;
@@ -330,7 +330,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 				PCI_DMA_FROMDEVICE);
 			err = pci_dma_mapping_error(map);
 			if(err) {
-				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
 					qdev->ndev->name, err);
 				dev_kfree_skb(lrg_buf_cb->skb);
 				lrg_buf_cb->skb = NULL;
@@ -884,14 +884,14 @@ static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
 	u16 reg;

 	/* Enable Auto-negotiation sense */
-	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
+	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
 		PHYAddr[qdev->mac_index]);
 	reg |= PETBI_TBI_AUTO_SENSE;
-	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
+	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
 		PHYAddr[qdev->mac_index]);

 	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
-		PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
+		PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
 		PHYAddr[qdev->mac_index]);

 	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
@@ -945,7 +945,7 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
 	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
 	/* Write new PHYAD w/bit 5 set */
 	ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
-	/*
+	/*
 	 * Disable diagnostic mode bit 2 = 0
 	 * Power up device bit 11 = 0
 	 * Link up (on) and activity (blink)
@@ -955,18 +955,18 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
 	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
 }

-static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
+static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
 		u16 phyIdReg0, u16 phyIdReg1)
 {
 	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
-	u32 oui;
+	u32 oui;
 	u16 model;
-	int i;
+	int i;

 	if (phyIdReg0 == 0xffff) {
 		return result;
 	}
-
+
 	if (phyIdReg1 == 0xffff) {
 		return result;
 	}
@@ -984,7 +984,7 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,

 			printk(KERN_INFO "%s: Phy: %s\n",
 				qdev->ndev->name, PHY_DEVICES[i].name);
-
+
 			break;
 		}
 	}
@@ -1033,7 +1033,7 @@ static int ql_is_full_dup(struct ql3_adapter *qdev)
 	{
 		if (ql_mii_read_reg(qdev, 0x1A, &reg))
 			return 0;
-
+
 		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
 	}
 	case PHY_VITESSE_VSC8211:
@@ -1082,19 +1082,19 @@ static int PHY_Setup(struct ql3_adapter *qdev)
 	/* Check if we have a Agere PHY */
 	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

-		/* Determine which MII address we should be using
+		/* Determine which MII address we should be using
 		   determined by the index of the card */
 		if (qdev->mac_index == 0) {
 			miiAddr = MII_AGERE_ADDR_1;
 		} else {
 			miiAddr = MII_AGERE_ADDR_2;
 		}
-
+
 		err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
 		if(err != 0) {
 			printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
 				qdev->ndev->name);
-			return err;
+			return err;
 		}

 		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
@@ -1103,9 +1103,9 @@ static int PHY_Setup(struct ql3_adapter *qdev)
 				qdev->ndev->name);
 			return err;
 		}
-
+
 		/* We need to remember to initialize the Agere PHY */
-		agereAddrChangeNeeded = true;
+		agereAddrChangeNeeded = true;
 	}

 	/* Determine the particular PHY we have on board to apply
@@ -1114,7 +1114,7 @@ static int PHY_Setup(struct ql3_adapter *qdev)

 	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
 		/* need this here so address gets changed */
-		phyAgereSpecificInit(qdev, miiAddr);
+		phyAgereSpecificInit(qdev, miiAddr);
 	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
 		printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
 		return -EIO;
@@ -1427,7 +1427,7 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)

 static void ql_phy_reset_ex(struct ql3_adapter *qdev)
 {
-	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
+	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
 		PHYAddr[qdev->mac_index]);
 }

@@ -1438,7 +1438,7 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)

 	if(qdev->phyType == PHY_AGERE_ET1011C) {
 		/* turn off external loopback */
-		ql_mii_write_reg(qdev, 0x13, 0x0000);
+		ql_mii_write_reg(qdev, 0x13, 0x0000);
 	}

 	if(qdev->mac_index == 0)
@@ -1452,23 +1452,23 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
 		portConfiguration = PORT_CONFIG_DEFAULT;

 	/* Set the 1000 advertisements */
-	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
+	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
 		PHYAddr[qdev->mac_index]);
 	reg &= ~PHY_GIG_ALL_PARAMS;

-	if(portConfiguration &
+	if(portConfiguration &
 		PORT_CONFIG_FULL_DUPLEX_ENABLED &
 		PORT_CONFIG_1000MB_SPEED) {
 		reg |= PHY_GIG_ADV_1000F;
 	}
-
-	if(portConfiguration &
+
+	if(portConfiguration &
 		PORT_CONFIG_HALF_DUPLEX_ENABLED &
 		PORT_CONFIG_1000MB_SPEED) {
 		reg |= PHY_GIG_ADV_1000H;
 	}

-	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
+	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
 		PHYAddr[qdev->mac_index]);

 	/* Set the 10/100 & pause negotiation advertisements */
@@ -1482,7 +1482,7 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
 	if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
 		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
 			reg |= PHY_NEG_ADV_100F;
-
+
 		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
 			reg |= PHY_NEG_ADV_10F;
 	}
@@ -1490,22 +1490,22 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
 	if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
 		if(portConfiguration & PORT_CONFIG_100MB_SPEED)
 			reg |= PHY_NEG_ADV_100H;
-
+
 		if(portConfiguration & PORT_CONFIG_10MB_SPEED)
 			reg |= PHY_NEG_ADV_10H;
 	}

 	if(portConfiguration &
 		PORT_CONFIG_1000MB_SPEED) {
-		reg |= 1;
+		reg |= 1;
 	}

-	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
+	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
 		PHYAddr[qdev->mac_index]);

 	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
-
-	ql_mii_write_reg_ex(qdev, CONTROL_REG,
+
+	ql_mii_write_reg_ex(qdev, CONTROL_REG,
 		reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
 		PHYAddr[qdev->mac_index]);
 }
@@ -1660,7 +1660,7 @@ static void ql_link_state_machine(struct ql3_adapter *qdev)
 				"%s: Reset in progress, skip processing link "
 				"state.\n", qdev->ndev->name);

-		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return;
 	}

@@ -1752,7 +1752,7 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
 		return -1;

 	if (qdev->device_id == QL3032_DEVICE_ID)
-		ql_write_page0_reg(qdev,
+		ql_write_page0_reg(qdev,
 			&port_regs->macMIIMgmtControlReg, 0x0f00000);

 	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
@@ -1936,7 +1936,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)

 				err = pci_dma_mapping_error(map);
 				if(err) {
-					printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+					printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
 						qdev->ndev->name, err);
 					dev_kfree_skb(lrg_buf_cb->skb);
 					lrg_buf_cb->skb = NULL;
@@ -2044,7 +2044,7 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 	if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
 		printk(KERN_WARNING "Frame short but, frame was padded and sent.\n");
 	}
-
+
 	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

 	/* Check the transmit response flags for any errors */
@@ -2108,13 +2108,13 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)

 /*
  * The difference between 3022 and 3032 for inbound completions:
- * 3022 uses two buffers per completion. The first buffer contains
- * (some) header info, the second the remainder of the headers plus
- * the data. For this chip we reserve some space at the top of the
- * receive buffer so that the header info in buffer one can be
- * prepended to the buffer two. Buffer two is the sent up while
+ * 3022 uses two buffers per completion. The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data. For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to the buffer two. Buffer two is the sent up while
  * buffer one is returned to the hardware to be reused.
- * 3032 receives all of it's data and headers in one buffer for a
+ * 3032 receives all of it's data and headers in one buffer for a
  * simpler process. 3032 also supports checksum verification as
  * can be seen in ql_process_macip_rx_intr().
  */
@@ -2205,13 +2205,13 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 			skb_push(skb2, size), size);
 	} else {
 		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
-		if (checksum &
-			(IB_IP_IOCB_RSP_3032_ICE |
-			 IB_IP_IOCB_RSP_3032_CE)) {
+		if (checksum &
+			(IB_IP_IOCB_RSP_3032_ICE |
+			 IB_IP_IOCB_RSP_3032_CE)) {
 			printk(KERN_ERR
 				"%s: Bad checksum for this %s packet, checksum = %x.\n",
 				__func__,
-				((checksum &
+				((checksum &
 				IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
 				"UDP"),checksum);
 		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
@@ -2394,12 +2394,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
 }

 /*
- * Get the total number of segments needed for the
+ * Get the total number of segments needed for the
  * given number of fragments. This is necessary because
  * outbound address lists (OAL) will be used when more than
- * two frags are given. Each address list has 5 addr/len
+ * two frags are given. Each address list has 5 addr/len
  * pairs. The 5th pair in each AOL is used to point to
- * the next AOL if more frags are coming.
+ * the next AOL if more frags are coming.
  * That is why the frags:segment count ratio is not linear.
  */
 static int ql_get_seg_count(struct ql3_adapter *qdev,
@@ -2476,12 +2476,12 @@ static int ql_send_map(struct ql3_adapter *qdev,

 	err = pci_dma_mapping_error(map);
 	if(err) {
-		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
+		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
 			qdev->ndev->name, err);

 		return NETDEV_TX_BUSY;
 	}
-
+
 	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
 	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
 	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
@@ -2511,7 +2511,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
 			err = pci_dma_mapping_error(map);
 			if(err) {

-				printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
+				printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
 					qdev->ndev->name, err);
 				goto map_error;
 			}
@@ -2537,7 +2537,7 @@ static int ql_send_map(struct ql3_adapter *qdev,

 		err = pci_dma_mapping_error(map);
 		if(err) {
-			printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
+			printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
 				qdev->ndev->name, err);
 			goto map_error;
 		}
@@ -2558,10 +2558,10 @@ static int ql_send_map(struct ql3_adapter *qdev,

 map_error:
 	/* A PCI mapping failed and now we will need to back out
-	 * We need to traverse through the oal's and associated pages which
+	 * We need to traverse through the oal's and associated pages which
 	 * have been mapped and now we must unmap them to clean up properly
 	 */
-
+
 	seg = 1;
 	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
 	oal = tx_cb->oal;
@@ -2599,11 +2599,11 @@ map_error:
  * The difference between 3022 and 3032 sends:
  * 3022 only supports a simple single segment transmission.
  * 3032 supports checksumming and scatter/gather lists (fragments).
- * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
- * in the IOCB plus a chain of outbound address lists (OAL) that
- * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
- * will used to point to an OAL when more ALP entries are required.
- * The IOCB is always the top of the chain followed by one or more
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
+ * will used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
  * OALs (when necessary).
  */
 static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
@@ -2617,14 +2617,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
 		return NETDEV_TX_BUSY;
 	}
-
+
 	tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
 	if((tx_cb->seg_count = ql_get_seg_count(qdev,
 		(skb_shinfo(skb)->nr_frags))) == -1) {
 		printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
 		return NETDEV_TX_OK;
 	}
-
+
 	mac_iocb_ptr = tx_cb->queue_entry;
 	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
 	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
@@ -2636,12 +2636,12 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
 	if (qdev->device_id == QL3032_DEVICE_ID &&
 	    skb->ip_summed == CHECKSUM_PARTIAL)
 		ql_hw_csum_setup(skb, mac_iocb_ptr);
-
+
 	if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
 		printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
 		return NETDEV_TX_BUSY;
 	}
-
+
 	wmb();
 	qdev->req_producer_index++;
 	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
@@ -2739,7 +2739,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
 			"%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
 		return -ENOMEM;
 	}
-
+
 	qdev->lrg_buf_q_alloc_virt_addr =
 		pci_alloc_consistent(qdev->pdev,
 			qdev->lrg_buf_q_alloc_size,
|