@@ -118,6 +118,7 @@ static struct of_device_id ehea_device_table[] = {
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, ehea_device_table);
 
 static struct of_platform_driver ehea_driver = {
 	.name = "ehea",
@@ -137,6 +138,12 @@ void ehea_dump(void *adr, int len, char *msg)
 	}
 }
 
+void ehea_schedule_port_reset(struct ehea_port *port)
+{
+	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
+		schedule_work(&port->reset_task);
+}
+
 static void ehea_update_firmware_handles(void)
 {
 	struct ehea_fw_handle_entry *arr = NULL;
@@ -241,7 +248,7 @@ static void ehea_update_bcmc_registrations(void)
 		}
 
 	if (num_registrations) {
-		arr = kzalloc(num_registrations * sizeof(*arr), GFP_KERNEL);
+		arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
 		if (!arr)
 			return;  /* Keep the existing array */
 	} else
@@ -301,7 +308,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 
 	memset(stats, 0, sizeof(*stats));
 
-	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
 	if (!cb2) {
 		ehea_error("no mem for cb2");
 		goto out;
@@ -587,7 +594,7 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 				   "Resetting port.", pr->qp->init_attr.qp_nr);
 			ehea_dump(cqe, sizeof(*cqe), "CQE");
 		}
-		schedule_work(&pr->port->reset_task);
+		ehea_schedule_port_reset(pr->port);
 		return 1;
 	}
 
@@ -616,7 +623,7 @@ static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
 	*tcph = tcp_hdr(skb);
 
 	/* check if ip header and tcp header are complete */
-	if (iph->tot_len < ip_len + tcp_hdrlen(skb))
+	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
 		return -1;
 
 	*hdr_flags = LRO_IPV4 | LRO_TCP;
@@ -765,7 +772,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 			ehea_error("Send Completion Error: Resetting port");
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
-			schedule_work(&pr->port->reset_task);
+			ehea_schedule_port_reset(pr->port);
 			break;
 		}
 
@@ -885,7 +892,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 		eqe = ehea_poll_eq(port->qp_eq);
 	}
 
-	schedule_work(&port->reset_task);
+	ehea_schedule_port_reset(port);
 
 	return IRQ_HANDLED;
 }
@@ -1763,7 +1770,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
 	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
 
-	mutex_lock(&ehea_bcmc_regs.lock);
+	spin_lock(&ehea_bcmc_regs.lock);
 
 	/* Deregister old MAC in pHYP */
 	if (port->state == EHEA_PORT_UP) {
@@ -1785,7 +1792,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
 out_upregs:
 	ehea_update_bcmc_registrations();
-	mutex_unlock(&ehea_bcmc_regs.lock);
+	spin_unlock(&ehea_bcmc_regs.lock);
 out_free:
 	kfree(cb0);
 out:
@@ -1947,7 +1954,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
 	}
 	ehea_promiscuous(dev, 0);
 
-	mutex_lock(&ehea_bcmc_regs.lock);
+	spin_lock(&ehea_bcmc_regs.lock);
 
 	if (dev->flags & IFF_ALLMULTI) {
 		ehea_allmulti(dev, 1);
@@ -1978,7 +1985,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
 	}
 out:
 	ehea_update_bcmc_registrations();
-	mutex_unlock(&ehea_bcmc_regs.lock);
+	spin_unlock(&ehea_bcmc_regs.lock);
 	return;
 }
 
@@ -2497,7 +2504,7 @@ static int ehea_up(struct net_device *dev)
 		}
 	}
 
-	mutex_lock(&ehea_bcmc_regs.lock);
+	spin_lock(&ehea_bcmc_regs.lock);
 
 	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
 	if (ret) {
@@ -2520,7 +2527,7 @@ out:
 		ehea_info("Failed starting %s. ret=%i", dev->name, ret);
 
 	ehea_update_bcmc_registrations();
-	mutex_unlock(&ehea_bcmc_regs.lock);
+	spin_unlock(&ehea_bcmc_regs.lock);
 
 	ehea_update_firmware_handles();
 	mutex_unlock(&ehea_fw_handles.lock);
@@ -2575,7 +2582,7 @@ static int ehea_down(struct net_device *dev)
 
 	mutex_lock(&ehea_fw_handles.lock);
 
-	mutex_lock(&ehea_bcmc_regs.lock);
+	spin_lock(&ehea_bcmc_regs.lock);
 	ehea_drop_multicast_list(dev);
 	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 
@@ -2584,7 +2591,7 @@ static int ehea_down(struct net_device *dev)
 	port->state = EHEA_PORT_DOWN;
 
 	ehea_update_bcmc_registrations();
-	mutex_unlock(&ehea_bcmc_regs.lock);
+	spin_unlock(&ehea_bcmc_regs.lock);
 
 	ret = ehea_clean_all_portres(port);
 	if (ret)
@@ -2605,13 +2612,14 @@ static int ehea_stop(struct net_device *dev)
 	if (netif_msg_ifdown(port))
 		ehea_info("disabling port %s", dev->name);
 
+	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
 	cancel_work_sync(&port->reset_task);
-
 	mutex_lock(&port->port_lock);
 	netif_stop_queue(dev);
 	port_napi_disable(port);
 	ret = ehea_down(dev);
 	mutex_unlock(&port->port_lock);
+	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
 	return ret;
 }
 
@@ -2941,7 +2949,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
 
 	if (netif_carrier_ok(dev) &&
 	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
-		schedule_work(&port->reset_task);
+		ehea_schedule_port_reset(port);
 }
 
 int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
@@ -3590,7 +3598,7 @@ int __init ehea_module_init(void)
 	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
 
 	mutex_init(&ehea_fw_handles.lock);
-	mutex_init(&ehea_bcmc_regs.lock);
+	spin_lock_init(&ehea_bcmc_regs.lock);
 
 	ret = check_module_parm();
 	if (ret)
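
For reference, a minimal sketch of the reset-guard pattern the patch introduces, kept outside the hunks above. It is illustrative only and not part of the patch; struct demo_port, DEMO_DISABLE_RESET, demo_schedule_port_reset() and demo_stop() are hypothetical names standing in for ehea_port, __EHEA_DISABLE_PORT_RESET, ehea_schedule_port_reset() and ehea_stop().

/* Illustrative sketch (hypothetical names): guard a reset worker with a
 * flag bit so it cannot be re-queued while the port is being shut down.
 */
#include <linux/bitops.h>
#include <linux/workqueue.h>

enum { DEMO_DISABLE_RESET };		/* bit index in flags */

struct demo_port {
	unsigned long flags;
	struct work_struct reset_task;
};

/* Only queue the reset work while resets are not disabled. */
static void demo_schedule_port_reset(struct demo_port *port)
{
	if (!test_bit(DEMO_DISABLE_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

/* Shutdown path: block new resets, flush any pending one, tear the port
 * down, then allow resets again.
 */
static void demo_stop(struct demo_port *port)
{
	set_bit(DEMO_DISABLE_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	/* ... bring the port down ... */
	clear_bit(DEMO_DISABLE_RESET, &port->flags);
}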