|
@@ -423,11 +423,18 @@ static void enic_mtu_check(struct enic *enic)
|
|
|
|
|
|
if (mtu && mtu != enic->port_mtu) {
|
|
|
enic->port_mtu = mtu;
|
|
|
- if (mtu < netdev->mtu)
|
|
|
- netdev_warn(netdev,
|
|
|
- "interface MTU (%d) set higher "
|
|
|
- "than switch port MTU (%d)\n",
|
|
|
- netdev->mtu, mtu);
|
|
|
+ if (enic_is_dynamic(enic)) {
|
|
|
+ mtu = max_t(int, ENIC_MIN_MTU,
|
|
|
+ min_t(int, ENIC_MAX_MTU, mtu));
|
|
|
+ if (mtu != netdev->mtu)
|
|
|
+ schedule_work(&enic->change_mtu_work);
|
|
|
+ } else {
|
|
|
+ if (mtu < netdev->mtu)
|
|
|
+ netdev_warn(netdev,
|
|
|
+ "interface MTU (%d) set higher "
|
|
|
+ "than switch port MTU (%d)\n",
|
|
|
+ netdev->mtu, mtu);
|
|
|
+ }
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -1688,6 +1695,9 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
|
|
|
if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
|
|
|
return -EINVAL;
|
|
|
|
|
|
+ if (enic_is_dynamic(enic))
|
|
|
+ return -EOPNOTSUPP;
|
|
|
+
|
|
|
if (running)
|
|
|
enic_stop(netdev);
|
|
|
|
|
@@ -1704,6 +1714,55 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+/* Worker to apply a switch-port MTU change on a dynamic (profile-managed)
+ * vNIC: scheduled from enic_mtu_check() when the port MTU reported by the
+ * firmware differs from the interface MTU.  Quiesces RQ 0, sets the new
+ * MTU, refills the ring with new_mtu-sized buffers, and restarts the path.
+ *
+ * Runs in process context; takes RTNL to serialize against other netdev
+ * reconfiguration.  Every exit path MUST drop RTNL — returning with it
+ * held would deadlock all later netlink/ndo operations.
+ */
+static void enic_change_mtu_work(struct work_struct *work)
+{
+	struct enic *enic = container_of(work, struct enic, change_mtu_work);
+	struct net_device *netdev = enic->netdev;
+	int new_mtu = vnic_dev_mtu(enic->vdev);
+	int err;
+	unsigned int i;
+
+	/* Clamp the firmware-reported port MTU to the device's legal range */
+	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
+
+	rtnl_lock();
+
+	/* Stop RQ */
+	del_timer_sync(&enic->notify_timer);
+
+	for (i = 0; i < enic->rq_count; i++)
+		napi_disable(&enic->napi[i]);
+
+	vnic_intr_mask(&enic->intr[0]);
+	enic_synchronize_irqs(enic);
+	err = vnic_rq_disable(&enic->rq[0]);
+	if (err) {
+		netdev_err(netdev, "Unable to disable RQ.\n");
+		goto out_unlock;	/* do NOT return with RTNL held */
+	}
+	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
+	vnic_cq_clean(&enic->cq[0]);
+	vnic_intr_clean(&enic->intr[0]);
+
+	/* Fill RQ with new_mtu-sized buffers */
+	netdev->mtu = new_mtu;
+	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+	/* Need at least one buffer on ring to get going */
+	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
+		netdev_err(netdev, "Unable to alloc receive buffers.\n");
+		goto out_unlock;	/* do NOT return with RTNL held */
+	}
+
+	/* Start RQ.  NOTE(review): only queue 0 is re-enabled while all
+	 * rq_count NAPI contexts were disabled above — presumably this path
+	 * only runs with a single RQ configured; confirm against rq_count
+	 * for dynamic vNICs.
+	 */
+	vnic_rq_enable(&enic->rq[0]);
+	napi_enable(&enic->napi[0]);
+	vnic_intr_unmask(&enic->intr[0]);
+	enic_notify_timer_start(enic);
+
+	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
+
+out_unlock:
+	rtnl_unlock();
+}
|
|
|
+
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
static void enic_poll_controller(struct net_device *netdev)
|
|
|
{
|
|
@@ -2345,6 +2404,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
|
|
|
enic->notify_timer.data = (unsigned long)enic;
|
|
|
|
|
|
INIT_WORK(&enic->reset, enic_reset);
|
|
|
+ INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
|
|
|
|
|
|
for (i = 0; i < enic->wq_count; i++)
|
|
|
spin_lock_init(&enic->wq_lock[i]);
|
|
@@ -2427,6 +2487,7 @@ static void __devexit enic_remove(struct pci_dev *pdev)
|
|
|
struct enic *enic = netdev_priv(netdev);
|
|
|
|
|
|
cancel_work_sync(&enic->reset);
|
|
|
+ cancel_work_sync(&enic->change_mtu_work);
|
|
|
unregister_netdev(netdev);
|
|
|
enic_dev_deinit(enic);
|
|
|
vnic_dev_close(enic->vdev);
|