@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	rcu_read_unlock();
 }
 
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
 {
-	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
-		struct softnet_data *sd;
-		unsigned long flags;
+	struct softnet_data *sd;
+	unsigned long flags;
 
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		q->next_sched = sd->output_queue;
-		sd->output_queue = q;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
+	local_irq_save(flags);
+	sd = &__get_cpu_var(softnet_data);
+	q->next_sched = sd->output_queue;
+	sd->output_queue = q;
+	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+	local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+		__netif_reschedule(q);
 }
 EXPORT_SYMBOL(__netif_schedule);
 
@@ -1980,15 +1984,15 @@ static void net_tx_action(struct softirq_action *h)
 
 			head = head->next_sched;
 
-			smp_mb__before_clear_bit();
-			clear_bit(__QDISC_STATE_SCHED, &q->state);
-
 			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
+				smp_mb__before_clear_bit();
+				clear_bit(__QDISC_STATE_SCHED,
+					  &q->state);
 				qdisc_run(q);
 				spin_unlock(root_lock);
 			} else {
-				__netif_schedule(q);
+				__netif_reschedule(q);
 			}
 		}
 	}