@@ -17,54 +17,34 @@
  * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
  * pending   next, 3 -> {busy}          : queued, pending callback
  * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- *
- * We use the lower two bits of the next pointer to keep PENDING and BUSY
- * flags.
  */
 
 #define IRQ_WORK_PENDING	1UL
 #define IRQ_WORK_BUSY		2UL
 #define IRQ_WORK_FLAGS		3UL
 
-static inline bool irq_work_is_set(struct irq_work *entry, int flags)
-{
-	return (unsigned long)entry->next & flags;
-}
-
-static inline struct irq_work *irq_work_next(struct irq_work *entry)
-{
-	unsigned long next = (unsigned long)entry->next;
-	next &= ~IRQ_WORK_FLAGS;
-	return (struct irq_work *)next;
-}
-
-static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
-{
-	unsigned long next = (unsigned long)entry;
-	next |= flags;
-	return (struct irq_work *)next;
-}
-
-static DEFINE_PER_CPU(struct irq_work *, irq_work_list);
+static DEFINE_PER_CPU(struct llist_head, irq_work_list);
 
 /*
  * Claim the entry so that no one else will poke at it.
  */
-static bool irq_work_claim(struct irq_work *entry)
+static bool irq_work_claim(struct irq_work *work)
 {
-	struct irq_work *next, *nflags;
+	unsigned long flags, nflags;
 
-	do {
-		next = entry->next;
-		if ((unsigned long)next & IRQ_WORK_PENDING)
+	for (;;) {
+		flags = work->flags;
+		if (flags & IRQ_WORK_PENDING)
 			return false;
-		nflags = next_flags(next, IRQ_WORK_FLAGS);
-	} while (cmpxchg(&entry->next, next, nflags) != next);
+		nflags = flags | IRQ_WORK_FLAGS;
+		if (cmpxchg(&work->flags, flags, nflags) == flags)
+			break;
+		cpu_relax();
+	}
 
 	return true;
 }
 
-
 void __weak arch_irq_work_raise(void)
 {
 	/*
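
A side note on the new claim loop: the flags transitions can be modelled in plain C11 atomics. The sketch below is illustrative userspace code, not kernel code; the sk_* names are invented here, and atomic_compare_exchange_strong() stands in for the kernel's cmpxchg().

/* Standalone model of irq_work_claim(); compile with -std=c11. */
#include <stdatomic.h>
#include <stdbool.h>

#define SK_PENDING	1UL
#define SK_BUSY		2UL
#define SK_FLAGS	(SK_PENDING | SK_BUSY)

struct sk_work {
	_Atomic unsigned long flags;
};

static bool sk_claim(struct sk_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = atomic_load(&work->flags);
		if (flags & SK_PENDING)
			return false;		/* already queued, nothing to do */
		nflags = flags | SK_FLAGS;	/* free (0) or busy (2) -> claimed (3) */
		if (atomic_compare_exchange_strong(&work->flags, &flags, nflags))
			return true;		/* we own the entry now */
		/* raced with another claimer or the run loop; retry */
	}
}

int main(void)
{
	struct sk_work w = { 0 };

	/* The first claim succeeds; the second sees PENDING and fails. */
	return (sk_claim(&w) && !sk_claim(&w)) ? 0 : 1;
}
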
@@ -75,20 +55,15 @@ void __weak arch_irq_work_raise(void)
 /*
  * Queue the entry and raise the IPI if needed.
  */
-static void __irq_work_queue(struct irq_work *entry)
+static void __irq_work_queue(struct irq_work *work)
 {
-	struct irq_work *next;
+	bool empty;
 
 	preempt_disable();
 
-	do {
-		next = __this_cpu_read(irq_work_list);
-		/* Can assign non-atomic because we keep the flags set. */
-		entry->next = next_flags(next, IRQ_WORK_FLAGS);
-	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
-
+	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
 	/* The list was empty, raise self-interrupt to start processing. */
-	if (!irq_work_next(entry))
+	if (empty)
 		arch_irq_work_raise();
 
 	preempt_enable();
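
The one property __irq_work_queue() needs from llist_add() is its return value: whether the list was empty before the new node went in, since only the first enqueuer on a CPU must raise the self-interrupt. A rough userspace model of such a push (sk_* names invented for illustration; the real llist_add() lives in lib/llist.c):

/* Lock-less LIFO push that reports prior emptiness; -std=c11. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct sk_node {
	struct sk_node *next;
};

struct sk_head {
	_Atomic(struct sk_node *) first;
};

static bool sk_push(struct sk_node *node, struct sk_head *head)
{
	struct sk_node *first = atomic_load(&head->first);

	do {
		node->next = first;	/* link in before publishing the node */
	} while (!atomic_compare_exchange_weak(&head->first, &first, node));

	return first == NULL;	/* true: caller should raise the IPI */
}

int main(void)
{
	struct sk_head head = { NULL };
	struct sk_node a, b;

	/* Only the push onto an empty list reports true. */
	return (sk_push(&a, &head) && !sk_push(&b, &head)) ? 0 : 1;
}
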
@@ -100,16 +75,16 @@ static void __irq_work_queue(struct irq_work *entry)
  *
  * Can be re-enqueued while the callback is still in progress.
  */
-bool irq_work_queue(struct irq_work *entry)
+bool irq_work_queue(struct irq_work *work)
 {
-	if (!irq_work_claim(entry)) {
+	if (!irq_work_claim(work)) {
 		/*
 		 * Already enqueued, can't do!
 		 */
 		return false;
 	}
 
-	__irq_work_queue(entry);
+	__irq_work_queue(work);
 	return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
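
As a usage note, a hypothetical caller looks roughly like the sketch below (the my_* names are invented, not part of this patch). irq_work_queue() is safe to call from NMI context; it simply returns false when the entry is already pending.

#include <linux/irq_work.h>
#include <linux/printk.h>

static void my_followup(struct irq_work *work)
{
	/* Runs from the self-IPI: hardirq context, not NMI. */
	pr_info("deferred out of NMI\n");
}

/* flags zeroed -> the entry starts in the free state */
static struct irq_work my_work = {
	.func	= my_followup,
};

static void my_nmi_handler(void)
{
	/* Re-queueing while still pending is a no-op and reports false. */
	irq_work_queue(&my_work);
}
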
@@ -120,34 +95,34 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
  */
 void irq_work_run(void)
 {
-	struct irq_work *list;
+	struct irq_work *work;
+	struct llist_head *this_list;
+	struct llist_node *llnode;
 
-	if (this_cpu_read(irq_work_list) == NULL)
+	this_list = &__get_cpu_var(irq_work_list);
+	if (llist_empty(this_list))
 		return;
 
 	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
-	list = this_cpu_xchg(irq_work_list, NULL);
-
-	while (list != NULL) {
-		struct irq_work *entry = list;
+	llnode = llist_del_all(this_list);
+	while (llnode != NULL) {
+		work = llist_entry(llnode, struct irq_work, llnode);
 
-		list = irq_work_next(list);
+		llnode = llnode->next;
 
 		/*
-		 * Clear the PENDING bit, after this point the @entry
+		 * Clear the PENDING bit, after this point the @work
 		 * can be re-used.
 		 */
-		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
-		entry->func(entry);
+		work->flags = IRQ_WORK_BUSY;
+		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&entry->next,
-			      next_flags(NULL, IRQ_WORK_BUSY),
-			      NULL);
+		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
 	}
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
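
Note the order in the run loop above: llist_del_all() detaches the whole per-CPU list in one atomic exchange, and each node's next pointer is read before the callback runs, because the callback is free to re-queue the very node being processed. A toy model of that snapshot-and-walk pattern (again with invented sk_* names):

/* Detach-all, then walk a private snapshot; -std=c11. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct sk_node {
	struct sk_node *next;
};

struct sk_head {
	_Atomic(struct sk_node *) first;
};

static struct sk_node *sk_del_all(struct sk_head *head)
{
	/* Concurrent pushes after this point start a fresh list. */
	return atomic_exchange(&head->first, NULL);
}

int main(void)
{
	struct sk_node a = { NULL }, b = { &a };	/* b -> a, LIFO order */
	struct sk_head head = { &b };
	struct sk_node *node = sk_del_all(&head);

	while (node != NULL) {
		struct sk_node *next = node->next;	/* read before "running" */

		printf("visiting %p\n", (void *)node);	/* callback may re-push node */
		node = next;
	}
	return 0;
}
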
@@ -156,11 +131,11 @@ EXPORT_SYMBOL_GPL(irq_work_run);
  * Synchronize against the irq_work @entry, ensures the entry is not
  * currently in use.
  */
-void irq_work_sync(struct irq_work *entry)
+void irq_work_sync(struct irq_work *work)
 {
 	WARN_ON_ONCE(irqs_disabled());
 
-	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
+	while (work->flags & IRQ_WORK_BUSY)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
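
Finally, a hypothetical teardown sketch for irq_work_sync() (the my_dev naming is invented): because claiming sets BUSY and the run loop only clears it after the callback returns, spinning until BUSY drops guarantees no callback still references the object.

#include <linux/irq_work.h>
#include <linux/slab.h>

struct my_dev {
	struct irq_work	work;
	int		stats;
};

static void my_dev_free(struct my_dev *dev)
{
	/* Must run with IRQs enabled: sync busy-waits on the flag. */
	irq_work_sync(&dev->work);
	kfree(dev);
}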