irq_work.c

/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/hardirq.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING        1UL
#define IRQ_WORK_BUSY           2UL
#define IRQ_WORK_FLAGS          3UL

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, nflags;

        for (;;) {
                flags = work->flags;
                if (flags & IRQ_WORK_PENDING)
                        return false;
                nflags = flags | IRQ_WORK_FLAGS;
                if (cmpxchg(&work->flags, flags, nflags) == flags)
                        break;
                cpu_relax();
        }

        return true;
}
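
/*
 * Illustrative note (not part of the original file): a free entry has
 * flags == 0 and is claimed by setting both low bits, i.e. 0 -> 3.  While
 * the callback runs, irq_work_run() leaves only IRQ_WORK_BUSY set
 * (flags == 2), so a concurrent irq_work_claim() can still succeed by
 * moving 2 -> 3 with the cmpxchg() above, re-arming the entry.
 */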

void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
        bool empty;

        preempt_disable();

        empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
        /* The list was empty, raise self-interrupt to start processing. */
        if (empty)
                arch_irq_work_raise();

        preempt_enable();
}
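
/*
 * Illustrative note (not part of the original file): preempt_disable()
 * above keeps the task on one CPU across llist_add(), so the entry goes
 * onto the same per-cpu list that the self-interrupt raised by
 * arch_irq_work_raise() will later process.
 */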

/*
 * Enqueue the irq_work @entry, returns true on success and false when the
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
        if (!irq_work_claim(work)) {
                /*
                 * Already enqueued, can't do!
                 */
                return false;
        }

        __irq_work_queue(work);
        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
        struct irq_work *work;
        struct llist_head *this_list;
        struct llist_node *llnode;

        this_list = &__get_cpu_var(irq_work_list);
        if (llist_empty(this_list))
                return;

        BUG_ON(!in_irq());
        BUG_ON(!irqs_disabled());

        llnode = llist_del_all(this_list);
        while (llnode != NULL) {
                work = llist_entry(llnode, struct irq_work, llnode);

                llnode = llist_next(llnode);

                /*
                 * Clear the PENDING bit, after this point the @work
                 * can be re-used.
                 */
                work->flags = IRQ_WORK_BUSY;
                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
        }
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        WARN_ON_ONCE(irqs_disabled());

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
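
/*
 * Usage sketch (illustrative only, not part of the original file).  It
 * assumes the struct irq_work definition and the init_irq_work() helper
 * from <linux/irq_work.h> of this era; the example_* names below are
 * hypothetical.
 */

#include <linux/kernel.h>
#include <linux/irq_work.h>

static void example_work_func(struct irq_work *work)
{
        /* Invoked later from irq_work_run(), i.e. hardirq context. */
        pr_info("deferred irq_work callback ran\n");
}

static struct irq_work example_work;

static void example_setup(void)
{
        /* Sets flags = 0 (free) and func = example_work_func, once at init. */
        init_irq_work(&example_work, example_work_func);
}

/* Callable from NMI context: enqueueing is lock-free and NMI-safe. */
static void example_poke(void)
{
        /*
         * Returns false if the entry is already pending; that is fine,
         * the callback will still run once for the earlier enqueue.
         */
        irq_work_queue(&example_work);
}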