migration.c

#include <linux/irq.h>

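/*
 * move_masked_irq - apply a pending affinity change to an IRQ that the
 * caller has already masked.  Takes the affinity recorded in
 * desc->pending_mask and programs it via the chip's set_affinity() hook.
 * Must be called with desc->lock held.
 */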
void move_masked_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (likely(!(desc->status & IRQ_MOVE_PENDING)))
                return;

        /*
         * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
         */
        if (CHECK_IRQ_PER_CPU(desc->status)) {
                WARN_ON(1);
                return;
        }

        desc->status &= ~IRQ_MOVE_PENDING;

        if (unlikely(cpumask_empty(&desc->pending_mask)))
                return;

        if (!desc->chip->set_affinity)
                return;

        assert_spin_locked(&desc->lock);

        /*
         * If there was a valid mask to work with, please
         * do the disable, re-program, enable sequence.
         * This is *not* particularly important for level-triggered
         * interrupts, but in an edge-trigger case we might be setting
         * the RTE when an active trigger is coming in.  This could
         * cause some IO-APICs to malfunction.
         * Being paranoid, I guess!
         *
         * For correct operation this depends on the caller
         * masking the irqs.
         */
        if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
                   < nr_cpu_ids)) {
                cpumask_and(&desc->affinity,
                            &desc->pending_mask, cpu_online_mask);
                desc->chip->set_affinity(irq, &desc->affinity);
        }
        cpumask_clear(&desc->pending_mask);
}

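/*
 * move_native_irq - handle a pending migration for an unmasked IRQ.
 * Masks the interrupt, delegates to move_masked_irq(), and unmasks it
 * again, so the affinity change is applied while no trigger can arrive.
 */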
void move_native_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (likely(!(desc->status & IRQ_MOVE_PENDING)))
                return;

        if (unlikely(desc->status & IRQ_DISABLED))
                return;

        desc->chip->mask(irq);
        move_masked_irq(irq);
        desc->chip->unmask(irq);
}