hardirq.h

#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

typedef struct {
        unsigned int __softirq_pending;
        unsigned int __nmi_count;       /* arch dependent */
        unsigned int irq0_irqs;
#ifdef CONFIG_X86_LOCAL_APIC
        unsigned int apic_timer_irqs;   /* arch dependent */
        unsigned int irq_spurious_count;
#endif
        unsigned int generic_irqs;      /* arch dependent */
        unsigned int apic_perf_irqs;
        unsigned int apic_pending_irqs;
#ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
        unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
        unsigned int irq_thermal_count;
# ifdef CONFIG_X86_MCE_THRESHOLD
        unsigned int irq_threshold_count;
# endif
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS
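
/*
 * __ARCH_IRQ_STAT and __ARCH_SET_SOFTIRQ_PENDING below tell the generic
 * code (linux/irq_cpustat.h and linux/interrupt.h) to use these x86
 * per-cpu accessors instead of its array-based defaults.
 */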
#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)    percpu_add(irq_stat.member, 1)

#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)  percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)   percpu_or(irq_stat.__softirq_pending, (x))
extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu       arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat           arch_irq_stat

#endif /* _ASM_X86_HARDIRQ_H */
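
A minimal usage sketch, not part of the header: how an x86 interrupt handler bumps one of the per-cpu counters declared in irq_cpustat_t via inc_irq_stat(). The handler name here is hypothetical; the real call sites live under arch/x86/kernel/ (for example the reschedule IPI handler).

/* Hypothetical handler, for illustration only. */
#include <linux/ptrace.h>
#include <asm/apic.h>
#include <asm/hardirq.h>

void example_resched_interrupt(struct pt_regs *regs)
{
        ack_APIC_irq();                         /* ack the local APIC      */
        inc_irq_stat(irq_resched_count);        /* percpu_add() on this cpu */
        /* actual rescheduling work would follow here */
}

The generic softirq code then reads the pending mask through local_softirq_pending(), and /proc/stat sums these counters through arch_irq_stat_cpu() and arch_irq_stat().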