smp.h

#ifndef __ASM_SMP_H
#define __ASM_SMP_H

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
extern int disable_apic;

#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/thread_info.h>

#ifdef CONFIG_SMP

#include <asm/pda.h>

struct pt_regs;
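
/*
 * CPU bring-up bitmaps: cpu_present_mask holds the CPUs that are
 * physically present, cpu_possible_map every CPU that could ever be
 * brought online, cpu_online_map the CPUs currently running,
 * cpu_callout_map the CPUs the boot processor has started bringing up,
 * and cpu_initialized the CPUs that have completed cpu_init().
 */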
extern cpumask_t cpu_present_mask;
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;

/*
 * Private routines/data
 */
extern void smp_alloc_memory(void);
extern volatile unsigned long smp_invalidate_needed;
extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);
void smp_stop_cpu(void);
extern cpumask_t cpu_sibling_map[NR_CPUS];	/* hyperthread siblings of each CPU */
extern cpumask_t cpu_core_map[NR_CPUS];		/* CPUs in the same physical package */
extern u8 cpu_llc_id[NR_CPUS];			/* last-level cache ID of each CPU */
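
/*
 * Low physical address the real-mode startup trampoline is copied to
 * before secondary CPUs are kicked with INIT/STARTUP IPIs.
 */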
#define SMP_TRAMPOLINE_BASE 0x6000

/*
 * On x86 all CPUs are mapped 1:1 to the APIC space.
 * This simplifies scheduling and IPI sending and
 * compresses data structures.
 */
static inline int num_booting_cpus(void)
{
        return cpus_weight(cpu_callout_map);
}
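
/*
 * The current CPU number lives in the per-CPU PDA, reached through the
 * %gs segment, so no APIC access or stack arithmetic is needed here.
 */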
#define raw_smp_processor_id() read_pda(cpunumber)
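
/* Physical APIC ID of the running CPU, read from the local APIC's ID register. */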
static inline int hard_smp_processor_id(void)
{
        /* we don't want to mark this access volatile - bad code generation */
        return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
}
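
/*
 * CPU hotplug hooks: __cpu_disable() takes the calling CPU offline,
 * __cpu_die() lets a surviving CPU wait for it to finish dying, and
 * prefill_possible_map() sets up cpu_possible_map early during boot.
 */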
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void prefill_possible_map(void);
extern unsigned num_processors;
extern unsigned disabled_cpus;

#define NO_PROC_ID	0xFF	/* No processor magic marker */

#endif /* CONFIG_SMP */
/*
 * Some lowlevel functions might want to know about
 * the real APIC ID <-> CPU # mapping.
 */
extern u8 x86_cpu_to_apicid[NR_CPUS];	/* physical ID */
extern u8 x86_cpu_to_log_apicid[NR_CPUS];
extern u8 bios_cpu_apicid[];
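
/*
 * This assumes the flat logical delivery mode, where each CPU's bit in a
 * cpumask is also its bit in the logical APIC destination: the low word
 * of the mask can then be used directly as an IPI destination.
 */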
static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
{
        return cpus_addr(cpumask)[0];
}
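
/*
 * Map an MP-table CPU slot to the APIC ID the BIOS reported for it;
 * out-of-range slots get BAD_APICID.
 */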
static inline int cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < NR_CPUS)
                return (int)bios_cpu_apicid[mps_cpu];
        else
                return BAD_APICID;
}
#ifndef CONFIG_SMP
#define stack_smp_processor_id() 0
#define cpu_logical_map(x) (x)
#else
#include <asm/thread_info.h>
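
/*
 * Recover the CPU number from the kernel stack instead of the PDA: mask
 * the stack pointer down to the thread_info at the base of the stack and
 * read its cpu field.
 */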
#define stack_smp_processor_id() \
({ \
        struct thread_info *ti; \
        __asm__("andq %%rsp,%0; " : "=r" (ti) : "0" (CURRENT_MASK)); \
        ti->cpu; \
})
#endif
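
/* Logical APIC ID of the running CPU, read from the local APIC's logical destination register. */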
static __inline int logical_smp_processor_id(void)
{
        /* we don't want to mark this access volatile - bad code generation */
        return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
}
#ifdef CONFIG_SMP
#define cpu_physical_id(cpu)	x86_cpu_to_apicid[cpu]
#else
#define cpu_physical_id(cpu)	boot_cpu_id
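
/*
 * On uniprocessor builds a "cross-CPU" call simply runs the function on
 * the one and only CPU.
 */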
static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
                                           void *info, int retry, int wait)
{
        /* Disable interrupts here? */
        func(info);
        return 0;
}
#endif /* !CONFIG_SMP */

#endif /* __ASM_SMP_H */