smp.h

#ifndef __ASM_SMP_H
#define __ASM_SMP_H

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifndef __ASSEMBLY__
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
extern int disable_apic;
#endif

#ifdef CONFIG_X86_LOCAL_APIC
#ifndef __ASSEMBLY__
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
#endif
#include <asm/apic.h>
#include <asm/thread_info.h>
#endif
#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_SMP
#ifndef ASSEMBLY

#include <asm/pda.h>

struct pt_regs;

extern cpumask_t cpu_present_mask;
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;

/*
 * Private routines/data
 */
extern void smp_alloc_memory(void);
extern volatile unsigned long smp_invalidate_needed;
extern int pic_mode;
extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);
void smp_stop_cpu(void);

extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
extern u8 cpu_llc_id[NR_CPUS];
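
/*
 * Physical address to which the real-mode trampoline used to start
 * secondary CPUs is copied at boot.
 */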
#define SMP_TRAMPOLINE_BASE 0x6000

/*
 * On x86 all CPUs are mapped 1:1 to the APIC space.
 * This simplifies scheduling and IPI sending and
 * compresses data structures.
 */
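/* Number of CPUs the boot processor has called out to bring up. */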
static inline int num_booting_cpus(void)
{
        return cpus_weight(cpu_callout_map);
}
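
/* The current CPU number is kept in the per-CPU PDA, reached via %gs. */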
#define raw_smp_processor_id() read_pda(cpunumber)
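
/* Read this CPU's physical APIC ID directly from the memory-mapped local APIC. */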
static inline int hard_smp_processor_id(void)
{
        /* we don't want to mark this access volatile - bad code generation */
        return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
}

extern int safe_smp_processor_id(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void prefill_possible_map(void);
extern unsigned num_processors;
extern unsigned disabled_cpus;

#endif /* !ASSEMBLY */

#define NO_PROC_ID 0xFF /* No processor magic marker */

#endif /* CONFIG_SMP */

#ifndef ASSEMBLY
/*
 * Some lowlevel functions might want to know about
 * the real APIC ID <-> CPU # mapping.
 */
extern u8 x86_cpu_to_apicid[NR_CPUS];   /* physical ID */
extern u8 x86_cpu_to_log_apicid[NR_CPUS];
extern u8 bios_cpu_apicid[];
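
/*
 * With flat logical addressing each CPU owns one bit of the logical
 * destination, so the low word of the cpumask can be used directly
 * as an APIC destination.
 */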
static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
{
        return cpus_addr(cpumask)[0];
}
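
/* Translate an MP-table CPU index into the APIC ID recorded by the BIOS. */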
static inline int cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < NR_CPUS)
                return (int)bios_cpu_apicid[mps_cpu];
        else
                return BAD_APICID;
}

#endif /* !ASSEMBLY */

#ifndef CONFIG_SMP
#define stack_smp_processor_id() 0
#define safe_smp_processor_id() 0
#define cpu_logical_map(x) (x)
#else
#include <asm/thread_info.h>
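/*
 * thread_info sits at the bottom of the kernel stack, so masking %rsp
 * with CURRENT_MASK locates it; the CPU number is read from there.
 */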
#define stack_smp_processor_id() \
({ \
        struct thread_info *ti; \
        __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
        ti->cpu; \
})
#endif

#ifndef __ASSEMBLY__
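/* Read the logical APIC ID from the local APIC's LDR register. */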
static __inline int logical_smp_processor_id(void)
{
        /* we don't want to mark this access volatile - bad code generation */
        return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
}
#endif
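
/* Map a CPU number to its physical APIC ID (the boot CPU's ID on UP). */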
#ifdef CONFIG_SMP
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
#else
#define cpu_physical_id(cpu) boot_cpu_id
#endif

#endif /* __ASM_SMP_H */