/* smp_64.h — x86-64 <asm/smp.h>: SMP declarations and helpers */
  1. #ifndef __ASM_SMP_H
  2. #define __ASM_SMP_H
  3. /*
  4. * We need the APIC definitions automatically as part of 'smp.h'
  5. */
  6. #include <linux/threads.h>
  7. #include <linux/cpumask.h>
  8. #include <linux/bitops.h>
  9. #include <linux/init.h>
  10. extern int disable_apic;
  11. #include <asm/mpspec.h>
  12. #include <asm/apic.h>
  13. #include <asm/io_apic.h>
  14. #include <asm/thread_info.h>
#ifdef CONFIG_SMP

#include <asm/pda.h>

struct pt_regs;

/* CPU masks tracking the boot/online state of each processor. */
extern cpumask_t cpu_present_mask;
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_callout_map;	/* CPUs the boot CPU has asked to come up */
extern cpumask_t cpu_initialized;

/*
 * Private routines/data
 */
extern void smp_alloc_memory(void);
extern volatile unsigned long smp_invalidate_needed;
extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings;		/* SMT siblings per physical core */
extern void smp_send_reschedule(int cpu);
extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
				  void *info, int wait);

/*
 * cpu_sibling_map and cpu_core_map now live
 * in the per cpu area
 *
 * extern cpumask_t cpu_sibling_map[NR_CPUS];
 * extern cpumask_t cpu_core_map[NR_CPUS];
 */
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(u8, cpu_llc_id);	/* last-level-cache ID (cache-sharing domains) */

/* Physical address the secondary-CPU boot trampoline is copied to. */
#define SMP_TRAMPOLINE_BASE 0x6000
  45. /*
  46. * On x86 all CPUs are mapped 1:1 to the APIC space.
  47. * This simplifies scheduling and IPI sending and
  48. * compresses data structures.
  49. */
  50. static inline int num_booting_cpus(void)
  51. {
  52. return cpus_weight(cpu_callout_map);
  53. }
/* Fast current-CPU lookup via the per-cpu PDA. */
#define raw_smp_processor_id() read_pda(cpunumber)

/* CPU-hotplug entry points. */
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void prefill_possible_map(void);
extern unsigned num_processors;		/* CPUs found in the MP table / ACPI */
extern unsigned __cpuinitdata disabled_cpus;

#define NO_PROC_ID 0xFF /* No processor magic marker */

#endif /* CONFIG_SMP */
/* On x86-64 smp_processor_id() is always safe, even early in boot. */
#define safe_smp_processor_id() smp_processor_id()

/*
 * Read the physical APIC ID of the running CPU directly from the
 * memory-mapped local-APIC ID register.
 */
static inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
}
/*
 * Some lowlevel functions might want to know about
 * the real APIC ID <-> CPU # mapping.
 */
extern u8 __initdata x86_cpu_to_apicid_init[];	/* boot-time map, freed after init */
extern void *x86_cpu_to_apicid_ptr;		/* points at whichever map is current */
DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */
extern u8 bios_cpu_apicid[];		/* APIC IDs as reported by the BIOS/MP table */
  76. static inline int cpu_present_to_apicid(int mps_cpu)
  77. {
  78. if (mps_cpu < NR_CPUS)
  79. return (int)bios_cpu_apicid[mps_cpu];
  80. else
  81. return BAD_APICID;
  82. }
#ifndef CONFIG_SMP
/* UP build: only CPU 0 exists and the logical map is the identity. */
#define stack_smp_processor_id() 0
#define cpu_logical_map(x) (x)
#else
#include <asm/thread_info.h>
/*
 * Derive the CPU number from the kernel stack: masking %rsp with
 * CURRENT_MASK yields the base of the current stack, where the
 * thread_info (and its ->cpu field) lives.  Usable before the
 * per-cpu PDA is set up.
 */
#define stack_smp_processor_id() \
({ \
	struct thread_info *ti; \
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
	ti->cpu; \
})
#endif
  95. static __inline int logical_smp_processor_id(void)
  96. {
  97. /* we don't want to mark this access volatile - bad code generation */
  98. return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
  99. }
#ifdef CONFIG_SMP
/* Physical APIC ID of a given CPU, from the per-cpu map. */
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
#else
extern unsigned int boot_cpu_id;	/* APIC ID of the (only) boot CPU */
#define cpu_physical_id(cpu) boot_cpu_id
#endif /* !CONFIG_SMP */

#endif /* __ASM_SMP_H */