smp_64.h

#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <linux/cpumask.h>
#include <linux/init.h>

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/mpspec.h>
#include <asm/pda.h>
#include <asm/thread_info.h>

extern cpumask_t cpu_initialized;
extern cpumask_t cpu_callin_map;

extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
                                  void *info, int wait);

#ifdef CONFIG_SMP

#define raw_smp_processor_id()          read_pda(cpunumber)
#define cpu_physical_id(cpu)            per_cpu(x86_cpu_to_apicid, cpu)

#define stack_smp_processor_id()                                        \
({                                                                      \
        struct thread_info *ti;                                         \
        __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));      \
        ti->cpu;                                                        \
})
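
/*
 * Note: stack_smp_processor_id() avoids the PDA entirely.  Kernel stacks
 * are THREAD_SIZE aligned with the thread_info at their base, so masking
 * %rsp with CURRENT_MASK (presumably ~(THREAD_SIZE - 1), defined
 * elsewhere) yields the current thread_info, whose ->cpu field holds the
 * processor number.
 */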

/*
 * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies
 * scheduling and IPI sending and compresses data structures.
 */
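
/*
 * Note: because of that 1:1 mapping, cpu_physical_id() above can simply
 * return the APIC ID kept in the x86_cpu_to_apicid per-CPU variable.
 */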

/* Number of CPUs the boot processor has called out to start (cpu_callout_map). */
static inline int num_booting_cpus(void)
{
        return cpus_weight(cpu_callout_map);
}

#else /* CONFIG_SMP */

extern unsigned int boot_cpu_id;
#define cpu_physical_id(cpu)            boot_cpu_id
#define stack_smp_processor_id()        0

#endif /* !CONFIG_SMP */
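
/*
 * Note: on x86-64 the PDA-based lookup is usable even in crash/NMI paths,
 * which is presumably why no separate fallback is needed here and the
 * "safe" variant is a plain alias for smp_processor_id().
 */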
#define safe_smp_processor_id()         smp_processor_id()

static __inline int logical_smp_processor_id(void)
{
        /* we don't want to mark this access volatile - bad code generation */
        return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}
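
/*
 * Note: logical_smp_processor_id() above reads the logical APIC ID from
 * the local APIC's LDR register; hard_smp_processor_id() below reads the
 * physical APIC ID from the APIC ID register via GET_APIC_ID(), which is
 * presumably why mach_apicdef.h is included here.
 */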
#include <mach_apicdef.h>
static inline int hard_smp_processor_id(void)
{
        /* we don't want to mark this access volatile - bad code generation */
        return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
}

#endif /* __ASM_SMP_H */