/*
 * mach_apic.h — APIC driver definitions for x86 "bigsmp" platforms
 * (physical flat destination mode, used on systems with many CPUs).
 */
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H

/*
 * Logical APIC ID for @cpu.  In bigsmp (physical flat) mode the logical
 * ID programmed into the LDR is simply the BIOS-reported physical APIC ID
 * (see calculate_ldr() below).
 */
#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
  4. static inline int bigsmp_apic_id_registered(void)
  5. {
  6. return 1;
  7. }
/*
 * Default set of CPUs interrupts may target: all online CPUs on SMP
 * builds, CPU 0 only on UP builds.
 */
static inline const cpumask_t *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
	return &cpu_online_map;
#else
	return &cpumask_of_cpu(0);
#endif
}
/* Destination format written to the DFR by bigsmp_init_apic_ldr(). */
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
/* No APIC ID is ever considered "used" in bigsmp mode; @bitmap is ignored. */
static inline unsigned long
bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
  22. static inline unsigned long bigsmp_check_apicid_present(int bit)
  23. {
  24. return 1;
  25. }
  26. static inline unsigned long calculate_ldr(int cpu)
  27. {
  28. unsigned long val, id;
  29. val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
  30. id = xapic_phys_to_log_apicid(cpu);
  31. val |= SET_APIC_LOGICAL_ID(id);
  32. return val;
  33. }
  34. /*
  35. * Set up the logical destination ID.
  36. *
  37. * Intel recommends to set DFR, LDR and TPR before enabling
  38. * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
  39. * document number 292116). So here it goes...
  40. */
  41. static inline void bigsmp_init_apic_ldr(void)
  42. {
  43. unsigned long val;
  44. int cpu = smp_processor_id();
  45. apic_write(APIC_DFR, APIC_DFR_VALUE);
  46. val = calculate_ldr(cpu);
  47. apic_write(APIC_LDR, val);
  48. }
  49. static inline void bigsmp_setup_apic_routing(void)
  50. {
  51. printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
  52. "Physflat", nr_ioapics);
  53. }
/*
 * NUMA node for an APIC ID.
 * NOTE(review): @logical_apicid is ignored; the lookup keys on the
 * *current* CPU's hardware APIC ID — confirm callers only invoke this
 * for the running CPU.
 */
static inline int bigsmp_apicid_to_node(int logical_apicid)
{
	return apicid_2_node[hard_smp_processor_id()];
}
  58. static inline int cpu_present_to_apicid(int mps_cpu)
  59. {
  60. if (mps_cpu < nr_cpu_ids)
  61. return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
  62. return BAD_APICID;
  63. }
/* Build a physid mask with only @phys_apicid set. */
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	return physid_mask_of_physid(phys_apicid);
}
extern u8 cpu_2_logical_apicid[];

/*
 * Mapping from cpu number to logical apicid.  In bigsmp mode the logical
 * ID equals the physical ID, so this returns cpu_physical_id() directly.
 * NOTE(review): cpu_2_logical_apicid is declared above but unused here —
 * confirm whether the extern is still needed.
 */
static inline int cpu_to_logical_apicid(int cpu)
{
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return cpu_physical_id(cpu);
}
/*
 * Physical ID map for I/O APIC setup.  @phys_map is ignored: the low
 * eight IDs are unconditionally promoted as a stopgap.
 */
static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0xFFL);
}
/* No port-I/O remapping is needed on bigsmp; intentionally empty. */
static inline void setup_portio_remap(void)
{
}
/* No extra work is needed to enable APIC mode on bigsmp; intentionally empty. */
static inline void enable_apic_mode(void)
{
}
  87. static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
  88. {
  89. return (1);
  90. }
  91. /* As we are using single CPU as destination, pick only one CPU here */
  92. static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
  93. {
  94. int cpu;
  95. int apicid;
  96. cpu = first_cpu(*cpumask);
  97. apicid = cpu_to_logical_apicid(cpu);
  98. return apicid;
  99. }
/*
 * Like cpu_mask_to_apicid(), but restricted to (cpumask & andmask).
 * Returns the logical APIC ID of the first online CPU in the
 * intersection, or BAD_APICID if none is online.
 */
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask)
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	/* cpu == nr_cpu_ids if the loop above found no online CPU */
	if (cpu < nr_cpu_ids)
		return cpu_to_logical_apicid(cpu);
	return BAD_APICID;
}
/* Physical package ID: the APIC ID bits above @index_msb. */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}
  119. #endif /* __ASM_MACH_APIC_H */