/*
 * apic.h — Summit subarchitecture APIC definitions.
 */
#ifndef __ASM_SUMMIT_APIC_H
#define __ASM_SUMMIT_APIC_H

#include <asm/smp.h>
#include <linux/gfp.h>

/* In clustered mode, the high nibble of APIC ID is a cluster number.
 * The low nibble is a 4-bit bitmap. */
#define XAPIC_DEST_CPUS_SHIFT	4
#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)

/* Summit runs the local APICs in logical cluster destination mode. */
#define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
/*
 * Default destination mask for unbound interrupts.
 *
 * CPU_MASK_ALL (0xff) has undefined behaviour with
 * dest_LowestPrio mode logical clustered apic interrupt routing.
 * Just start on cpu 0.  IRQ balancing will spread load.
 */
static inline const cpumask_t *summit_target_cpus(void)
{
	return &cpumask_of_cpu(0);
}
/*
 * Summit never marks an APIC ID as "used" in the physical-ID bitmap:
 * both arguments are ignored and 0 (not used) is always returned.
 */
static inline unsigned long
summit_check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
/* we don't use the phys_cpu_present_map to indicate apicid presence,
 * so every APIC ID is simply reported present */
static inline unsigned long summit_check_apicid_present(int bit)
{
	return 1;
}
/* Extract the cluster number (high nibble) from a logical APIC ID. */
#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)

/* Mapping from CPU number to logical APIC ID (see cpu_to_logical_apicid). */
extern u8 cpu_2_logical_apicid[];
/*
 * Program this CPU's Logical Destination Register.
 *
 * The logical ID is the hardware APIC ID's cluster (high nibble) plus a
 * single bit selecting this CPU within the cluster.  The bit position is
 * the number of CPUs already assigned a logical ID in the same cluster,
 * so successive CPUs in one cluster get bits 0, 1, 2, 3.
 */
static inline void summit_init_apic_ldr(void)
{
	unsigned long val, id;
	int count = 0;
	u8 my_id = (u8)hard_smp_processor_id();
	u8 my_cluster = (u8)apicid_cluster(my_id);
#ifdef CONFIG_SMP
	u8 lid;
	int i;

	/* Create logical APIC IDs by counting CPUs already in cluster. */
	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
		lid = cpu_2_logical_apicid[i];
		if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
			++count;
	}
#endif
	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
	id = my_cluster | (1UL << count);

	/* Switch to cluster destination format, then install the ID. */
	apic_write(APIC_DFR, APIC_DFR_VALUE);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}
/* APIC ID registration is not tracked on Summit: always report success. */
static inline int summit_apic_id_registered(void)
{
	return 1;
}
  60. static inline void summit_setup_apic_routing(void)
  61. {
  62. printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
  63. nr_ioapics);
  64. }
/*
 * Map an APIC ID to its NUMA node.
 *
 * NOTE(review): @logical_apicid is ignored; the lookup is keyed on the
 * *current* CPU's hardware APIC ID instead.  Presumably intentional for
 * Summit's clustered IDs — confirm against callers.
 */
static inline int summit_apicid_to_node(int logical_apicid)
{
#ifdef CONFIG_SMP
	return apicid_2_node[hard_smp_processor_id()];
#else
	return 0;
#endif
}
  73. /* Mapping from cpu number to logical apicid */
  74. static inline int cpu_to_logical_apicid(int cpu)
  75. {
  76. #ifdef CONFIG_SMP
  77. if (cpu >= nr_cpu_ids)
  78. return BAD_APICID;
  79. return (int)cpu_2_logical_apicid[cpu];
  80. #else
  81. return logical_smp_processor_id();
  82. #endif
  83. }
  84. static inline int cpu_present_to_apicid(int mps_cpu)
  85. {
  86. if (mps_cpu < nr_cpu_ids)
  87. return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
  88. else
  89. return BAD_APICID;
  90. }
/*
 * Build the physical-ID map used during I/O APIC setup.
 * For clustered we don't have a good way to do this yet - hack:
 * the incoming map is ignored and a fixed 0x0F pattern is promoted.
 */
static inline physid_mask_t
summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
{
	return physids_promote(0x0F);
}
/* Report only physical APIC ID 0 as present, regardless of @apicid. */
static inline physid_mask_t apicid_to_cpu_present(int apicid)
{
	return physid_mask_of_physid(0);
}
/* No port-I/O remapping is needed on Summit. */
static inline void setup_portio_remap(void)
{
}
/* Physical APIC ID presence is not tracked; always report present. */
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}
/* Nothing to do to enable APIC mode on Summit. */
static inline void enable_apic_mode(void)
{
}
  111. static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
  112. {
  113. int num_bits_set;
  114. int cpus_found = 0;
  115. int cpu;
  116. int apicid;
  117. num_bits_set = cpus_weight(*cpumask);
  118. /* Return id to all */
  119. if (num_bits_set >= nr_cpu_ids)
  120. return (int) 0xFF;
  121. /*
  122. * The cpus in the mask must all be on the apic cluster. If are not
  123. * on the same apicid cluster return default value of target_cpus():
  124. */
  125. cpu = first_cpu(*cpumask);
  126. apicid = cpu_to_logical_apicid(cpu);
  127. while (cpus_found < num_bits_set) {
  128. if (cpu_isset(cpu, *cpumask)) {
  129. int new_apicid = cpu_to_logical_apicid(cpu);
  130. if (apicid_cluster(apicid) !=
  131. apicid_cluster(new_apicid)){
  132. printk ("%s: Not a valid mask!\n", __func__);
  133. return 0xFF;
  134. }
  135. apicid = apicid | new_apicid;
  136. cpus_found++;
  137. }
  138. cpu++;
  139. }
  140. return apicid;
  141. }
/*
 * Like cpu_mask_to_apicid(), but for the intersection of two masks,
 * further restricted to currently-online CPUs.
 *
 * The temporary cpumask is allocated with GFP_ATOMIC (no sleeping).
 * If the allocation fails, fall back to CPU 0's logical APIC ID.
 */
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
						  const struct cpumask *andmask)
{
	/* Precompute the fallback result before trying to allocate. */
	int apicid = cpu_to_logical_apicid(0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;

	cpumask_and(cpumask, inmask, andmask);
	cpumask_and(cpumask, cpumask, cpu_online_mask);
	apicid = cpu_mask_to_apicid(cpumask);

	free_cpumask_var(cpumask);
	return apicid;
}
/* cpuid returns the value latched in the HW at reset, not the APIC ID
 * register's value.  For any box whose BIOS changes APIC IDs, like
 * clustered APIC systems, we must use hard_smp_processor_id.
 *
 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
 */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	/* @cpuid_apic is deliberately ignored — see comment above. */
	return hard_smp_processor_id() >> index_msb;
}
  165. #endif /* __ASM_SUMMIT_APIC_H */