/*
 * ES7000 sub-architecture APIC definitions (arch/x86, es7000 apic.h).
 */
#ifndef __ASM_ES7000_APIC_H
#define __ASM_ES7000_APIC_H

#include <linux/gfp.h>

/* Logical APIC id of @cpu: taken from its BIOS-reported physical id. */
#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
/* Always reports the local APIC id as registered on ES7000. */
static inline int es7000_apic_id_registered(void)
{
	return 1;
}
/* Cluster mode: interrupts may target any CPU in the system. */
static inline const cpumask_t *target_cpus_cluster(void)
{
	return &CPU_MASK_ALL;
}
/* Default (non-cluster) mode: target only the CPU executing this call. */
static inline const cpumask_t *es7000_target_cpus(void)
{
	return &cpumask_of_cpu(smp_processor_id());
}
/* Cluster-mode settings. */
#define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER	(dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER		(1)	/* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ_CLUSTER		(1)

/* Flat (non-cluster) mode settings. */
#define APIC_DFR_VALUE			(APIC_DFR_FLAT)
#define NO_BALANCE_IRQ			(0)
/* Never treats an APIC id as already used; both parameters are ignored. */
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
/* An APIC id is present iff its bit is set in phys_cpu_present_map. */
static inline unsigned long check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}
  31. #define apicid_cluster(apicid) (apicid & 0xF0)
/*
 * Build the APIC_LDR register value for @cpu: its BIOS-reported
 * APIC id shifted into the logical-destination field.
 */
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long id;
	id = xapic_phys_to_log_apicid(cpu);
	return (SET_APIC_LOGICAL_ID(id));
}
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LdR and TPR before enabling
 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116). So here it goes...
 */
static inline void init_apic_ldr_cluster(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	/* DFR (cluster destination format) is written before LDR. */
	apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}
/* Same as init_apic_ldr_cluster() but programs flat destination format. */
static inline void init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	/* DFR (flat destination format) is written before LDR. */
	apic_write(APIC_DFR, APIC_DFR_VALUE);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}
  61. extern int apic_version [MAX_APICS];
  62. static inline void setup_apic_routing(void)
  63. {
  64. int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
  65. printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
  66. (apic_version[apic] == 0x14) ?
  67. "Physical Cluster" : "Logical Cluster",
  68. nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
  69. }
/* ES7000 reports no multi-timer IO-APIC conflict; arguments are ignored. */
static inline int multi_timer_check(int apic, int irq)
{
	return 0;
}
/* Every logical APIC id is mapped to node 0 (no NUMA mapping here). */
static inline int apicid_to_node(int logical_apicid)
{
	return 0;
}
  78. static inline int cpu_present_to_apicid(int mps_cpu)
  79. {
  80. if (!mps_cpu)
  81. return boot_cpu_physical_apicid;
  82. else if (mps_cpu < nr_cpu_ids)
  83. return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
  84. else
  85. return BAD_APICID;
  86. }
/*
 * Hand out present-map entries sequentially, one per call; the
 * @phys_apicid argument is ignored.
 *
 * NOTE(review): because of the static counter the result depends on
 * call order, not on the id passed in - this assumes it is invoked
 * exactly once per CPU during bring-up, in CPU order.
 */
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	static int id = 0;
	physid_mask_t mask;
	mask = physid_mask_of_physid(id);
	++id;
	return mask;
}
extern u8 cpu_2_logical_apicid[];

/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	/* Out-of-range CPU numbers have no logical id. */
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
#else
	/* UP build: only one CPU, read its logical id directly. */
	return logical_smp_processor_id();
#endif
}
/*
 * Map IO-APIC physical ids to a present map; @phys_map is ignored.
 */
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0xff);
}
/* No port-I/O remapping needed on ES7000 - intentionally empty. */
static inline void setup_portio_remap(void)
{
}
extern unsigned int boot_cpu_physical_apicid;

/*
 * Always reports the physical APIC id as present; the argument is
 * ignored.  Side effect: refreshes boot_cpu_physical_apicid from the
 * running APIC - presumably callers rely on this; verify before
 * removing.
 */
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
	boot_cpu_physical_apicid = read_apic_id();
	return (1);
}
/*
 * Compute a single logical APIC destination for @cpumask in cluster
 * mode.
 *
 * Returns 0xFF (broadcast) when the mask covers all CPUs, or when the
 * member CPUs do not all share one APIC cluster (upper id nibble);
 * otherwise returns the logical apicid of the last CPU scanned.
 *
 * NOTE(review): the scan advances cpu past cpumask_first() without an
 * explicit upper bound; it relies on num_bits_set matching the set
 * bits reachable below nr_cpu_ids - confirm masks never carry stray
 * higher bits.
 */
static inline unsigned int
cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpumask_weight(cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return 0xFF;
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = cpumask_first(cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	while (cpus_found < num_bits_set) {
		if (cpumask_test_cpu(cpu, cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			/* Every member must be in the same cluster. */
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)){
				printk ("%s: Not a valid mask!\n", __func__);
				return 0xFF;
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
/*
 * Non-cluster variant of cpu_mask_to_apicid_cluster(): same scan, but
 * on cluster mismatch (or a mask covering all CPUs) it falls back to
 * CPU 0's logical apicid instead of the 0xFF broadcast id.
 */
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpus_weight(*cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return cpu_to_logical_apicid(0);
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = first_cpu(*cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, *cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			/* Every member must be in the same cluster. */
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)){
				printk ("%s: Not a valid mask!\n", __func__);
				return cpu_to_logical_apicid(0);
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
/*
 * Like cpu_mask_to_apicid(), but operates on the intersection of
 * @inmask, @andmask and the online map.  The temporary mask is
 * allocated with GFP_ATOMIC (no sleeping); if allocation fails we
 * fall back to CPU 0's logical apicid.
 */
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
						  const struct cpumask *andmask)
{
	int apicid = cpu_to_logical_apicid(0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;
	cpumask_and(cpumask, inmask, andmask);
	cpumask_and(cpumask, cpumask, cpu_online_mask);
	apicid = cpu_mask_to_apicid(cpumask);
	free_cpumask_var(cpumask);
	return apicid;
}
  197. static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
  198. {
  199. return cpuid_apic >> index_msb;
  200. }
  201. #endif /* __ASM_ES7000_APIC_H */