/*
 * apic.h — ES7000 sub-architecture APIC support (legacy x86).
 */
  1. #ifndef __ASM_ES7000_APIC_H
  2. #define __ASM_ES7000_APIC_H
  3. #include <linux/gfp.h>
/* Translate a CPU number to its logical APIC ID via the per-CPU BIOS table. */
#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
/* ES7000 APICs are always usable; unconditionally report "registered". */
static inline int es7000_apic_id_registered(void)
{
	return 1;
}
/* Cluster mode: interrupts may target every CPU in the system. */
static inline const cpumask_t *target_cpus_cluster(void)
{
	return &CPU_MASK_ALL;
}
/* Default (non-cluster) mode: target only the CPU running this code. */
static inline const cpumask_t *es7000_target_cpus(void)
{
	return &cpumask_of_cpu(smp_processor_id());
}
/* Destination format / delivery mode used by the clustered variant. */
#define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER	(dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER		(1)	/* logical delivery broadcast to all procs */
/* Flat destination format used by the non-cluster variant. */
#define APIC_DFR_VALUE			(APIC_DFR_FLAT)
/*
 * ES7000 never considers an APIC ID "used"; both arguments are ignored
 * and 0 is returned unconditionally.
 */
static inline unsigned long
es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
/* A physical APIC ID is present iff set in the global present map. */
static inline unsigned long es7000_check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}
  30. #define apicid_cluster(apicid) (apicid & 0xF0)
/*
 * Build the APIC_LDR register value for @cpu from its BIOS-reported
 * logical APIC ID.
 */
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long id;
	id = xapic_phys_to_log_apicid(cpu);
	return (SET_APIC_LOGICAL_ID(id));
}
  37. /*
  38. * Set up the logical destination ID.
  39. *
  40. * Intel recommends to set DFR, LdR and TPR before enabling
  41. * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
  42. * document number 292116). So here it goes...
  43. */
  44. static inline void es7000_init_apic_ldr_cluster(void)
  45. {
  46. unsigned long val;
  47. int cpu = smp_processor_id();
  48. apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
  49. val = calculate_ldr(cpu);
  50. apic_write(APIC_LDR, val);
  51. }
  52. static inline void es7000_init_apic_ldr(void)
  53. {
  54. unsigned long val;
  55. int cpu = smp_processor_id();
  56. apic_write(APIC_DFR, APIC_DFR_VALUE);
  57. val = calculate_ldr(cpu);
  58. apic_write(APIC_LDR, val);
  59. }
  60. extern int apic_version [MAX_APICS];
  61. static inline void es7000_setup_apic_routing(void)
  62. {
  63. int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
  64. printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
  65. (apic_version[apic] == 0x14) ?
  66. "Physical Cluster" : "Logical Cluster",
  67. nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
  68. }
/* ES7000 places every CPU on NUMA node 0. */
static inline int apicid_to_node(int logical_apicid)
{
	return 0;
}
  73. static inline int cpu_present_to_apicid(int mps_cpu)
  74. {
  75. if (!mps_cpu)
  76. return boot_cpu_physical_apicid;
  77. else if (mps_cpu < nr_cpu_ids)
  78. return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
  79. else
  80. return BAD_APICID;
  81. }
/*
 * Build the physid mask for the next CPU being marked present.
 *
 * NOTE(review): the phys_apicid argument is ignored — a static counter
 * hands out sequential IDs instead, so this relies on being called
 * exactly once per CPU in bring-up order; not safe for concurrent use.
 */
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	static int id = 0;
	physid_mask_t mask;
	mask = physid_mask_of_physid(id);
	++id;
	return mask;
}
  90. extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	/* SMP: table lookup, guarded against out-of-range CPU numbers. */
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
#else
	/* UP: read the running CPU's logical ID straight from the APIC. */
	return logical_smp_processor_id();
#endif
}
/*
 * Report the physical APIC IDs that I/O APICs may occupy; the phys_map
 * argument is ignored.
 */
static inline physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0xff);
}
/* No port I/O remapping is required on ES7000. */
static inline void setup_portio_remap(void)
{
}
  110. extern unsigned int boot_cpu_physical_apicid;
  111. static inline int check_phys_apicid_present(int cpu_physical_apicid)
  112. {
  113. boot_cpu_physical_apicid = read_apic_id();
  114. return (1);
  115. }
/*
 * Compute the logical APIC destination ID covering every CPU in
 * @cpumask (cluster mode).  All CPUs in the mask must belong to the
 * same APIC cluster; if they don't, or if the mask covers every CPU,
 * 0xFF (broadcast) is returned.  Otherwise the last CPU's logical
 * APIC ID is returned.
 */
static inline unsigned int
cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpumask_weight(cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return 0xFF;
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = cpumask_first(cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	/* Linear scan until every set bit of the mask has been visited. */
	while (cpus_found < num_bits_set) {
		if (cpumask_test_cpu(cpu, cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			/* Reject masks that straddle APIC clusters. */
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)) {
				printk ("%s: Not a valid mask!\n", __func__);
				return 0xFF;
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
/*
 * Non-cluster variant of the mask-to-destination-ID computation.  Same
 * scan as cpu_mask_to_apicid_cluster(), but on failure (mask spans
 * clusters) or when the mask covers every CPU it falls back to CPU 0's
 * logical APIC ID instead of broadcast.
 */
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpus_weight(*cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return cpu_to_logical_apicid(0);
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = first_cpu(*cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	/* Linear scan until every set bit of the mask has been visited. */
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, *cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			/* Reject masks that straddle APIC clusters. */
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)) {
				printk ("%s: Not a valid mask!\n", __func__);
				return cpu_to_logical_apicid(0);
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
/*
 * Destination ID for the online CPUs present in both @inmask and
 * @andmask.  Falls back to CPU 0's logical APIC ID when the temporary
 * cpumask cannot be allocated (GFP_ATOMIC: callable from atomic
 * context).
 */
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
						  const struct cpumask *andmask)
{
	int apicid = cpu_to_logical_apicid(0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;

	cpumask_and(cpumask, inmask, andmask);
	cpumask_and(cpumask, cpumask, cpu_online_mask);
	apicid = cpu_mask_to_apicid(cpumask);

	free_cpumask_var(cpumask);
	return apicid;
}
/* Physical package ID: the APIC ID with the low index_msb bits stripped. */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}
  196. #endif /* __ASM_ES7000_APIC_H */