/*
 * ES7000 APIC definitions (apic.h)
 */
#ifndef __ASM_ES7000_APIC_H
#define __ASM_ES7000_APIC_H

#include <linux/gfp.h>

/* Logical APIC id for @cpu: the BIOS-reported per-CPU APIC id is reused. */
#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
  5. static inline int es7000_apic_id_registered(void)
  6. {
  7. return 1;
  8. }
  9. static inline const cpumask_t *target_cpus_cluster(void)
  10. {
  11. return &CPU_MASK_ALL;
  12. }
  13. static inline const cpumask_t *es7000_target_cpus(void)
  14. {
  15. return &cpumask_of_cpu(smp_processor_id());
  16. }
/* Cluster-mode APIC configuration. */
#define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER	(dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER		(1)	/* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ_CLUSTER		(1)

/* Flat (non-cluster) mode APIC configuration. */
#define APIC_DFR_VALUE			(APIC_DFR_FLAT)
#define NO_BALANCE_IRQ			(0)
  23. static inline unsigned long
  24. es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
  25. {
  26. return 0;
  27. }
  28. static inline unsigned long es7000_check_apicid_present(int bit)
  29. {
  30. return physid_isset(bit, phys_cpu_present_map);
  31. }
/* CPUs share an APIC cluster when the top nibble of the logical id matches. */
#define apicid_cluster(apicid)	(apicid & 0xF0)
  33. static inline unsigned long calculate_ldr(int cpu)
  34. {
  35. unsigned long id;
  36. id = xapic_phys_to_log_apicid(cpu);
  37. return (SET_APIC_LOGICAL_ID(id));
  38. }
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
  46. static inline void init_apic_ldr_cluster(void)
  47. {
  48. unsigned long val;
  49. int cpu = smp_processor_id();
  50. apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
  51. val = calculate_ldr(cpu);
  52. apic_write(APIC_LDR, val);
  53. }
  54. static inline void init_apic_ldr(void)
  55. {
  56. unsigned long val;
  57. int cpu = smp_processor_id();
  58. apic_write(APIC_DFR, APIC_DFR_VALUE);
  59. val = calculate_ldr(cpu);
  60. apic_write(APIC_LDR, val);
  61. }
  62. extern int apic_version [MAX_APICS];
  63. static inline void setup_apic_routing(void)
  64. {
  65. int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
  66. printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
  67. (apic_version[apic] == 0x14) ?
  68. "Physical Cluster" : "Logical Cluster",
  69. nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
  70. }
  71. static inline int multi_timer_check(int apic, int irq)
  72. {
  73. return 0;
  74. }
  75. static inline int apicid_to_node(int logical_apicid)
  76. {
  77. return 0;
  78. }
  79. static inline int cpu_present_to_apicid(int mps_cpu)
  80. {
  81. if (!mps_cpu)
  82. return boot_cpu_physical_apicid;
  83. else if (mps_cpu < nr_cpu_ids)
  84. return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
  85. else
  86. return BAD_APICID;
  87. }
/*
 * Return a one-bit physid mask for the "next" CPU.
 *
 * NOTE(review): @phys_apicid is ignored -- a static counter hands out
 * sequential ids instead, so this relies on being called exactly once
 * per CPU, in discovery order.  Confirm that is still the only caller
 * pattern before changing it.
 */
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	static int id = 0;
	physid_mask_t mask;
	mask = physid_mask_of_physid(id);
	++id;
	return mask;
}
  96. extern u8 cpu_2_logical_apicid[];
  97. /* Mapping from cpu number to logical apicid */
  98. static inline int cpu_to_logical_apicid(int cpu)
  99. {
  100. #ifdef CONFIG_SMP
  101. if (cpu >= nr_cpu_ids)
  102. return BAD_APICID;
  103. return (int)cpu_2_logical_apicid[cpu];
  104. #else
  105. return logical_smp_processor_id();
  106. #endif
  107. }
/* Map I/O APIC physical ids; @phys_map is ignored (see comment below). */
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0xff);
}
/* No port-I/O remapping is performed on ES7000 -- intentionally empty. */
static inline void setup_portio_remap(void)
{
}
  116. extern unsigned int boot_cpu_physical_apicid;
  117. static inline int check_phys_apicid_present(int cpu_physical_apicid)
  118. {
  119. boot_cpu_physical_apicid = read_apic_id();
  120. return (1);
  121. }
  122. static inline unsigned int
  123. cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
  124. {
  125. int num_bits_set;
  126. int cpus_found = 0;
  127. int cpu;
  128. int apicid;
  129. num_bits_set = cpumask_weight(cpumask);
  130. /* Return id to all */
  131. if (num_bits_set == nr_cpu_ids)
  132. return 0xFF;
  133. /*
  134. * The cpus in the mask must all be on the apic cluster. If are not
  135. * on the same apicid cluster return default value of target_cpus():
  136. */
  137. cpu = cpumask_first(cpumask);
  138. apicid = cpu_to_logical_apicid(cpu);
  139. while (cpus_found < num_bits_set) {
  140. if (cpumask_test_cpu(cpu, cpumask)) {
  141. int new_apicid = cpu_to_logical_apicid(cpu);
  142. if (apicid_cluster(apicid) !=
  143. apicid_cluster(new_apicid)){
  144. printk ("%s: Not a valid mask!\n", __func__);
  145. return 0xFF;
  146. }
  147. apicid = new_apicid;
  148. cpus_found++;
  149. }
  150. cpu++;
  151. }
  152. return apicid;
  153. }
  154. static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
  155. {
  156. int num_bits_set;
  157. int cpus_found = 0;
  158. int cpu;
  159. int apicid;
  160. num_bits_set = cpus_weight(*cpumask);
  161. /* Return id to all */
  162. if (num_bits_set == nr_cpu_ids)
  163. return cpu_to_logical_apicid(0);
  164. /*
  165. * The cpus in the mask must all be on the apic cluster. If are not
  166. * on the same apicid cluster return default value of target_cpus():
  167. */
  168. cpu = first_cpu(*cpumask);
  169. apicid = cpu_to_logical_apicid(cpu);
  170. while (cpus_found < num_bits_set) {
  171. if (cpu_isset(cpu, *cpumask)) {
  172. int new_apicid = cpu_to_logical_apicid(cpu);
  173. if (apicid_cluster(apicid) !=
  174. apicid_cluster(new_apicid)){
  175. printk ("%s: Not a valid mask!\n", __func__);
  176. return cpu_to_logical_apicid(0);
  177. }
  178. apicid = new_apicid;
  179. cpus_found++;
  180. }
  181. cpu++;
  182. }
  183. return apicid;
  184. }
  185. static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
  186. const struct cpumask *andmask)
  187. {
  188. int apicid = cpu_to_logical_apicid(0);
  189. cpumask_var_t cpumask;
  190. if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
  191. return apicid;
  192. cpumask_and(cpumask, inmask, andmask);
  193. cpumask_and(cpumask, cpumask, cpu_online_mask);
  194. apicid = cpu_mask_to_apicid(cpumask);
  195. free_cpumask_var(cpumask);
  196. return apicid;
  197. }
  198. static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
  199. {
  200. return cpuid_apic >> index_msb;
  201. }
  202. #endif /* __ASM_ES7000_APIC_H */