/* ES7000 sub-architecture APIC support definitions (apic.h). */
#ifndef __ASM_ES7000_APIC_H
#define __ASM_ES7000_APIC_H

#include <linux/gfp.h>

/* Logical APIC id for @cpu is its BIOS-reported APIC id (per-CPU data). */
#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
/* ES7000 always disables ESR (error status register) handling. */
#define esr_disable (1)
  6. static inline int es7000_apic_id_registered(void)
  7. {
  8. return 1;
  9. }
/* In clustered mode, interrupts may target every CPU in the system. */
static inline const cpumask_t *target_cpus_cluster(void)
{
	return &CPU_MASK_ALL;
}
/*
 * Non-cluster case: only the local CPU is targeted.
 * NOTE(review): uses smp_processor_id() — presumably only called with
 * preemption disabled; verify against callers.
 */
static inline const cpumask_t *es7000_target_cpus(void)
{
	return &cpumask_of_cpu(smp_processor_id());
}
/*
 * Cluster-mode settings: clustered destination format, lowest-priority
 * delivery, logical destination mode, no kernel IRQ balancing.
 */
#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ_CLUSTER (1)

/* Flat-mode settings: flat destination format, IRQ balancing allowed. */
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define NO_BALANCE_IRQ (0)

/* ES7000 overrides the generic logical-destination flag to 0 (physical). */
#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL 0x0
/*
 * ES7000 never treats an APIC id as already in use; both arguments are
 * ignored and any id may be assigned.
 */
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
/* Nonzero iff @bit is set in the map of physically present APIC ids. */
static inline unsigned long check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}
/* The high nibble of an APIC id identifies its cluster on ES7000. */
#define apicid_cluster(apicid) (apicid & 0xF0)
/*
 * Build the APIC_LDR register value for @cpu: its BIOS-reported APIC id
 * shifted into the logical-destination field.
 */
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long id;

	id = xapic_phys_to_log_apicid(cpu);
	return (SET_APIC_LOGICAL_ID(id));
}
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LdR and TPR before enabling
 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116). So here it goes...
 */
static inline void init_apic_ldr_cluster(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	/* Program the cluster destination format before the logical id. */
	apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}
/* Flat-mode variant: program APIC_DFR_FLAT, then the logical id. */
static inline void init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_VALUE);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}
extern int apic_version [MAX_APICS];

/*
 * Announce the APIC routing mode chosen for this platform.
 * APIC version 0x14 is reported as "Physical Cluster"; anything else
 * as "Logical Cluster".
 * NOTE(review): printk() lacks a KERN_* level prefix — checkpatch would
 * flag this; fixing it needs KERN_INFO from <linux/kernel.h>.
 */
static inline void setup_apic_routing(void)
{
	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());

	printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
		(apic_version[apic] == 0x14) ?
			"Physical Cluster" : "Logical Cluster",
		nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
}
  73. static inline int multi_timer_check(int apic, int irq)
  74. {
  75. return 0;
  76. }
  77. static inline int apicid_to_node(int logical_apicid)
  78. {
  79. return 0;
  80. }
  81. static inline int cpu_present_to_apicid(int mps_cpu)
  82. {
  83. if (!mps_cpu)
  84. return boot_cpu_physical_apicid;
  85. else if (mps_cpu < nr_cpu_ids)
  86. return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
  87. else
  88. return BAD_APICID;
  89. }
/*
 * Produce a physid mask with exactly one present bit set.
 *
 * NOTE(review): @phys_apicid is ignored; a static counter hands out the
 * next bit on each call, so present bits are assigned in call order.
 * Not reentrant — presumably only used during single-threaded boot;
 * verify against callers.
 */
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	static int id = 0;
	physid_mask_t mask;

	mask = physid_mask_of_physid(id);
	++id;
	return mask;
}
extern u8 cpu_2_logical_apicid[];

/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	/* Out-of-range CPU numbers have no logical APIC id. */
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
#else
	/* UP build: read the sole CPU's logical id directly. */
	return logical_smp_processor_id();
#endif
}
/*
 * Map I/O APIC physical ids to a present-map; @phys_map is ignored and
 * ids 0-7 are all reported present.
 */
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0xff);
}
/* ES7000 requires no port-I/O remapping; nothing to do. */
static inline void setup_portio_remap(void)
{
}
extern unsigned int boot_cpu_physical_apicid;

/*
 * "Check" that a physical APIC id is present.
 * NOTE(review): always reports success and ignores its argument; as a
 * side effect it refreshes boot_cpu_physical_apicid from the hardware.
 */
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
	boot_cpu_physical_apicid = read_apic_id();
	return (1);
}
/*
 * Reduce @cpumask to one logical APIC id (cluster mode).
 *
 * Returns 0xFF (broadcast) when the mask covers every possible CPU, or
 * when the CPUs in the mask do not all share one APIC cluster (same
 * high nibble); otherwise returns the logical APIC id of the last CPU
 * found in the mask.
 */
static inline unsigned int
cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpumask_weight(cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return 0xFF;
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = cpumask_first(cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	/* Scan upward until every set bit has been visited. */
	while (cpus_found < num_bits_set) {
		if (cpumask_test_cpu(cpu, cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);

			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)){
				printk ("%s: Not a valid mask!\n", __func__);
				return 0xFF;
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
/*
 * Reduce @cpumask to one logical APIC id (non-cluster variant).
 *
 * Falls back to CPU 0's logical APIC id when the mask covers every
 * possible CPU, or when the CPUs in the mask span more than one APIC
 * cluster; otherwise returns the logical APIC id of the last CPU found
 * in the mask.
 */
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpus_weight(*cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return cpu_to_logical_apicid(0);
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = first_cpu(*cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	/* Scan upward until every set bit has been visited. */
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, *cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);

			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)){
				printk ("%s: Not a valid mask!\n", __func__);
				return cpu_to_logical_apicid(0);
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
/*
 * Like cpu_mask_to_apicid(), but operates on the intersection of
 * @inmask, @andmask and the online-CPU map.  Falls back to CPU 0's
 * logical APIC id if the temporary cpumask cannot be allocated
 * (GFP_ATOMIC, so this may be called from atomic context).
 */
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
						  const struct cpumask *andmask)
{
	int apicid = cpu_to_logical_apicid(0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;

	cpumask_and(cpumask, inmask, andmask);
	cpumask_and(cpumask, cpumask, cpu_online_mask);
	apicid = cpu_mask_to_apicid(cpumask);

	free_cpumask_var(cpumask);
	return apicid;
}
/*
 * Physical package id: the APIC id shifted right by @index_msb,
 * discarding the low per-package id bits.
 */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}
  204. #endif /* __ASM_ES7000_APIC_H */