apic.h

#ifndef __ASM_ES7000_APIC_H
#define __ASM_ES7000_APIC_H

#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
#define esr_disable (1)

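/* Every APIC ID is treated as registered on this platform. */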
static inline int apic_id_registered(void)
{
	return 1;
}

static inline const cpumask_t *target_cpus_cluster(void)
{
	return &CPU_MASK_ALL;
}

static inline const cpumask_t *target_cpus(void)
{
	return &cpumask_of_cpu(smp_processor_id());
}

#define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER	(dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER		(1)	/* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ_CLUSTER		(1)

#define APIC_DFR_VALUE		(APIC_DFR_FLAT)
#define INT_DELIVERY_MODE	(dest_Fixed)
#define INT_DEST_MODE		(0)	/* phys delivery to target procs */
#define NO_BALANCE_IRQ		(0)

#undef  APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL	0x0

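/* No APIC ID is ever reported as already in use; the bitmap argument is ignored. */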
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}

static inline unsigned long check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}

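/* The high nibble of a logical APIC ID selects its cluster. */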
#define apicid_cluster(apicid)	(apicid & 0xF0)

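/* Build the APIC_LDR register value for @cpu from its BIOS-reported APIC ID. */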
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long id;

	id = xapic_phys_to_log_apicid(cpu);
	return SET_APIC_LOGICAL_ID(id);
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends setting the DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static inline void init_apic_ldr_cluster(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}

static inline void init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_VALUE);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}

extern int apic_version[MAX_APICS];

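/* Report which clustered-APIC mode is in effect, keyed off the current CPU's APIC version. */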
static inline void setup_apic_routing(void)
{
	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());

	printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
	       (apic_version[apic] == 0x14) ?
	       "Physical Cluster" : "Logical Cluster",
	       nr_ioapics, cpus_addr(*target_cpus())[0]);
}

static inline int multi_timer_check(int apic, int irq)
{
	return 0;
}

static inline int apicid_to_node(int logical_apicid)
{
	return 0;
}

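/* Translate an MPS table CPU number into the BIOS-reported APIC ID. */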
static inline int cpu_present_to_apicid(int mps_cpu)
{
	if (!mps_cpu)
		return boot_cpu_physical_apicid;
	else if (mps_cpu < nr_cpu_ids)
		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
	else
		return BAD_APICID;
}

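/*
 * Each call hands out the next sequential CPU slot; the @phys_apicid
 * argument itself is ignored.
 */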
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	static int id = 0;
	physid_mask_t mask;

	mask = physid_mask_of_physid(id);
	++id;
	return mask;
}

extern u8 cpu_2_logical_apicid[];

/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
#else
	return logical_smp_processor_id();
#endif
}

static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0xff);
}

static inline void setup_portio_remap(void)
{
}

extern unsigned int boot_cpu_physical_apicid;

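/* Always reports the APIC as present; also refreshes boot_cpu_physical_apicid from the hardware. */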
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
	boot_cpu_physical_apicid = read_apic_id();
	return 1;
}

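/*
 * Compute the logical APIC destination for @cpumask.  All CPUs in the
 * mask must share one APIC cluster; otherwise, or when the mask covers
 * every CPU, the broadcast ID 0xFF is returned.
 */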
static inline unsigned int
cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpumask_weight(cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return 0xFF;
	/*
	 * The cpus in the mask must all be on the same apicid cluster.
	 * If they are not, return the default value of TARGET_CPUS.
	 */
	cpu = cpumask_first(cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	while (cpus_found < num_bits_set) {
		if (cpumask_test_cpu(cpu, cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)) {
				printk("%s: Not a valid mask!\n", __func__);
				return 0xFF;
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}

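/*
 * As above, but for the non-cluster case: fall back to CPU 0's logical
 * APIC ID instead of broadcasting when the mask is invalid or covers
 * every CPU.
 */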
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpus_weight(*cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return cpu_to_logical_apicid(0);
	/*
	 * The cpus in the mask must all be on the same apicid cluster.
	 * If they are not, return the default value of TARGET_CPUS.
	 */
	cpu = first_cpu(*cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, *cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)) {
				printk("%s: Not a valid mask!\n", __func__);
				return cpu_to_logical_apicid(0);
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}

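/*
 * Like cpu_mask_to_apicid(), but first restricts the destination set to
 * inmask & andmask & cpu_online_mask.
 */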
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
						  const struct cpumask *andmask)
{
	int apicid = cpu_to_logical_apicid(0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;

	cpumask_and(cpumask, inmask, andmask);
	cpumask_and(cpumask, cpumask, cpu_online_mask);
	apicid = cpu_mask_to_apicid(cpumask);

	free_cpumask_var(cpumask);
	return apicid;
}

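/* Shift out the low index_msb bits of the APIC ID to obtain the physical package ID. */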
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

#endif /* __ASM_ES7000_APIC_H */