/* ES7000 sub-architecture APIC definitions (apic.h). */
#ifndef __ASM_ES7000_APIC_H
#define __ASM_ES7000_APIC_H

#include <linux/gfp.h>

/* Map a CPU's physical (BIOS) APIC ID to its logical APIC ID. */
#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
/* APIC Error Status Register handling is disabled on this sub-arch. */
#define esr_disable (1)
  6. static inline int apic_id_registered(void)
  7. {
  8. return (1);
  9. }
/* Cluster mode: interrupts may target every CPU in the system. */
static inline const cpumask_t *target_cpus_cluster(void)
{
	return &CPU_MASK_ALL;
}
/* Default (non-cluster) mode: interrupts target only the current CPU. */
static inline const cpumask_t *target_cpus(void)
{
	return &cpumask_of_cpu(smp_processor_id());
}
/* Cluster-mode interrupt delivery: cluster DFR, lowest-priority arbitration. */
#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ_CLUSTER (1)

/* Default mode: flat DFR, fixed delivery. */
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE (dest_Fixed)
#define INT_DEST_MODE (0) /* phys delivery to target procs */
#define NO_BALANCE_IRQ (0)

/* This sub-arch overrides the generic flag: force physical destinations. */
#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL 0x0
/* ES7000 never marks an APIC ID as already in use; args are ignored. */
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
/* Non-zero if @bit is set in the map of physically present CPUs. */
static inline unsigned long check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}
/* The cluster number lives in the upper nibble of the logical APIC ID. */
#define apicid_cluster(apicid) (apicid & 0xF0)
  37. static inline unsigned long calculate_ldr(int cpu)
  38. {
  39. unsigned long id;
  40. id = xapic_phys_to_log_apicid(cpu);
  41. return (SET_APIC_LOGICAL_ID(id));
  42. }
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
  50. static inline void init_apic_ldr_cluster(void)
  51. {
  52. unsigned long val;
  53. int cpu = smp_processor_id();
  54. apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
  55. val = calculate_ldr(cpu);
  56. apic_write(APIC_LDR, val);
  57. }
  58. static inline void init_apic_ldr(void)
  59. {
  60. unsigned long val;
  61. int cpu = smp_processor_id();
  62. apic_write(APIC_DFR, APIC_DFR_VALUE);
  63. val = calculate_ldr(cpu);
  64. apic_write(APIC_LDR, val);
  65. }
  66. extern int apic_version [MAX_APICS];
  67. static inline void setup_apic_routing(void)
  68. {
  69. int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
  70. printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
  71. (apic_version[apic] == 0x14) ?
  72. "Physical Cluster" : "Logical Cluster",
  73. nr_ioapics, cpus_addr(*target_cpus())[0]);
  74. }
  75. static inline int multi_timer_check(int apic, int irq)
  76. {
  77. return 0;
  78. }
  79. static inline int apicid_to_node(int logical_apicid)
  80. {
  81. return 0;
  82. }
  83. static inline int cpu_present_to_apicid(int mps_cpu)
  84. {
  85. if (!mps_cpu)
  86. return boot_cpu_physical_apicid;
  87. else if (mps_cpu < nr_cpu_ids)
  88. return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
  89. else
  90. return BAD_APICID;
  91. }
  92. static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
  93. {
  94. static int id = 0;
  95. physid_mask_t mask;
  96. mask = physid_mask_of_physid(id);
  97. ++id;
  98. return mask;
  99. }
  100. extern u8 cpu_2_logical_apicid[];
  101. /* Mapping from cpu number to logical apicid */
  102. static inline int cpu_to_logical_apicid(int cpu)
  103. {
  104. #ifdef CONFIG_SMP
  105. if (cpu >= nr_cpu_ids)
  106. return BAD_APICID;
  107. return (int)cpu_2_logical_apicid[cpu];
  108. #else
  109. return logical_smp_processor_id();
  110. #endif
  111. }
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	/* Claim the low eight physical IDs regardless of the real map. */
	return physids_promote(0xff);
}
/* No port-I/O remapping is required on this platform. */
static inline void setup_portio_remap(void)
{
}
  120. extern unsigned int boot_cpu_physical_apicid;
  121. static inline int check_phys_apicid_present(int cpu_physical_apicid)
  122. {
  123. boot_cpu_physical_apicid = read_apic_id();
  124. return (1);
  125. }
  126. static inline unsigned int
  127. cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
  128. {
  129. int num_bits_set;
  130. int cpus_found = 0;
  131. int cpu;
  132. int apicid;
  133. num_bits_set = cpumask_weight(cpumask);
  134. /* Return id to all */
  135. if (num_bits_set == nr_cpu_ids)
  136. return 0xFF;
  137. /*
  138. * The cpus in the mask must all be on the apic cluster. If are not
  139. * on the same apicid cluster return default value of TARGET_CPUS.
  140. */
  141. cpu = cpumask_first(cpumask);
  142. apicid = cpu_to_logical_apicid(cpu);
  143. while (cpus_found < num_bits_set) {
  144. if (cpumask_test_cpu(cpu, cpumask)) {
  145. int new_apicid = cpu_to_logical_apicid(cpu);
  146. if (apicid_cluster(apicid) !=
  147. apicid_cluster(new_apicid)){
  148. printk ("%s: Not a valid mask!\n", __func__);
  149. return 0xFF;
  150. }
  151. apicid = new_apicid;
  152. cpus_found++;
  153. }
  154. cpu++;
  155. }
  156. return apicid;
  157. }
  158. static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
  159. {
  160. int num_bits_set;
  161. int cpus_found = 0;
  162. int cpu;
  163. int apicid;
  164. num_bits_set = cpus_weight(*cpumask);
  165. /* Return id to all */
  166. if (num_bits_set == nr_cpu_ids)
  167. return cpu_to_logical_apicid(0);
  168. /*
  169. * The cpus in the mask must all be on the apic cluster. If are not
  170. * on the same apicid cluster return default value of TARGET_CPUS.
  171. */
  172. cpu = first_cpu(*cpumask);
  173. apicid = cpu_to_logical_apicid(cpu);
  174. while (cpus_found < num_bits_set) {
  175. if (cpu_isset(cpu, *cpumask)) {
  176. int new_apicid = cpu_to_logical_apicid(cpu);
  177. if (apicid_cluster(apicid) !=
  178. apicid_cluster(new_apicid)){
  179. printk ("%s: Not a valid mask!\n", __func__);
  180. return cpu_to_logical_apicid(0);
  181. }
  182. apicid = new_apicid;
  183. cpus_found++;
  184. }
  185. cpu++;
  186. }
  187. return apicid;
  188. }
  189. static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
  190. const struct cpumask *andmask)
  191. {
  192. int apicid = cpu_to_logical_apicid(0);
  193. cpumask_var_t cpumask;
  194. if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
  195. return apicid;
  196. cpumask_and(cpumask, inmask, andmask);
  197. cpumask_and(cpumask, cpumask, cpu_online_mask);
  198. apicid = cpu_mask_to_apicid(cpumask);
  199. free_cpumask_var(cpumask);
  200. return apicid;
  201. }
/*
 * Physical package ID: the APIC ID with the low index_msb
 * (core/thread) bits shifted out.
 */
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}
  206. #endif /* __ASM_ES7000_APIC_H */