topology.h

/*
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _ASM_X86_TOPOLOGY_H
#define _ASM_X86_TOPOLOGY_H

#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
#  define ENABLE_TOPO_DEFINES
# endif
#else
# ifdef CONFIG_SMP
#  define ENABLE_TOPO_DEFINES
# endif
#endif

/* Node not present */
#define NUMA_NO_NODE	(-1)

#ifdef CONFIG_NUMA
#include <linux/cpumask.h>
#include <asm/mpspec.h>

#ifdef CONFIG_X86_32

/* Mappings between node number and cpus on that node. */
extern cpumask_t node_to_cpumask_map[];

/* Mappings between logical cpu number and node number */
extern int cpu_to_node_map[];

/* Returns the number of the node containing CPU 'cpu' */
static inline int cpu_to_node(int cpu)
{
	return cpu_to_node_map[cpu];
}
#define early_cpu_to_node(cpu)	cpu_to_node(cpu)

/* Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * cpumask_of_node function should be used whenever possible.
 */
static inline cpumask_t node_to_cpumask(int node)
{
	return node_to_cpumask_map[node];
}

/* Returns a bitmask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
	return &node_to_cpumask_map[node];
}
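
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * header): preferring the pointer-returning helper avoids copying a whole
 * cpumask_t onto the stack when NR_CPUS is large, e.g.
 *
 *	int cpu;
 *	for_each_cpu(cpu, cpumask_of_node(nid))
 *		do_work_on(cpu);
 *
 * where 'nid' and do_work_on() are hypothetical caller-side names.
 */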
#else /* CONFIG_X86_64 */

/* Mappings between node number and cpus on that node. */
extern cpumask_t *node_to_cpumask_map;

/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);

/* Returns the number of the current Node. */
#define numa_node_id()		read_pda(nodenumber)

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern int cpu_to_node(int cpu);
extern int early_cpu_to_node(int cpu);
extern const cpumask_t *cpumask_of_node(int node);
extern cpumask_t node_to_cpumask(int node);

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Returns the number of the node containing CPU 'cpu' */
static inline int cpu_to_node(int cpu)
{
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* Same function but used if called before per_cpu areas are setup */
static inline int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const cpumask_t *cpumask_of_node(int node)
{
	return &node_to_cpumask_map[node];
}

/* Returns a bitmask of CPUs on Node 'node'. */
static inline cpumask_t node_to_cpumask(int node)
{
	return node_to_cpumask_map[node];
}

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node)		\
		const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node)	\
			   v = cpumask_of_node(node)

#endif /* CONFIG_X86_64 */
/*
 * Returns the number of the node containing Node 'node'.  This
 * architecture is flat, so it is a pretty simple function!
 */
#define parent_node(node) (node)

#define pcibus_to_node(bus) __pcibus_to_node(bus)
#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)

#ifdef CONFIG_X86_32
extern unsigned long node_start_pfn[];
extern unsigned long node_end_pfn[];
extern unsigned long node_remap_size[];
#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])

# define SD_CACHE_NICE_TRIES	1
# define SD_IDLE_IDX		1
# define SD_NEWIDLE_IDX		2
# define SD_FORKEXEC_IDX	0

#else

# define SD_CACHE_NICE_TRIES	2
# define SD_IDLE_IDX		2
# define SD_NEWIDLE_IDX		2
# define SD_FORKEXEC_IDX	1
#endif

/* sched_domains SD_NODE_INIT for NUMA machines */
#define SD_NODE_INIT (struct sched_domain) {		\
	.min_interval		= 8,			\
	.max_interval		= 32,			\
	.busy_factor		= 32,			\
	.imbalance_pct		= 125,			\
	.cache_nice_tries	= SD_CACHE_NICE_TRIES,	\
	.busy_idx		= 3,			\
	.idle_idx		= SD_IDLE_IDX,		\
	.newidle_idx		= SD_NEWIDLE_IDX,	\
	.wake_idx		= 1,			\
	.forkexec_idx		= SD_FORKEXEC_IDX,	\
	.flags			= SD_LOAD_BALANCE	\
				| SD_BALANCE_EXEC	\
				| SD_BALANCE_FORK	\
				| SD_WAKE_AFFINE	\
				| SD_WAKE_BALANCE	\
				| SD_SERIALIZE,		\
	.last_balance		= jiffies,		\
	.balance_interval	= 1,			\
}
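
/*
 * Illustrative sketch (an assumption, not from the original header): the
 * generic scheduler's domain-building code uses this compound-literal
 * initializer as the template for each NUMA node's sched_domain, roughly
 *
 *	struct sched_domain sd = SD_NODE_INIT;
 *
 * with the node's span and other fields filled in afterwards.
 */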
#ifdef CONFIG_X86_64_ACPI_NUMA
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
#endif

#else /* !CONFIG_NUMA */

#define numa_node_id()		0
#define cpu_to_node(cpu)	0
#define early_cpu_to_node(cpu)	0

static inline const cpumask_t *cpumask_of_node(int node)
{
	return &cpu_online_map;
}
static inline cpumask_t node_to_cpumask(int node)
{
	return cpu_online_map;
}
static inline int node_to_first_cpu(int node)
{
	return first_cpu(cpu_online_map);
}

/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node)		\
		const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node)	\
			   v = cpumask_of_node(node)

#endif
#include <asm-generic/topology.h>

#ifdef CONFIG_NUMA
/* Returns the number of the first CPU on Node 'node'. */
static inline int node_to_first_cpu(int node)
{
	return cpumask_first(cpumask_of_node(node));
}
#endif

extern cpumask_t cpu_coregroup_map(int cpu);
extern const struct cpumask *cpu_coregroup_mask(int cpu);

#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
#define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu)		(&per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))

/* indicates that pointers to the topology cpumask_t maps are valid */
#define arch_provides_topology_pointers		yes
#endif
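
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * generic code such as drivers/base/topology.c exposes these accessors
 * through sysfs, e.g.
 *
 *	int pkg = topology_physical_package_id(cpu);
 *	const struct cpumask *cores = topology_core_cpumask(cpu);
 */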
static inline void arch_fix_phys_package_id(int num, u32 slot)
{
}

struct pci_bus;
void set_pci_bus_resources_arch_default(struct pci_bus *b);

#ifdef CONFIG_SMP
#define mc_capable()	(cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
#define smt_capable()	(smp_num_siblings > 1)
#endif

#ifdef CONFIG_NUMA
extern int get_mp_bus_to_node(int busnum);
extern void set_mp_bus_to_node(int busnum, int node);
#else
static inline int get_mp_bus_to_node(int busnum)
{
	return 0;
}
static inline void set_mp_bus_to_node(int busnum, int node)
{
}
#endif

#endif /* _ASM_X86_TOPOLOGY_H */