/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>	/* mdelay(), used in uv_wakeup_secondary() */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#include <asm/pgtable.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static cpumask_t uv_target_cpus(void)
{
	return cpumask_of_cpu(0);
}

static cpumask_t uv_vector_allocation_domain(int cpu)
{
	cpumask_t domain = CPU_MASK_NONE;

	cpu_set(cpu, domain);
	return domain;
}
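
/*
 * Wake a secondary cpu: send an INIT IPI, wait 10ms, then send a STARTUP
 * IPI, both delivered by writing the hub's UVH_IPI_INT MMR on the target
 * pnode rather than through the local APIC. The SIPI vector is the
 * startup address shifted down by 12 bits (it must be page aligned).
 */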
int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
{
	unsigned long val;
	int pnode;

	pnode = uv_apicid_to_pnode(phys_apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
	mdelay(10);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
	return 0;
}

static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long val, apicid, lapicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);	/* ZZZ - cache node-local ? */
	lapicid = apicid & 0x3f;			/* ZZZ macro needed */
	pnode = uv_apicid_to_pnode(apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (lapicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    (vector << UVH_IPI_INT_VECTOR_SHFT);
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

static void uv_send_IPI_mask(cpumask_t mask, int vector)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_allbutself(int vector)
{
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);

	if (!cpus_empty(mask))
		uv_send_IPI_mask(mask, vector);
}

static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_map, vector);
}

static int uv_apic_id_registered(void)
{
	return 1;
}

static void uv_init_apic_ldr(void)
{
	/* Nothing to do: UV uses physical (not logical) destination mode. */
}

static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = first_cpu(cpumask);
	if ((unsigned)cpu < NR_CPUS)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}
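
/*
 * On UV the hardware APIC ID alone is not system-wide unique; the per-cpu
 * x2apic_extra_bits (the pnode, stashed by set_x2apic_extra_bits() below)
 * are OR'd in to form a unique id.
 */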
static unsigned int uv_read_apic_id(void)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = apic_read(APIC_ID) | __get_cpu_var(x2apic_extra_bits);

	return id;
}

static unsigned int phys_pkg_id(int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

#ifdef ZZZ		/* Needs x2apic patch */
static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
#endif
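
/*
 * APIC driver operations for UV, selected at boot when the platform is
 * detected as UV (see get_uv_system_type()). Fixed delivery to a single
 * physical target cpu is used throughout.
 */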
struct genapic apic_x2apic_uv_x = {
	.name = "UV large system",
	.int_delivery_mode = dest_Fixed,
	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
	.target_cpus = uv_target_cpus,
	.vector_allocation_domain = uv_vector_allocation_domain,	/* Fixme ZZZ */
	.apic_id_registered = uv_apic_id_registered,
	.init_apic_ldr = uv_init_apic_ldr,
	.send_IPI_all = uv_send_IPI_all,
	.send_IPI_allbutself = uv_send_IPI_allbutself,
	.send_IPI_mask = uv_send_IPI_mask,
	/* ZZZ.send_IPI_self = uv_send_IPI_self, */
	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,	/* Fixme ZZZ */
	.read_apic_id = uv_read_apic_id,
};
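
/*
 * The local APIC id carries only the low 6 bits of the cpu's id (see
 * uv_send_IPI_one(), which masks with 0x3f); the pnode supplies the
 * upper bits. Stash (pnode << 6) so uv_read_apic_id() can OR it in.
 */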
static __cpuinit void set_x2apic_extra_bits(int pnode)
{
	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
}

/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
	int blade;

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		if (pnode == uv_blade_info[blade].pnode)
			return blade;
	BUG();
}

struct redir_addr {
	unsigned long redirect;
	unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
};
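
/*
 * Find the alias/redirect pair whose overlay base is 0 (i.e. the one
 * covering low memory) and report where that region is redirected to
 * and how large the aliased range is.
 */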
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_si_alias0_overlay_config_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	BUG();
}
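
/*
 * The 32-bit global MMR space and the node-local MMR space must be
 * mapped (uncached) before any of the uv_read_local_mmr() calls in
 * this file can work.
 */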
static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

enum map_type {map_wb, map_uc};

static void map_high(char *id, unsigned long base, int shift, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << shift;
	bytes = (1UL << shift);
	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
	       paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);
}
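
/*
 * Each overlay config MMR describes a high-address window (GRU, config
 * space, MMR space, MMIOH); map each one the BIOS enabled. GRU space is
 * cacheable (write-back), the others are mapped uncached.
 */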
static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (gru.s.enable)
		map_high("GRU", gru.s.base, shift, map_wb);
}

static __init void map_config_high(int max_pnode)
{
	union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
	int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;

	cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
	if (cfg.s.enable)
		map_high("CONFIG", cfg.s.base, shift, map_uc);
}

static __init void map_mmr_high(int max_pnode)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (mmr.s.enable)
		map_high("MMR", mmr.s.base, shift, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	if (mmioh.s.enable)
		map_high("MMIOH", mmioh.s.base, shift, map_uc);
}
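
/*
 * One-time setup, run from uv_cpu_init() on the first cpu to get there:
 * read the hub geometry (m_val/n_val), count the blades present, build
 * the blade/node/cpu lookup tables, fill in each cpu's uv_hub_info and
 * map the high MMR spaces.
 */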
static __init void uv_system_init(void)
{
	union uvh_si_addr_map_config_u m_n_config;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
	int max_pnode = 0;
	unsigned long mmr_base, present;

	map_low_mmrs();

	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	mmr_base =
	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
	    ~UV_MMR_ENABLE;
	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		uv_possible_blades +=
		    hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = alloc_bootmem_pages(bytes);

	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = alloc_bootmem_pages(bytes);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = alloc_bootmem_pages(bytes);
	memset(uv_cpu_to_blade, 255, bytes);
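
	/*
	 * Walk the node-present bitmaps; each set bit is a blade. Record
	 * its pnode so boot_pnode_to_blade() can map back to it later.
	 */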
	blade = 0;
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		for (j = 0; j < 64; j++) {
			if (!test_bit(j, &present))
				continue;
			uv_blade_info[blade].pnode = (i * 64 + j);
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			blade++;
		}
	}

	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_upper = (((unsigned long)node_id.s.node_id) &
		       ~((1 << n_val) - 1)) << m_val;
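
	/*
	 * Fill in the per-cpu hub info for every present cpu. lcpu is the
	 * cpu's index within its blade; the geometry fields (m_val, n_val,
	 * masks) all derive from the MMRs read above.
	 */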
	for_each_present_cpu(cpu) {
		nid = cpu_to_node(cpu);
		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
		blade = boot_pnode_to_blade(pnode);
		lcpu = uv_blade_info[blade].nr_possible_cpus;
		uv_blade_info[blade].nr_possible_cpus++;

		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top =
					lowmem_redir_base + lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = n_val;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->pnode = pnode;
		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;	/* ZZZ */
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;
		max_pnode = max(pnode, max_pnode);

		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
			"lcpu %d, blade %d\n",
			cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
			lcpu, blade);
	}

	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_config_high(max_pnode);
	map_mmioh_high(max_pnode);
}

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * ZZZ hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
	if (!uv_node_to_blade)
		uv_system_init();

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}