x2apic_uv_x.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>
#include <asm/x86_init.h>
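
/*
 * On UV "non-unique APIC" hubs the APIC ID read from hardware lacks the
 * node-identifying high bits.  Each cpu keeps those bits here and ORs them
 * into the ID it reads; see x2apic_get_apic_id() and set_x2apic_extra_bits().
 */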
DEFINE_PER_CPU(int, x2apic_extra_bits);

static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;

static inline bool is_GRU_range(u64 start, u64 end)
{
        return start >= gru_start_paddr && end <= gru_end_paddr;
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
        return is_ISA_range(start, end) || is_GRU_range(start, end);
}
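
/*
 * Read this hub's node id from its local MMR space.  This runs before the
 * normal MMR mappings exist, so the register is accessed through a
 * temporary early_ioremap() mapping.
 */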
static int early_get_nodeid(void)
{
        union uvh_node_id_u node_id;
        unsigned long *mmr;

        mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
        node_id.v = *mmr;
        early_iounmap(mmr, sizeof(*mmr));

        return node_id.s.node_id;
}
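
/*
 * Detect a UV system from the ACPI MADT OEM ids and record which APIC mode
 * the BIOS selected: "UVL" = legacy APIC, "UVX" = x2apic, "UVH" = hub-routed
 * non-unique APIC.  Only the UVH case returns 1, which makes the APIC probe
 * code pick apic_x2apic_uv_x as the driver.
 */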
static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        if (!strcmp(oem_id, "SGI")) {
                x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
                if (!strcmp(oem_table_id, "UVL"))
                        uv_system_type = UV_LEGACY_APIC;
                else if (!strcmp(oem_table_id, "UVX"))
                        uv_system_type = UV_X2APIC;
                else if (!strcmp(oem_table_id, "UVH")) {
                        __get_cpu_var(x2apic_extra_bits) =
                                early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
                        uv_system_type = UV_NON_UNIQUE_APIC;
                        return 1;
                }
        }
        return 0;
}

enum uv_system_type get_uv_system_type(void)
{
        return uv_system_type;
}

int is_uv_system(void)
{
        return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
static const struct cpumask *uv_target_cpus(void)
{
        return cpumask_of(0);
}

static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}
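
/*
 * Wake a secondary cpu.  UV hubs deliver the INIT and STARTUP sequence
 * through the UVH_IPI_INT MMR on the target pnode rather than through the
 * local APIC ICR, so the generic wakeup path is bypassed.
 */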
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
        unsigned long val;
        int pnode;

        pnode = uv_apicid_to_pnode(phys_apicid);
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
              (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
              ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
              APIC_DM_INIT;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
        mdelay(10);

        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
              (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
              ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
              APIC_DM_STARTUP;
        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

        atomic_set(&init_deasserted, 1);
#endif
        return 0;
}
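
/*
 * IPIs are likewise sent through the destination hub's MMR space
 * (uv_hub_send_ipi()) rather than via the local APIC.
 */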
static void uv_send_IPI_one(int cpu, int vector)
{
        unsigned long apicid;
        int pnode;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        pnode = uv_apicid_to_pnode(apicid);
        uv_hub_send_ipi(pnode, apicid, vector);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
        }
}

static void uv_send_IPI_allbutself(int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        for_each_online_cpu(cpu) {
                if (cpu != this_cpu)
                        uv_send_IPI_one(cpu, vector);
        }
}

static void uv_send_IPI_all(int vector)
{
        uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
{
        return 1;
}

static void uv_init_apic_ldr(void)
{
}

static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        int cpu = cpumask_first(cpumask);

        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
}

static unsigned int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                          const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }
        return per_cpu(x86_cpu_to_apicid, cpu);
}
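
/*
 * The APIC ID read from hardware lacks the node bits on UVH systems, so OR
 * in this cpu's x2apic_extra_bits before handing the ID to generic code.
 */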
static unsigned int x2apic_get_apic_id(unsigned long x)
{
        unsigned int id;

        WARN_ON(preemptible() && num_online_cpus() > 1);
        id = x | __get_cpu_var(x2apic_extra_bits);

        return id;
}

static unsigned long set_apic_id(unsigned int id)
{
        unsigned long x;

        /* maskout x2apic_extra_bits ? */
        x = id;
        return x;
}

static unsigned int uv_read_apic_id(void)
{
        return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
        return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
        apic_write(APIC_SELF_IPI, vector);
}
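
/* APIC driver selected when uv_acpi_madt_oem_check() above returns 1. */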
struct apic __refdata apic_x2apic_uv_x = {

        .name = "UV large system",
        .probe = NULL,
        .acpi_madt_oem_check = uv_acpi_madt_oem_check,
        .apic_id_registered = uv_apic_id_registered,

        .irq_delivery_mode = dest_Fixed,
        .irq_dest_mode = 0, /* physical */

        .target_cpus = uv_target_cpus,
        .disable_esr = 0,
        .dest_logical = APIC_DEST_LOGICAL,
        .check_apicid_used = NULL,
        .check_apicid_present = NULL,

        .vector_allocation_domain = uv_vector_allocation_domain,
        .init_apic_ldr = uv_init_apic_ldr,

        .ioapic_phys_id_map = NULL,
        .setup_apic_routing = NULL,
        .multi_timer_check = NULL,
        .apicid_to_node = NULL,
        .cpu_to_logical_apicid = NULL,
        .cpu_present_to_apicid = default_cpu_present_to_apicid,
        .apicid_to_cpu_present = NULL,
        .setup_portio_remap = NULL,
        .check_phys_apicid_present = default_check_phys_apicid_present,
        .enable_apic_mode = NULL,
        .phys_pkg_id = uv_phys_pkg_id,
        .mps_oem_check = NULL,

        .get_apic_id = x2apic_get_apic_id,
        .set_apic_id = set_apic_id,
        .apic_id_mask = 0xFFFFFFFFu,

        .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,

        .send_IPI_mask = uv_send_IPI_mask,
        .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
        .send_IPI_allbutself = uv_send_IPI_allbutself,
        .send_IPI_all = uv_send_IPI_all,
        .send_IPI_self = uv_send_IPI_self,

        .wakeup_secondary_cpu = uv_wakeup_secondary,
        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert = NULL,
        .smp_callin_clear_local_apic = NULL,
        .inquire_remote_apic = NULL,

        .read = native_apic_msr_read,
        .write = native_apic_msr_write,
        .icr_read = native_x2apic_icr_read,
        .icr_write = native_x2apic_icr_write,
        .wait_icr_idle = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};

static __cpuinit void set_x2apic_extra_bits(int pnode)
{
        __get_cpu_var(x2apic_extra_bits) = (pnode << 6);
}

/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
        int blade;

        for (blade = 0; blade < uv_num_possible_blades(); blade++)
                if (pnode == uv_blade_info[blade].pnode)
                        return blade;
        BUG();
}
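
/*
 * Each ALIAS overlay register describes a window of low memory that the hub
 * aliases; the paired REDIRECT register gives the destination base the
 * window is redirected to.  get_lowmem_redirect() finds the enabled overlay
 * whose base is 0 and reports its size and redirect target.
 */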
struct redir_addr {
        unsigned long redirect;
        unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
        {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
};

static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
        union uvh_si_alias0_overlay_config_u alias;
        union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
        int i;

        for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
                alias.v = uv_read_local_mmr(redir_addrs[i].alias);
                if (alias.s.enable && alias.s.base == 0) {
                        *size = (1UL << alias.s.m_alias);
                        redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
                        *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
                        return;
                }
        }
        *base = *size = 0;
}
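
/*
 * Map a hub address range spanning all pnodes up to max_pnode into the
 * kernel: write-back for GRU space, uncached for MMR and MMIOH space.
 */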
enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int shift,
                            int max_pnode, enum map_type map_type)
{
        unsigned long bytes, paddr;

        paddr = base << shift;
        bytes = (1UL << shift) * (max_pnode + 1);
        printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
               paddr + bytes);
        if (map_type == map_uc)
                init_extra_mapping_uc(paddr, bytes);
        else
                init_extra_mapping_wb(paddr, bytes);
}

static __init void map_gru_high(int max_pnode)
{
        union uvh_rh_gam_gru_overlay_config_mmr_u gru;
        int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

        gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
        if (gru.s.enable) {
                map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
                gru_start_paddr = ((u64)gru.s.base << shift);
                gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
        }
}

static __init void map_mmr_high(int max_pnode)
{
        union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
        int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

        mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
        if (mmr.s.enable)
                map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
        union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
        int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;

        mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
        if (mmioh.s.enable)
                map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
}

static __init void map_low_mmrs(void)
{
        init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
        init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}
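
/*
 * Ask the UV BIOS for the RTC base frequency; if the call fails or returns
 * an implausibly low value, fall back to the hardwired guess below.
 */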
static __init void uv_rtc_init(void)
{
        long status;
        u64 ticks_per_sec;

        status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
                                   &ticks_per_sec);
        if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
                printk(KERN_WARNING
                       "unable to determine platform RTC clock frequency, "
                       "guessing.\n");
                /* BIOS gives wrong value for clock freq. so guess */
                sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
        } else
                sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
        struct timer_list *timer = &uv_hub_info->scir.timer;
        unsigned char bits = uv_hub_info->scir.state;

        /* flip heartbeat bit */
        bits ^= SCIR_CPU_HEARTBEAT;

        /* is this cpu idle? */
        if (idle_cpu(raw_smp_processor_id()))
                bits &= ~SCIR_CPU_ACTIVITY;
        else
                bits |= SCIR_CPU_ACTIVITY;

        /* update system controller interface reg */
        uv_set_scir_bits(bits);

        /* enable next timer period */
        mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
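
/*
 * Arm this cpu's SCIR heartbeat timer, and make sure the boot cpu's timer
 * has been armed as well.
 */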
static void __cpuinit uv_heartbeat_enable(int cpu)
{
        if (!uv_cpu_hub_info(cpu)->scir.enabled) {
                struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

                uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
                setup_timer(timer, uv_heartbeat, cpu);
                timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
                add_timer_on(timer, cpu);
                uv_cpu_hub_info(cpu)->scir.enabled = 1;
        }

        /* check boot cpu */
        if (!uv_cpu_hub_info(0)->scir.enabled)
                uv_heartbeat_enable(0);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
        if (uv_cpu_hub_info(cpu)->scir.enabled) {
                uv_cpu_hub_info(cpu)->scir.enabled = 0;
                del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
        }
        uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
                uv_heartbeat_enable(cpu);
                break;
        case CPU_DOWN_PREPARE:
                uv_heartbeat_disable(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
        hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
        int cpu;

        if (is_uv_system())
                for_each_online_cpu(cpu)
                        uv_heartbeat_enable(cpu);
        return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
        /* CPU 0 initialization will be done via uv_system_init. */
        if (!uv_blade_info)
                return;

        uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

        if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
                set_x2apic_extra_bits(uv_hub_info->pnode);
}
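
/*
 * One-time UV setup: read the hub's M/N address geometry, count the blades
 * present, allocate the blade/node/cpu translation tables, fill in each
 * present cpu's uv_hub_info, and map the GRU/MMR/MMIOH ranges.
 */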
void __init uv_system_init(void)
{
        union uvh_si_addr_map_config_u m_n_config;
        union uvh_node_id_u node_id;
        unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
        int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
        int gnode_extra, max_pnode = 0;
        unsigned long mmr_base, present, paddr;
        unsigned short pnode_mask;

        map_low_mmrs();

        m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
        m_val = m_n_config.s.m_skt;
        n_val = m_n_config.s.n_skt;
        mmr_base =
            uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
            ~UV_MMR_ENABLE;
        pnode_mask = (1 << n_val) - 1;
        node_id.v = uv_read_local_mmr(UVH_NODE_ID);
        gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
        gnode_upper = ((unsigned long)gnode_extra << m_val);
        printk(KERN_DEBUG "UV: N %d, M %d, gnode_upper 0x%lx, gnode_extra 0x%x\n",
               n_val, m_val, gnode_upper, gnode_extra);

        printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
                uv_possible_blades +=
                    hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
        printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

        bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
        uv_blade_info = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_blade_info);
        for (blade = 0; blade < uv_num_possible_blades(); blade++)
                uv_blade_info[blade].memory_nid = -1;

        get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

        bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
        uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_node_to_blade);
        memset(uv_node_to_blade, 255, bytes);

        bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
        uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_cpu_to_blade);
        memset(uv_cpu_to_blade, 255, bytes);

        blade = 0;
        for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
                present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
                for (j = 0; j < 64; j++) {
                        if (!test_bit(j, &present))
                                continue;
                        uv_blade_info[blade].pnode = (i * 64 + j);
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
                        blade++;
                }
        }

        uv_bios_init();
        uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
                            &sn_coherency_id, &sn_region_size);
        uv_rtc_init();

        for_each_present_cpu(cpu) {
                int apicid = per_cpu(x86_cpu_to_apicid, cpu);

                nid = cpu_to_node(cpu);
                pnode = uv_apicid_to_pnode(apicid);
                blade = boot_pnode_to_blade(pnode);
                lcpu = uv_blade_info[blade].nr_possible_cpus;
                uv_blade_info[blade].nr_possible_cpus++;

                /* Any node on the blade, else will contain -1. */
                uv_blade_info[blade].memory_nid = nid;

                uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
                uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
                uv_cpu_hub_info(cpu)->m_val = m_val;
                uv_cpu_hub_info(cpu)->n_val = n_val;
                uv_cpu_hub_info(cpu)->numa_blade_id = blade;
                uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
                uv_cpu_hub_info(cpu)->pnode = pnode;
                uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
                uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
                uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
                uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
                uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
                uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
                uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
                max_pnode = max(pnode, max_pnode);

                printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
                       cpu, apicid, pnode, nid, lcpu, blade);
        }

        /* Add blade/pnode info for nodes without cpus */
        for_each_online_node(nid) {
                if (uv_node_to_blade[nid] >= 0)
                        continue;
                paddr = node_start_pfn(nid) << PAGE_SHIFT;
                paddr = uv_soc_phys_ram_to_gpa(paddr);
                pnode = (paddr >> m_val) & pnode_mask;
                blade = boot_pnode_to_blade(pnode);
                uv_node_to_blade[nid] = blade;
                max_pnode = max(pnode, max_pnode);
        }

        map_gru_high(max_pnode);
        map_mmr_high(max_pnode);
        map_mmioh_high(max_pnode);

        uv_cpu_init();
        uv_scir_register_cpu_notifier();
        proc_mkdir("sgi_uv", NULL);
}