@@ -33,6 +33,7 @@
 #include <linux/serial.h>
 #include <linux/serial_8250.h>
 #include <linux/debugfs.h>
+#include <linux/percpu.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/processor.h>
@@ -57,6 +58,7 @@
 #include <asm/mmu.h>
 #include <asm/lmb.h>
 #include <asm/xmon.h>
+#include <asm/cputhreads.h>
 
 #include "setup.h"
 
@@ -327,6 +329,31 @@ void __init check_for_initrd(void)
 
 #ifdef CONFIG_SMP
 
+int threads_per_core, threads_shift;
+cpumask_t threads_core_mask;
+
+static void __init cpu_init_thread_core_maps(int tpc)
+{
+	int i;
+
+	threads_per_core = tpc;
+	threads_core_mask = CPU_MASK_NONE;
+
+	/* This implementation only supports power of 2 number of threads
+	 * for simplicity and performance
+	 */
+	threads_shift = ilog2(tpc);
+	BUG_ON(tpc != (1 << threads_shift));
+
+	for (i = 0; i < tpc; i++)
+		cpu_set(i, threads_core_mask);
+
+	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
+	       tpc, tpc > 1 ? "s" : "");
+	printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
+}
+
+
 /**
  * setup_cpu_maps - initialize the following cpu maps:
  *                  cpu_possible_map
@@ -350,22 +377,32 @@ void __init smp_setup_cpu_maps(void)
 {
 	struct device_node *dn = NULL;
 	int cpu = 0;
+	int nthreads = 1;
+
+	DBG("smp_setup_cpu_maps()\n");
 
 	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
 		const int *intserv;
-		int j, len = sizeof(u32), nthreads = 1;
+		int j, len;
+
+		DBG("  * %s...\n", dn->full_name);
 
 		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
 				&len);
-		if (intserv)
+		if (intserv) {
 			nthreads = len / sizeof(int);
-		else {
+			DBG("    ibm,ppc-interrupt-server#s -> %d threads\n",
+			    nthreads);
+		} else {
+			DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
 			intserv = of_get_property(dn, "reg", NULL);
 			if (!intserv)
 				intserv = &cpu;	/* assume logical == phys */
 		}
 
 		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
+			DBG("    thread %d -> cpu %d (hard id %d)\n",
+			    j, cpu, intserv[j]);
 			cpu_set(cpu, cpu_present_map);
 			set_hard_smp_processor_id(cpu, intserv[j]);
 			cpu_set(cpu, cpu_possible_map);
@@ -373,6 +410,12 @@ void __init smp_setup_cpu_maps(void)
 		}
 	}
 
+	/* If no SMT supported, nthreads is forced to 1 */
+	if (!cpu_has_feature(CPU_FTR_SMT)) {
+		DBG("  SMT disabled ! nthreads forced to 1\n");
+		nthreads = 1;
+	}
+
 #ifdef CONFIG_PPC64
 	/*
 	 * On pSeries LPAR, we need to know how many cpus
@@ -395,7 +438,7 @@ void __init smp_setup_cpu_maps(void)
 
 		/* Double maxcpus for processors which have SMT capability */
 		if (cpu_has_feature(CPU_FTR_SMT))
-			maxcpus *= 2;
+			maxcpus *= nthreads;
 
 		if (maxcpus > NR_CPUS) {
 			printk(KERN_WARNING
@@ -412,9 +455,16 @@ void __init smp_setup_cpu_maps(void)
 	out:
 		of_node_put(dn);
 	}
-
 	vdso_data->processorCount = num_present_cpus();
 #endif /* CONFIG_PPC64 */
+
+	/* Initialize CPU <=> thread mapping
+	 *
+	 * WARNING: We assume that the number of threads is the same for
+	 * every CPU in the system. If that is not the case, then some code
+	 * here will have to be reworked
+	 */
+	cpu_init_thread_core_maps(nthreads);
 }
 
 /*
@@ -424,17 +474,19 @@ void __init smp_setup_cpu_maps(void)
  */
 void __init smp_setup_cpu_sibling_map(void)
 {
-#if defined(CONFIG_PPC64)
-	int cpu;
+#ifdef CONFIG_PPC64
+	int i, cpu, base;
 
-	/*
-	 * Do the sibling map; assume only two threads per processor.
-	 */
 	for_each_possible_cpu(cpu) {
-		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-		if (cpu_has_feature(CPU_FTR_SMT))
-			cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu));
+		DBG("Sibling map for CPU %d:", cpu);
+		base = cpu_first_thread_in_core(cpu);
+		for (i = 0; i < threads_per_core; i++) {
+			cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
+			DBG(" %d", base + i);
+		}
+		DBG("\n");
 	}
+
 #endif /* CONFIG_PPC64 */
 }
 #endif /* CONFIG_SMP */
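
Note on the helpers used above: cpu_first_thread_in_core() and threads_per_core are not defined in this file; they are expected to come from the newly included <asm/cputhreads.h>, whose contents are not part of this hunk. The power-of-2 restriction enforced by BUG_ON(tpc != (1 << threads_shift)) is what lets the thread/core mapping reduce to plain mask arithmetic. The standalone userspace sketch below is an illustrative assumption of that arithmetic, not the authoritative header definitions:

    /* Standalone sketch (not kernel code): mask arithmetic that a power-of-2
     * threads_per_core enables, mirroring the helper names this patch assumes
     * from <asm/cputhreads.h>. Illustrative only. */
    #include <stdio.h>

    static int threads_per_core = 4;	/* example value; must be a power of 2 */

    static int cpu_thread_in_core(int cpu)
    {
    	return cpu & (threads_per_core - 1);	/* thread index within its core */
    }

    static int cpu_first_thread_in_core(int cpu)
    {
    	return cpu & ~(threads_per_core - 1);	/* first logical CPU of the core */
    }

    int main(void)
    {
    	int cpu;

    	for (cpu = 0; cpu < 8; cpu++)
    		printf("cpu %d: core base %d, thread %d\n",
    		       cpu, cpu_first_thread_in_core(cpu),
    		       cpu_thread_in_core(cpu));
    	return 0;
    }

With threads_per_core = 4, cpus 0-3 share core base 0 and cpus 4-7 share core base 4, which matches the set of siblings the reworked smp_setup_cpu_sibling_map() loop inserts for each cpu.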