@@ -26,6 +26,20 @@ extern unsigned long __toc_start;
 
 #ifdef CONFIG_PPC_BOOK3S
 
+/*
+ * We only have to have statically allocated lppaca structs on
+ * legacy iSeries, which supports at most 64 cpus.
+ */
+#ifdef CONFIG_PPC_ISERIES
+#if NR_CPUS < 64
+#define NR_LPPACAS NR_CPUS
+#else
+#define NR_LPPACAS 64
+#endif
+#else /* not iSeries */
+#define NR_LPPACAS 1
+#endif
+
 /*
  * The structure which the hypervisor knows about - this structure
  * should not cross a page boundary. The vpa_init/register_vpa call
@@ -36,7 +50,7 @@ extern unsigned long __toc_start;
  * will suffice to ensure that it doesn't cross a page boundary.
  */
 struct lppaca lppaca[] = {
-	[0 ... (NR_CPUS-1)] = {
+	[0 ... (NR_LPPACAS-1)] = {
 		.desc = 0xd397d781,	/* "LpPa" */
 		.size = sizeof(struct lppaca),
 		.dyn_proc_status = 2,
@@ -49,6 +63,54 @@ struct lppaca lppaca[] = {
 	},
 };
 
+static struct lppaca *extra_lppacas;
+static long __initdata lppaca_size;
+
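+/* Reserve memblock space for the lppacas of any cpus above NR_LPPACAS. */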
+static void allocate_lppacas(int nr_cpus, unsigned long limit)
+{
+	if (nr_cpus <= NR_LPPACAS)
+		return;
+
+	lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) *
+				 (nr_cpus - NR_LPPACAS));
+	extra_lppacas = __va(memblock_alloc_base(lppaca_size,
+						 PAGE_SIZE, limit));
+}
+
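+/* Static lppaca for cpu < NR_LPPACAS, otherwise one from the extra block. */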
+static struct lppaca *new_lppaca(int cpu)
+{
+	struct lppaca *lp;
+
+	if (cpu < NR_LPPACAS)
+		return &lppaca[cpu];
+
+	lp = extra_lppacas + (cpu - NR_LPPACAS);
+	*lp = lppaca[0];
+
+	return lp;
+}
+
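+/* Return any extra lppaca space that the possible cpus do not need. */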
+static void free_lppacas(void)
+{
+	long new_size = 0, nr;
+
+	if (!lppaca_size)
+		return;
+	nr = num_possible_cpus() - NR_LPPACAS;
+	if (nr > 0)
+		new_size = PAGE_ALIGN(nr * sizeof(struct lppaca));
+	if (new_size >= lppaca_size)
+		return;
+
+	memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size);
+	lppaca_size = new_size;
+}
+
+#else
+
+static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { }
+static inline void free_lppacas(void) { }
+
 #endif /* CONFIG_PPC_BOOK3S */
 
 #ifdef CONFIG_PPC_STD_MMU_64
@@ -88,7 +150,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 	unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL;
 
 #ifdef CONFIG_PPC_BOOK3S
-	new_paca->lppaca_ptr = &lppaca[cpu];
+	new_paca->lppaca_ptr = new_lppaca(cpu);
 #else
 	new_paca->kernel_pgd = swapper_pg_dir;
 #endif
@@ -144,6 +206,8 @@ void __init allocate_pacas(void)
 	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
 		paca_size, nr_cpus, paca);
 
+	allocate_lppacas(nr_cpus, limit);
+
 	/* Can't use for_each_*_cpu, as they aren't functional yet */
 	for (cpu = 0; cpu < nr_cpus; cpu++)
 		initialise_paca(&paca[cpu], cpu);
@@ -164,4 +228,6 @@ void __init free_unused_pacas(void)
 			paca_size - new_size);
 
 	paca_size = new_size;
+
+	free_lppacas();
 }
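
For readers who want to see the static-plus-dynamic split in isolation, the sketch below mirrors the patch's indexing scheme outside the kernel. It is an illustration under stated assumptions, not part of the patch: struct lppaca_demo, plain malloc() standing in for memblock_alloc_base(), and an arbitrary cpu count are stand-ins, and it relies on the same GCC range-initializer extension the patch itself uses.

	/* Illustration only -- not kernel code. */
	#include <stdio.h>
	#include <stdlib.h>

	#define NR_LPPACAS 1	/* the non-iSeries value chosen by the patch */

	struct lppaca_demo {	/* stand-in for struct lppaca */
		unsigned int desc;
		unsigned int size;
	};

	/* The first NR_LPPACAS entries stay statically allocated. */
	static struct lppaca_demo lppaca[NR_LPPACAS] = {
		[0 ... (NR_LPPACAS - 1)] = {
			.desc = 0xd397d781,
			.size = sizeof(struct lppaca_demo),
		},
	};

	static struct lppaca_demo *extra_lppacas;

	/* malloc() stands in for the boot-time memblock allocation. */
	static void allocate_lppacas(int nr_cpus)
	{
		if (nr_cpus <= NR_LPPACAS)
			return;
		extra_lppacas = malloc((nr_cpus - NR_LPPACAS) *
				       sizeof(struct lppaca_demo));
	}

	/* Same mapping as new_lppaca(): static slot first, then the extra block. */
	static struct lppaca_demo *lppaca_for(int cpu)
	{
		struct lppaca_demo *lp;

		if (cpu < NR_LPPACAS)
			return &lppaca[cpu];

		lp = extra_lppacas + (cpu - NR_LPPACAS);
		*lp = lppaca[0];	/* seed the new entry from the template */
		return lp;
	}

	int main(void)
	{
		int nr_cpus = 4;	/* arbitrary demo value */

		allocate_lppacas(nr_cpus);
		for (int cpu = 0; cpu < nr_cpus; cpu++)
			printf("cpu %d -> %s lppaca, desc 0x%x\n", cpu,
			       cpu < NR_LPPACAS ? "static" : "extra",
			       lppaca_for(cpu)->desc);
		free(extra_lppacas);
		return 0;
	}

In the kernel the same split means only NR_LPPACAS structures remain in the image; the rest are carved out of memblock at boot and any surplus beyond the possible cpus is handed back by free_lppacas().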