
Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge

Linus Torvalds, 19 years ago
commit fe3f2053fd

+ 1 - 1
arch/powerpc/Kconfig

@@ -227,7 +227,7 @@ config SMP
 	  If you don't know what to do here, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-32)"
+	int "Maximum number of CPUs (2-128)"
 	range 2 128
 	depends on SMP
 	default "32" if PPC64

+ 9 - 1
arch/powerpc/kernel/setup_64.c

@@ -102,7 +102,15 @@ int boot_cpuid_phys = 0;
 dev_t boot_dev;
 u64 ppc64_pft_size;
 
-struct ppc64_caches ppc64_caches;
+/* Pick defaults since we might want to patch instructions
+ * before we've read this from the device tree.
+ */
+struct ppc64_caches ppc64_caches = {
+	.dline_size = 0x80,
+	.log_dline_size = 7,
+	.iline_size = 0x80,
+	.log_iline_size = 7
+};
 EXPORT_SYMBOL_GPL(ppc64_caches);
 
 /*

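Note: the setup_64.c hunk gives ppc64_caches static defaults (a 0x80-byte line, log2 of 7) so that, as the patch's own comment says, code can be patched and cache maintenance done before the device tree has been read. Below is a minimal sketch, not part of the patch, of the kind of flush loop that relies on those fields being sane at early boot; the function name is hypothetical, only ppc64_caches.dline_size comes from the code above.

/* Illustrative only: flush a byte range by data-cache lines, stepping by the
 * line size published in ppc64_caches. With the new defaults this works even
 * before the device tree overrides the values (1 << 7 == 0x80). */
static void example_flush_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long bytes = ppc64_caches.dline_size;
	unsigned long addr;

	for (addr = start & ~(bytes - 1); addr < stop; addr += bytes)
		asm volatile("dcbf 0,%0" : : "r" (addr) : "memory");
	asm volatile("sync" : : : "memory");
}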
+ 1 - 1
arch/powerpc/mm/hash_utils_64.c

@@ -601,7 +601,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	/* Handle hugepage regions */
 	if (unlikely(in_hugepage_area(mm->context, ea))) {
 		DBG_LOW(" -> huge page !\n");
-		return hash_huge_page(mm, access, ea, vsid, local);
+		return hash_huge_page(mm, access, ea, vsid, local, trap);
 	}
 
 	/* Get PTE and page size from page tables */

+ 77 - 18
arch/powerpc/mm/hugetlbpage.c

@@ -148,43 +148,63 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
+struct slb_flush_info {
+	struct mm_struct *mm;
+	u16 newareas;
+};
+
 static void flush_low_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i;
 
-	asm volatile("isync" : : : "memory");
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
+
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
 
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		asm volatile("slbie %0"
 			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
 static void flush_high_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i, j;
 
-	asm volatile("isync" : : : "memory");
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
 
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
+
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
+
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_HIGH_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
 				     :: "r" (((i << HTLB_AREA_SHIFT)
-					     + (j << SID_SHIFT)) | SLBIE_C));
+					      + (j << SID_SHIFT)) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
@@ -229,6 +249,7 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
 static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
 	unsigned long i;
+	struct slb_flush_info fi;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
 	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
@@ -244,19 +265,20 @@ static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 
 	mm->context.low_htlb_areas |= newareas;
 
-	/* update the paca copy of the context struct */
-	get_paca()->context = mm->context;
-
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_low_segments, &fi, 0, 1);
 
 	return 0;
 }
 
 static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
+	struct slb_flush_info fi;
 	unsigned long i;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
@@ -280,7 +302,10 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_high_segments, &fi, 0, 1);
 
 	return 0;
 }
@@ -639,8 +664,36 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
+/*
+ * Called by asm hashtable.S for doing lazy icache flush
+ */
+static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
+						  pte_t pte, int trap)
+{
+	struct page *page;
+	int i;
+
+	if (!pfn_valid(pte_pfn(pte)))
+		return rflags;
+
+	page = pte_page(pte);
+
+	/* page is dirty */
+	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+		if (trap == 0x400) {
+			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
+				__flush_dcache_icache(page_address(page+i));
+			set_bit(PG_arch_1, &page->flags);
+		} else {
+			rflags |= HPTE_R_N;
+		}
+	}
+	return rflags;
+}
+
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
-		   unsigned long ea, unsigned long vsid, int local)
+		   unsigned long ea, unsigned long vsid, int local,
+		   unsigned long trap)
 {
 	pte_t *ptep;
 	unsigned long old_pte, new_pte;
@@ -691,6 +744,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	rflags = 0x2 | (!(new_pte & _PAGE_RW));
  	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
 	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		/* No CPU has hugepages but lacks no execute, so we
+		 * don't need to worry about that case */
+		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
+						       trap);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(old_pte & _PAGE_HASHPTE)) {
@@ -703,7 +761,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (old_pte & _PAGE_F_GIX) >> 12;
 
-		if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
+		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
+					 local) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
 

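Note: the hugetlbpage.c change stops packing the area bitmap into the void * argument of on_each_cpu and instead passes a stack-allocated slb_flush_info that also carries the mm, so each CPU first checks current->active_mm and skips both the paca context update and the slbie loop unless it is actually running the affected address space. Passing trap down from hash_page (trap 0x400 being the instruction storage interrupt) additionally lets the hugepage path do the same lazy icache flush as the base-page path. A minimal sketch of the caller-side pattern follows, assuming the four-argument on_each_cpu(func, info, retry, wait) of this kernel era; the helper name is hypothetical, struct slb_flush_info and flush_low_segments are from the patch.

/* Illustrative only: build the flush descriptor on the stack and broadcast it. */
static void example_flush_low_areas(struct mm_struct *mm, u16 newareas)
{
	struct slb_flush_info fi = {
		.mm = mm,
		.newareas = newareas,
	};

	/* The context change must reach memory before any CPU runs the flush. */
	mb();
	/* wait == 1: fi lives on our stack, so every CPU must finish first. */
	on_each_cpu(flush_low_segments, &fi, 0, 1);
}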
+ 1 - 1
arch/powerpc/mm/numa.c

@@ -125,7 +125,7 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn,
 
 	/* We didnt find a matching region, return start/end as 0 */
 	if (*start_pfn == -1UL)
-		start_pfn = 0;
+		*start_pfn = 0;
 }
 
 static inline void map_cpu_to_node(int cpu, int node)

+ 1 - 6
arch/powerpc/mm/stab.c

@@ -288,11 +288,6 @@ void stab_initialize(unsigned long stab)
 		return;
 	}
 #endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PPC_PSERIES
-	if (platform_is_lpar()) {
-		plpar_hcall_norets(H_SET_ASR, stabreal);
-		return;
-	}
-#endif
+
 	mtspr(SPRN_ASR, stabreal);
 }

+ 16 - 5
arch/powerpc/platforms/powermac/feature.c

@@ -1650,11 +1650,19 @@ void pmac_tweak_clock_spreading(int enable)
 	 */
 
 	if (macio->type == macio_intrepid) {
-		if (enable)
-			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
-		else
-			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
-		mdelay(40);
+		struct device_node *clock =
+			of_find_node_by_path("/uni-n@f8000000/hw-clock");
+		if (clock && get_property(clock, "platform-do-clockspreading",
+					  NULL)) {
+			printk(KERN_INFO "%sabling clock spreading on Intrepid"
+			       " ASIC\n", enable ? "En" : "Dis");
+			if (enable)
+				UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+			else
+				UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+			mdelay(40);
+		}
+		of_node_put(clock);
 	}
 
 	while (machine_is_compatible("PowerBook5,2") ||
@@ -1724,6 +1732,9 @@ void pmac_tweak_clock_spreading(int enable)
 			pmac_low_i2c_close(ui2c);
 			break;
 		}
+		printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
+		       enable ? "En" : "Dis");
+
 		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
 		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
 		DBG("write result: %d,", rc);

+ 7 - 4
arch/powerpc/platforms/pseries/iommu.c

@@ -109,6 +109,9 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	u64 rc;
 	union tce_entry tce;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
@@ -143,10 +146,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	union tce_entry tce, *tcep;
 	long l, limit;
 
-	tcenum <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
-
-	if (npages == 1)
+	if (TCE_PAGE_FACTOR == 0 && npages == 1)
 		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					   direction);
 
@@ -164,6 +164,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		__get_cpu_var(tce_page) = tcep;
 	}
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;

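Note: in the iommu.c hunks the TCE_PAGE_FACTOR scaling moves into tce_build_pSeriesLP itself, so tce_buildmulti_pSeriesLP must defer its own scaling until after the two points where it can delegate to the single-entry builder (the npages == 1 fast path and the tce_page allocation fallback), otherwise the values would be scaled twice. The fast path is also guarded by TCE_PAGE_FACTOR == 0 so that a single kernel page which expands into several TCEs still takes the batched path. A small worked example follows; the 64K kernel page size is an assumption for illustration, and the EX_* macros are hypothetical stand-ins for the real TCE header definitions.

/* Illustrative arithmetic only. */
#define EX_PAGE_SHIFT		16	/* assumption: 64K kernel pages */
#define EX_TCE_SHIFT		12	/* 4K TCE (I/O) pages on pSeries */
#define EX_TCE_PAGE_FACTOR	(EX_PAGE_SHIFT - EX_TCE_SHIFT)	/* == 4 */

/* A request for one kernel page becomes npages << 4 == 16 TCE entries after
 * scaling, so the npages == 1 fast path is only taken when the factor is 0. */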
+ 0 - 12
arch/powerpc/platforms/pseries/lpar.c

@@ -298,18 +298,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 	if (!(vflags & HPTE_V_BOLTED))
 		DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
 
-#if 1
-	{
-		int i;
-		for (i=0;i<8;i++) {
-			unsigned long w0, w1;
-			plpar_pte_read(0, hpte_group, &w0, &w1);
-			BUG_ON (HPTE_V_COMPARE(hpte_v, w0)
-				&& (w0 & HPTE_V_VALID));
-		}
-	}
-#endif
-
 	/* Now fill in the actual HPTE */
 	/* Set CEC cookie to 0         */
 	/* Zero page = 0               */

+ 3 - 3
arch/ppc/Kconfig

@@ -767,14 +767,14 @@ config CPM2
 	  on it (826x, 827x, 8560).
 
 config PPC_CHRP
-	bool "  Common Hardware Reference Platform (CHRP) based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
 	default y
 
 config PPC_PMAC
-	bool "  Apple PowerMac based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_INDIRECT_PCI
 	default y
@@ -785,7 +785,7 @@ config PPC_PMAC64
 	default y
 
 config PPC_PREP
-	bool "  PowerPC Reference Platform (PReP) based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_I8259
 	select PPC_INDIRECT_PCI

+ 4 - 0
arch/ppc/kernel/smp.c

@@ -301,6 +301,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Probe platform for CPUs: always linear. */
 	num_cpus = smp_ops->probe();
+	
+	if (num_cpus < 2)
+		smp_tb_synchronized = 1;
+	
 	for (i = 0; i < num_cpus; ++i)
 		cpu_set(i, cpu_possible_map);
 

+ 15 - 5
arch/ppc/platforms/pmac_feature.c

@@ -1606,11 +1606,19 @@ void pmac_tweak_clock_spreading(int enable)
 	 */
 
 	if (macio->type == macio_intrepid) {
-		if (enable)
-			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
-		else
-			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
-		mdelay(40);
+		struct device_node *clock =
+			of_find_node_by_path("/uni-n@f8000000/hw-clock");
+		if (clock && get_property(clock, "platform-do-clockspreading",
+					  NULL)) {
+			printk(KERN_INFO "%sabling clock spreading on Intrepid"
+			       " ASIC\n", enable ? "En" : "Dis");
+			if (enable)
+				UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+			else
+				UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+			mdelay(40);
+		}
+		of_node_put(clock);
 	}
 
 	while (machine_is_compatible("PowerBook5,2") ||
@@ -1680,6 +1688,8 @@ void pmac_tweak_clock_spreading(int enable)
 			pmac_low_i2c_close(ui2c);
 			break;
 		}
+		printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
+		       enable ? "En" : "Dis");
 		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
 		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
 		DBG("write result: %d,", rc);

+ 2 - 2
drivers/macintosh/windfarm_pm81.c

@@ -207,7 +207,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
 	},
 	/* Model ID 3 */
 	{
-		.model_id	= 2,
+		.model_id	= 3,
 		.itarget	= 0x350000,
 		.gd		= 0x08e00000,
 		.gp		= 0x00566666,
@@ -219,7 +219,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
 	},
 	/* Model ID 5 */
 	{
-		.model_id	= 2,
+		.model_id	= 5,
 		.itarget	= 0x3a0000,
 		.gd		= 0x15400000,
 		.gp		= 0x00233333,

+ 2 - 1
include/asm-powerpc/mmu.h

@@ -220,7 +220,8 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
 			   unsigned int local);
 struct mm_struct;
 extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
-			  unsigned long ea, unsigned long vsid, int local);
+			  unsigned long ea, unsigned long vsid, int local,
+			  unsigned long trap);
 
 extern void htab_finish_init(void);
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,