
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc fixes from Ben Herrenschmidt:
 "Here are a few powerpc fixes, all aimed at -stable, found in part
  thanks to the ramping up of a major distro testing and in part thanks
  to the LE guys hitting all sort interesting corner cases.

  The scariest are probably the register clobber issues in
  csum_partial_copy_generic(), especially since Anton even had a test
  case for that thing, and it still didn't manage to hit the bugs :-)

  Another highlight is that memory hotplug should work again with these
  fixes.

  Oh, and the vio modalias one is worse than the cset implies, as it
  upsets distro installers (or so I've been told, at least), which is
  why I'm shooting it to stable"

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/tm: Switch out userspace PPR and DSCR sooner
  powerpc/tm: Turn interrupts hard off in tm_reclaim()
  powerpc/perf: Fix handling of FAB events
  powerpc/vio: Fix modalias_show return values
  powerpc/iommu: Use GFP_KERNEL instead of GFP_ATOMIC in iommu_init_table()
  powerpc/sysfs: Disable writing to PURR in guest mode
  powerpc: Restore registers on error exit from csum_partial_copy_generic()
  powerpc: Fix parameter clobber in csum_partial_copy_generic()
  powerpc: Fix memory hotplug with sparse vmemmap
Linus Torvalds, 11 years ago
commit c15f5bbc94

+ 1 - 1
arch/powerpc/kernel/iommu.c

@@ -661,7 +661,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 	/* number of bytes needed for the bitmap */
 	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 
-	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
 	if (!page)
 		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
 	tbl->it_map = page_address(page);
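
For context, a minimal sketch of the corrected allocation (the helper name is made up): iommu_init_table() runs in process context where sleeping is allowed, and the bitmap can span several pages, so GFP_KERNEL is both legal here and far more likely than GFP_ATOMIC to satisfy a high-order request.

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Hypothetical helper mirroring the fixed call: may sleep and
	 * reclaim, which is fine in this init path and avoids spurious
	 * failures for multi-page (high-order) bitmap sizes. */
	static unsigned long *alloc_it_map(int nid, unsigned long sz)
	{
		struct page *page;

		page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
		return page ? (unsigned long *)page_address(page) : NULL;
	}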

+ 16 - 2
arch/powerpc/kernel/sysfs.c

@@ -17,6 +17,7 @@
 #include <asm/machdep.h>
 #include <asm/smp.h>
 #include <asm/pmc.h>
+#include <asm/firmware.h>
 
 #include "cacheinfo.h"
 
@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
 SYSFS_PMCSETUP(dscr, SPRN_DSCR);
 SYSFS_PMCSETUP(pir, SPRN_PIR);
 
+/*
+  Let's only enable read for phyp resources and
+  enable write when needed with a separate function.
+  Let's be conservative and default to pseries.
+*/
 static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
 static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
 static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
-static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
 static DEVICE_ATTR(pir, 0400, show_pir, NULL);
 
 unsigned long dscr_default = 0;
 EXPORT_SYMBOL(dscr_default);
 
+static void add_write_permission_dev_attr(struct device_attribute *attr)
+{
+	attr->attr.mode |= 0200;
+}
+
 static ssize_t show_dscr_default(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -394,8 +405,11 @@ static void register_cpu_online(unsigned int cpu)
 	if (cpu_has_feature(CPU_FTR_MMCRA))
 		device_create_file(s, &dev_attr_mmcra);
 
-	if (cpu_has_feature(CPU_FTR_PURR))
+	if (cpu_has_feature(CPU_FTR_PURR)) {
+		if (!firmware_has_feature(FW_FEATURE_LPAR))
+			add_write_permission_dev_attr(&dev_attr_purr);
 		device_create_file(s, &dev_attr_purr);
+	}
 
 	if (cpu_has_feature(CPU_FTR_SPURR))
 		device_create_file(s, &dev_attr_spurr);
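
The user-visible effect, sketched from userspace (standard sysfs CPU path; exact errno assumed): on an LPAR/guest the purr attribute is now mode 0400, so opening it for writing fails cleanly instead of letting the guest write PURR.

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* On a PowerVM guest this should now fail (EACCES); on bare
		 * metal the write bit is OR-ed back in, so root succeeds. */
		int fd = open("/sys/devices/system/cpu/cpu0/purr", O_WRONLY);

		if (fd < 0)
			printf("PURR not writable: %s\n", strerror(errno));
		else
			close(fd);
		return 0;
	}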

+ 64 - 31
arch/powerpc/kernel/tm.S

@@ -79,6 +79,11 @@ _GLOBAL(tm_abort)
 	TABORT(R3)
 	blr
 
+	.section	".toc","aw"
+DSCR_DEFAULT:
+	.tc dscr_default[TC],dscr_default
+
+	.section	".text"
 
 /* void tm_reclaim(struct thread_struct *thread,
  *                 unsigned long orig_msr,
@@ -123,6 +128,7 @@ _GLOBAL(tm_reclaim)
 	mr	r15, r14
 	ori	r15, r15, MSR_FP
 	li	r16, MSR_RI
+	ori	r16, r16, MSR_EE /* IRQs hard off */
 	andc	r15, r15, r16
 	oris	r15, r15, MSR_VEC@h
 #ifdef CONFIG_VSX
@@ -187,11 +193,18 @@ dont_backup_fp:
 	std	r1, PACATMSCRATCH(r13)
 	ld	r1, PACAR1(r13)
 
+	/* Store the PPR in r11 and reset to decent value */
+	std	r11, GPR11(r1)			/* Temporary stash */
+	mfspr	r11, SPRN_PPR
+	HMT_MEDIUM
+
 	/* Now get some more GPRS free */
 	std	r7, GPR7(r1)			/* Temporary stash */
 	std	r12, GPR12(r1)			/* ''   ''    ''   */
 	ld	r12, STACK_PARAM(0)(r1)		/* Param 0, thread_struct * */
 
+	std	r11, THREAD_TM_PPR(r12)		/* Store PPR and free r11 */
+
 	addi	r7, r12, PT_CKPT_REGS		/* Thread's ckpt_regs */
 
 	/* Make r7 look like an exception frame so that we
@@ -203,15 +216,19 @@ dont_backup_fp:
 	SAVE_GPR(0, r7)				/* user r0 */
 	SAVE_GPR(2, r7)			/* user r2 */
 	SAVE_4GPRS(3, r7)			/* user r3-r6 */
-	SAVE_4GPRS(8, r7)			/* user r8-r11 */
+	SAVE_GPR(8, r7)				/* user r8 */
+	SAVE_GPR(9, r7)				/* user r9 */
+	SAVE_GPR(10, r7)			/* user r10 */
 	ld	r3, PACATMSCRATCH(r13)		/* user r1 */
 	ld	r4, GPR7(r1)			/* user r7 */
-	ld	r5, GPR12(r1)			/* user r12 */
-	GET_SCRATCH0(6)				/* user r13 */
+	ld	r5, GPR11(r1)			/* user r11 */
+	ld	r6, GPR12(r1)			/* user r12 */
+	GET_SCRATCH0(8)				/* user r13 */
 	std	r3, GPR1(r7)
 	std	r4, GPR7(r7)
-	std	r5, GPR12(r7)
-	std	r6, GPR13(r7)
+	std	r5, GPR11(r7)
+	std	r6, GPR12(r7)
+	std	r8, GPR13(r7)
 
 	SAVE_NVGPRS(r7)				/* user r14-r31 */
 
@@ -234,14 +251,12 @@ dont_backup_fp:
 	std	r6, _XER(r7)
 
 
-	/* ******************** TAR, PPR, DSCR ********** */
+	/* ******************** TAR, DSCR ********** */
 	mfspr	r3, SPRN_TAR
-	mfspr	r4, SPRN_PPR
-	mfspr	r5, SPRN_DSCR
+	mfspr	r4, SPRN_DSCR
 
 	std	r3, THREAD_TM_TAR(r12)
-	std	r4, THREAD_TM_PPR(r12)
-	std	r5, THREAD_TM_DSCR(r12)
+	std	r4, THREAD_TM_DSCR(r12)
 
 	/* MSR and flags:  We don't change CRs, and we don't need to alter
 	 * MSR.
@@ -258,7 +273,7 @@ dont_backup_fp:
 	std	r3, THREAD_TM_TFHAR(r12)
 	std	r4, THREAD_TM_TFIAR(r12)
 
-	/* AMR and PPR are checkpointed too, but are unsupported by Linux. */
+	/* AMR is checkpointed too, but is unsupported by Linux. */
 
 	/* Restore original MSR/IRQ state & clear TM mode */
 	ld	r14, TM_FRAME_L0(r1)		/* Orig MSR */
@@ -274,6 +289,12 @@ dont_backup_fp:
 	mtcr	r4
 	mtlr	r0
 	ld	r2, 40(r1)
+
+	/* Load system default DSCR */
+	ld	r4, DSCR_DEFAULT@toc(r2)
+	ld	r0, 0(r4)
+	mtspr	SPRN_DSCR, r0
+
 	blr
 
 
@@ -358,25 +379,24 @@ dont_restore_fp:
 
 restore_gprs:
 
-	/* ******************** TAR, PPR, DSCR ********** */
-	ld	r4, THREAD_TM_TAR(r3)
-	ld	r5, THREAD_TM_PPR(r3)
-	ld	r6, THREAD_TM_DSCR(r3)
+	/* ******************** CR,LR,CCR,MSR ********** */
+	ld	r4, _CTR(r7)
+	ld	r5, _LINK(r7)
+	ld	r6, _CCR(r7)
+	ld	r8, _XER(r7)
 
-	mtspr	SPRN_TAR,	r4
-	mtspr	SPRN_PPR,	r5
-	mtspr	SPRN_DSCR,	r6
+	mtctr	r4
+	mtlr	r5
+	mtcr	r6
+	mtxer	r8
 
-	/* ******************** CR,LR,CCR,MSR ********** */
-	ld	r3, _CTR(r7)
-	ld	r4, _LINK(r7)
-	ld	r5, _CCR(r7)
-	ld	r6, _XER(r7)
+	/* ******************** TAR ******************** */
+	ld	r4, THREAD_TM_TAR(r3)
+	mtspr	SPRN_TAR,	r4
 
-	mtctr	r3
-	mtlr	r4
-	mtcr	r5
-	mtxer	r6
+	/* Load up the PPR and DSCR in GPRs only at this stage */
+	ld	r5, THREAD_TM_DSCR(r3)
+	ld	r6, THREAD_TM_PPR(r3)
 
 	/* Clear the MSR RI since we are about to change R1.  EE is already off
 	 */
@@ -384,19 +404,26 @@ restore_gprs:
 	mtmsrd	r4, 1
 
 	REST_4GPRS(0, r7)			/* GPR0-3 */
-	REST_GPR(4, r7)				/* GPR4-6 */
-	REST_GPR(5, r7)
-	REST_GPR(6, r7)
+	REST_GPR(4, r7)				/* GPR4 */
 	REST_4GPRS(8, r7)			/* GPR8-11 */
 	REST_2GPRS(12, r7)			/* GPR12-13 */
 
 	REST_NVGPRS(r7)				/* GPR14-31 */
 
-	ld	r7, GPR7(r7)			/* GPR7 */
+	/* Load up PPR and DSCR here so we don't run with user values for long
+	 */
+	mtspr	SPRN_DSCR, r5
+	mtspr	SPRN_PPR, r6
+
+	REST_GPR(5, r7)				/* GPR5-7 */
+	REST_GPR(6, r7)
+	ld	r7, GPR7(r7)
 
 	/* Commit register state as checkpointed state: */
 	TRECHKPT
 
+	HMT_MEDIUM
+
 	/* Our transactional state has now changed.
 	 *
 	 * Now just get out of here.  Transactional (current) state will be
@@ -419,6 +446,12 @@ restore_gprs:
 	mtcr	r4
 	mtlr	r0
 	ld	r2, 40(r1)
+
+	/* Load system default DSCR */
+	ld	r4, DSCR_DEFAULT@toc(r2)
+	ld	r0, 0(r4)
+	mtspr	SPRN_DSCR, r0
+
 	blr
 
 	/* ****************************************************************** */
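
A C rendering of the new ordering (the authoritative code is the asm above; the helper name is invented, while mfspr(), SPRN_PPR, HMT_medium() and thread_struct's tm_ppr are existing kernel symbols): the user's PPR is stashed near the top of tm_reclaim() so the kernel stops running at a user-chosen thread priority almost immediately, the user PPR/DSCR are only loaded again right before TRECHKPT, and every exit path reinstates the system default DSCR via the new TOC entry.

	#include <asm/processor.h>	/* struct thread_struct, HMT_medium() */
	#include <asm/reg.h>		/* mfspr(), SPRN_PPR */

	/* Hypothetical helper showing the save-early idea: grab the user
	 * PPR first, then drop to medium priority, keeping the window
	 * where the kernel runs at the user's priority as small as
	 * possible. */
	static void stash_user_ppr(struct thread_struct *thr)
	{
		thr->tm_ppr = mfspr(SPRN_PPR);
		HMT_medium();
	}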

+ 8 - 4
arch/powerpc/kernel/vio.c

@@ -1530,11 +1530,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 	const char *cp;
 
 	dn = dev->of_node;
-	if (!dn)
-		return -ENODEV;
+	if (!dn) {
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
 	cp = of_get_property(dn, "compatible", NULL);
-	if (!cp)
-		return -ENODEV;
+	if (!cp) {
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
 
 	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
 }
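
What installers actually see (device path hypothetical): udev and installer tooling read modalias unconditionally, and a read(2) failing with ENODEV looks like a broken sysfs file, whereas after the fix the read simply returns an empty line.

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		/* hypothetical vio device node; real names vary by machine */
		FILE *f = fopen("/sys/bus/vio/devices/4000/modalias", "r");

		if (f && fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* "vio:T<type>S<compat>\n" or just "\n" */
		if (f)
			fclose(f);
		return 0;
	}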

+ 42 - 16
arch/powerpc/lib/checksum_64.S

@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
 	blr
 
 
-	.macro source
+	.macro srcnr
 100:
 	.section __ex_table,"a"
 	.align 3
-	.llong 100b,.Lsrc_error
+	.llong 100b,.Lsrc_error_nr
 	.previous
 	.endm
 
-	.macro dest
+	.macro source
+150:
+	.section __ex_table,"a"
+	.align 3
+	.llong 150b,.Lsrc_error
+	.previous
+	.endm
+
+	.macro dstnr
 200:
 	.section __ex_table,"a"
 	.align 3
-	.llong 200b,.Ldest_error
+	.llong 200b,.Ldest_error_nr
+	.previous
+	.endm
+
+	.macro dest
+250:
+	.section __ex_table,"a"
+	.align 3
+	.llong 250b,.Ldest_error
 	.previous
 	.endm
 
@@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic)
 	rldicl. r6,r3,64-1,64-2		/* r6 = (r3 & 0x3) >> 1 */
 	beq	.Lcopy_aligned
 
-	li	r7,4
-	sub	r6,r7,r6
+	li	r9,4
+	sub	r6,r9,r6
 	mtctr	r6
 
 1:
-source;	lhz	r6,0(r3)		/* align to doubleword */
+srcnr;	lhz	r6,0(r3)		/* align to doubleword */
 	subi	r5,r5,2
 	addi	r3,r3,2
 	adde	r0,r0,r6
-dest;	sth	r6,0(r4)
+dstnr;	sth	r6,0(r4)
 	addi	r4,r4,2
 	bdnz	1b
 
@@ -392,10 +408,10 @@ dest;	std	r16,56(r4)
 
 	mtctr	r6
 3:
-source;	ld	r6,0(r3)
+srcnr;	ld	r6,0(r3)
 	addi	r3,r3,8
 	adde	r0,r0,r6
-dest;	std	r6,0(r4)
+dstnr;	std	r6,0(r4)
 	addi	r4,r4,8
 	bdnz	3b
 
@@ -405,10 +421,10 @@ dest;	std	r6,0(r4)
 	srdi.	r6,r5,2
 	beq	.Lcopy_tail_halfword
 
-source;	lwz	r6,0(r3)
+srcnr;	lwz	r6,0(r3)
 	addi	r3,r3,4
 	adde	r0,r0,r6
-dest;	stw	r6,0(r4)
+dstnr;	stw	r6,0(r4)
 	addi	r4,r4,4
 	subi	r5,r5,4
 
@@ -416,10 +432,10 @@ dest;	stw	r6,0(r4)
 	srdi.	r6,r5,1
 	beq	.Lcopy_tail_byte
 
-source;	lhz	r6,0(r3)
+srcnr;	lhz	r6,0(r3)
 	addi	r3,r3,2
 	adde	r0,r0,r6
-dest;	sth	r6,0(r4)
+dstnr;	sth	r6,0(r4)
 	addi	r4,r4,2
 	subi	r5,r5,2
 
@@ -427,10 +443,10 @@ dest;	sth	r6,0(r4)
 	andi.	r6,r5,1
 	beq	.Lcopy_finish
 
-source;	lbz	r6,0(r3)
+srcnr;	lbz	r6,0(r3)
 	sldi	r9,r6,8			/* Pad the byte out to 16 bits */
 	adde	r0,r0,r9
-dest;	stb	r6,0(r4)
+dstnr;	stb	r6,0(r4)
 
 .Lcopy_finish:
 	addze	r0,r0			/* add in final carry */
@@ -440,6 +456,11 @@ dest;	stb	r6,0(r4)
 	blr
 
 .Lsrc_error:
+	ld	r14,STK_REG(R14)(r1)
+	ld	r15,STK_REG(R15)(r1)
+	ld	r16,STK_REG(R16)(r1)
+	addi	r1,r1,STACKFRAMESIZE
+.Lsrc_error_nr:
 	cmpdi	0,r7,0
 	beqlr
 	li	r6,-EFAULT
@@ -447,6 +468,11 @@ dest;	stb	r6,0(r4)
 	blr
 
 .Ldest_error:
+	ld	r14,STK_REG(R14)(r1)
+	ld	r15,STK_REG(R15)(r1)
+	ld	r16,STK_REG(R16)(r1)
+	addi	r1,r1,STACKFRAMESIZE
+.Ldest_error_nr:
 	cmpdi	0,r8,0
 	beqlr
 	li	r6,-EFAULT
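
The shape of the fix, as a C analogue (helper names invented): faults in the head/tail loops happen before the function has pushed the stack frame that saves r14-r16, so they must branch to the new *_nr ("no restore") labels, while faults in the unrolled middle loop must first restore those registers and pop the frame.

	#include <errno.h>

	static void push_frame(void) { /* stdu r1 + save r14-r16 in the asm */ }
	static void pop_frame(void)  { /* restore r14-r16 + addi r1 */ }

	static int copy_sketch(int fault_in_head, int fault_in_body)
	{
		if (fault_in_head)
			goto err_noframe;	/* .Lsrc_error_nr / .Ldest_error_nr */

		push_frame();
		if (fault_in_body)
			goto err_frame;		/* .Lsrc_error / .Ldest_error */
		pop_frame();
		return 0;

	err_frame:
		pop_frame();			/* unwind before the shared tail */
	err_noframe:
		return -EFAULT;
	}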

+ 4 - 0
arch/powerpc/mm/init_64.c

@@ -300,5 +300,9 @@ void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 
+void register_page_bootmem_memmap(unsigned long section_nr,
+				  struct page *start_page, unsigned long size)
+{
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 

+ 9 - 0
arch/powerpc/mm/mem.c

@@ -297,12 +297,21 @@ void __init paging_init(void)
 }
 #endif /* ! CONFIG_NEED_MULTIPLE_NODES */
 
+static void __init register_page_bootmem_info(void)
+{
+	int i;
+
+	for_each_online_node(i)
+		register_page_bootmem_info_node(NODE_DATA(i));
+}
+
 void __init mem_init(void)
 {
 #ifdef CONFIG_SWIOTLB
 	swiotlb_init(0);
 #endif
 
+	register_page_bootmem_info();
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 	set_max_mapnr(max_pfn);
 	free_all_bootmem();
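
Together with the init_64.c stub above and the mm/Kconfig change below, this wires ppc64 into the generic bootmem-info bookkeeping that memory hot remove relies on. Roughly (a sketch of the call chain, not a literal trace):

	mem_init()
	  register_page_bootmem_info()
	    register_page_bootmem_info_node(NODE_DATA(node))	/* generic mm */
	      register_page_bootmem_memmap(...)			/* ppc64: empty stub;
	         sparse vmemmap needs no extra per-section bookkeeping */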

+ 3 - 2
arch/powerpc/perf/power8-pmu.c

@@ -199,6 +199,7 @@
 #define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
 #define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
 #define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
+#define MMCR1_FAB_SHIFT			36
 #define MMCR1_DC_QUAL_SHIFT		47
 #define MMCR1_IC_QUAL_SHIFT		46
 
@@ -388,8 +389,8 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
 		 * the threshold bits are used for the match value.
 		 */
 		if (event_is_fab_match(event[i])) {
-			mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
-				  EVENT_THR_CTL_MASK;
+			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
+				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
 		} else {
 			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
 			mmcra |= val << MMCRA_THR_CTL_SHIFT;
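
A standalone illustration of the misplaced-field bug (the shift/mask values here are illustrative stand-ins, not the exact power8 event encoding; MMCR1_FAB_SHIFT is the real constant added above): before the fix the extracted bits were OR-ed into MMCR1 at bit 0, landing in the low (PMCSEL) part of the register instead of the FAB match field.

	#include <stdint.h>
	#include <stdio.h>

	#define EVENT_THR_CTL_SHIFT	32	/* illustrative */
	#define EVENT_THR_CTL_MASK	0xffull	/* illustrative */
	#define MMCR1_FAB_SHIFT		36	/* as in power8-pmu.c */

	int main(void)
	{
		uint64_t event = 0x2aull << EVENT_THR_CTL_SHIFT; /* made-up event */
		uint64_t fab = (event >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;

		printf("old: mmcr1 |= %#llx  (wrong, low bits)\n",
		       (unsigned long long)fab);
		printf("new: mmcr1 |= %#llx  (FAB field)\n",
		       (unsigned long long)(fab << MMCR1_FAB_SHIFT));
		return 0;
	}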

+ 1 - 1
mm/Kconfig

@@ -183,7 +183,7 @@ config MEMORY_HOTPLUG_SPARSE
 config MEMORY_HOTREMOVE
 	bool "Allow for memory hot remove"
 	select MEMORY_ISOLATION
-	select HAVE_BOOTMEM_INFO_NODE if X86_64
+	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
 	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
 	depends on MIGRATION