Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Fix invalid semicolon after if statement
  [POWERPC] ps3: Fix no storage devices found
  [POWERPC] Fix for assembler -g
  [POWERPC] Fix small race in 44x tlbie function
  [POWERPC] Remove unused code causing a compile warning
  [POWERPC] cell: Fix errno for modular spufs_create with invalid neighbour
Linus Torvalds, 18 years ago
parent commit 844d6c48be

+ 0 - 18
arch/powerpc/boot/flatdevtree.c

@@ -134,20 +134,6 @@ static char *ft_next(struct ft_cxt *cxt, char *p, struct ft_atom *ret)
 #define HDR_SIZE	_ALIGN(sizeof(struct boot_param_header), 8)
 #define EXPAND_INCR	1024	/* alloc this much extra when expanding */
 
-/* See if the regions are in the standard order and non-overlapping */
-static int ft_ordered(struct ft_cxt *cxt)
-{
-	char *p = (char *)cxt->bph + HDR_SIZE;
-	enum ft_rgn_id r;
-
-	for (r = FT_RSVMAP; r <= FT_STRINGS; ++r) {
-		if (p > cxt->rgn[r].start)
-			return 0;
-		p = cxt->rgn[r].start + cxt->rgn[r].size;
-	}
-	return p <= (char *)cxt->bph + cxt->max_size;
-}
-
 /* Copy the tree to a newly-allocated region and put things in order */
 static int ft_reorder(struct ft_cxt *cxt, int nextra)
 {
@@ -573,10 +559,6 @@ int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
 	cxt->rgn[FT_STRUCT].size = struct_size(cxt);
 	cxt->rgn[FT_STRINGS].start = blob + be32_to_cpu(bph->off_dt_strings);
 	cxt->rgn[FT_STRINGS].size = be32_to_cpu(bph->dt_strings_size);
-	/* Leave as '0' to force first ft_make_space call to do a ft_reorder
-	 * and move dt to an area allocated by realloc.
-	cxt->isordered = ft_ordered(cxt);
-	*/
 
 	cxt->p = cxt->rgn[FT_STRUCT].start;
 	cxt->str_anchor = cxt->rgn[FT_STRINGS].start;

+ 1 - 0
arch/powerpc/kernel/Makefile

@@ -81,6 +81,7 @@ obj-y				+= iomap.o
 endif
 
 ifeq ($(CONFIG_PPC_ISERIES),y)
+CFLAGS_lparmap.s		+= -g0
 extra-y += lparmap.s
 $(obj)/head_64.o:	$(obj)/lparmap.s
 AFLAGS_head_64.o += -I$(obj)

+ 11 - 1
arch/powerpc/kernel/misc_32.S

@@ -301,9 +301,19 @@ _GLOBAL(_tlbie)
 	mfspr	r4,SPRN_MMUCR
 	mfspr	r5,SPRN_PID			/* Get PID */
 	rlwimi	r4,r5,0,24,31			/* Set TID */
-	mtspr	SPRN_MMUCR,r4
 
+	/* We have to run the search with interrupts disabled, even critical
+	 * and debug interrupts (in fact the only critical exceptions we have
+	 * are debug and machine check).  Otherwise  an interrupt which causes
+	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
+	mfmsr	r5
+	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
+	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
+	andc	r6,r5,r6
+	mtmsr	r6
+	mtspr	SPRN_MMUCR,r4
 	tlbsx.	r3, 0, r3
+	mtmsr	r5
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64,
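
The comment added in the hunk states the constraint directly: if any interrupt, including a critical or debug one, takes a TLB miss between the mtspr to MMUCR and the tlbsx., the miss handler can clobber MMUCR and the search runs with the wrong TID. A rough C-level sketch of the same save/mask/restore pattern is below; it is illustrative only (the real fix is the assembly above, and tlbie_44x_guard() is a made-up name), using the kernel's mfmsr()/mtmsr()/mtspr() helpers from <asm/reg.h>.

#include <asm/reg.h>

/* Sketch of the guarded sequence: save the MSR, mask interrupts, program
 * MMUCR, run the search, restore the MSR.  Not the kernel source. */
static inline void tlbie_44x_guard(unsigned long mmucr_with_tid)
{
	unsigned long msr = mfmsr();

	/* Block external, critical, machine-check and debug interrupts so a
	 * TLB-miss handler cannot rewrite MMUCR before the search runs. */
	mtmsr(msr & ~(MSR_EE | MSR_CE | MSR_ME | MSR_DE));
	mtspr(SPRN_MMUCR, mmucr_with_tid);
	/* ... the tlbsx. search and its dependent code would go here ... */
	mtmsr(msr);		/* restore the saved interrupt state */
}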

+ 1 - 1
arch/powerpc/mm/hash_utils_64.c

@@ -795,7 +795,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 
 #ifdef CONFIG_PPC_MM_SLICES
 	/* We only prefault standard pages for now */
-	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize));
+	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
 		return;
 #endif
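
The one-character fix above is easy to misread: in the old line the trailing ';' is an empty statement that closes the if, so the return that follows ran unconditionally and hash_preload() bailed out for every address when CONFIG_PPC_MM_SLICES is set. A small standalone illustration (plain C, not kernel code):

#include <stdio.h>

static int preload(int psize, int user_psize, int buggy)
{
	if (buggy) {
		if (psize != user_psize);	/* ';' ends the if: the return below always runs */
			return 0;
	} else {
		if (psize != user_psize)	/* fixed form: only bail out on a mismatch */
			return 0;
	}
	return 1;				/* 1 = the preload work would actually run */
}

int main(void)
{
	printf("buggy: %d\n", preload(4, 4, 1));	/* prints 0: work skipped even on a match */
	printf("fixed: %d\n", preload(4, 4, 0));	/* prints 1 */
	return 0;
}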
 

+ 1 - 0
arch/powerpc/platforms/cell/spu_syscalls.c

@@ -45,6 +45,7 @@ asmlinkage long sys_spu_create(const char __user *name,
 	if (owner && try_module_get(owner)) {
 		if (flags & SPU_CREATE_AFFINITY_SPU) {
 			neighbor = fget_light(neighbor_fd, &fput_needed);
+			ret = -EBADF;
 			if (neighbor) {
 				ret = spufs_calls.create_thread(name, flags,
 								mode, neighbor);
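
With the added line, ret is set to -EBADF before the fget_light() lookup, so an invalid neighbour descriptor now reports -EBADF instead of falling through with whatever error code ret was initialised to earlier in the function. A minimal userspace sketch of that preset-the-errno pattern follows; module_loaded, lookup_fd() and do_create() are hypothetical stand-ins, not spufs calls.

#include <errno.h>
#include <stdio.h>

static int module_loaded = 1;				/* stand-in for try_module_get() */
static int lookup_fd(int fd) { return fd >= 0; }	/* stand-in for fget_light() */

static long do_create(int fd)
{
	long ret = -ENOSYS;		/* illustrative default when nothing is registered */

	if (module_loaded) {
		ret = -EBADF;		/* assume a bad descriptor ... */
		if (lookup_fd(fd))
			ret = 0;	/* ... until the lookup succeeds */
	}
	return ret;
}

int main(void)
{
	printf("%ld\n", do_create(-1));		/* -EBADF, not the stale default */
	printf("%ld\n", do_create(3));		/* 0 */
	return 0;
}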

+ 1 - 1
arch/powerpc/platforms/ps3/device-init.c

@@ -372,7 +372,7 @@ static int ps3_storage_wait_for_device(const struct ps3_repository_device *repo)
 		    notify_event->dev_type == repo->dev_type) {
 			pr_debug("%s:%u: device ready: dev_id %u\n", __func__,
 				 __LINE__, repo->dev_id);
-			result = 0;
+			error = 0;
 			break;
 		}
 

+ 11 - 1
arch/ppc/kernel/misc.S

@@ -237,9 +237,19 @@ _GLOBAL(_tlbie)
 	mfspr	r4,SPRN_MMUCR
 	mfspr	r5,SPRN_PID			/* Get PID */
 	rlwimi	r4,r5,0,24,31			/* Set TID */
-	mtspr	SPRN_MMUCR,r4
 
+	/* We have to run the search with interrupts disabled, even critical
+	 * and debug interrupts (in fact the only critical exceptions we have
+	 * are debug and machine check).  Otherwise  an interrupt which causes
+	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
+	mfmsr	r5
+	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
+	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
+	andc	r6,r5,r6
+	mtmsr	r6
+	mtspr	SPRN_MMUCR,r4
 	tlbsx.	r3, 0, r3
+	mtmsr	r5
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64,