
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus: (26 commits)
  MIPS: Alchemy: Fix reset for MTX-1 and XXS1500
  MIPS: MTX-1: Make au1000_eth probe all PHY addresses
  MIPS: Jz4740: Add HAVE_CLK
  MIPS: Move idle task creation to work queue
  MIPS, Perf-events: Use unsigned delta for right shift in event update
  MIPS, Perf-events: Work with the new callchain interface
  MIPS, Perf-events: Fix event check in validate_event()
  MIPS, Perf-events: Work with the new PMU interface
  MIPS, Perf-events: Work with irq_work
  MIPS: Fix always CONFIG_LOONGSON_UART_BASE=y
  MIPS: Loongson: Fix potentially wrong string handling
  MIPS: Fix GCC-4.6 'set but not used' warning in arch/mips/mm/init.c
  MIPS: Fix GCC-4.6 'set but not used' warning in ieee754int.h
  MIPS: Remove unused code from arch/mips/kernel/syscall.c
  MIPS: Fix GCC-4.6 'set but not used' warning in signal*.c
  MIPS: MSP: Fix MSP71xx bpci interrupt handler return value
  MIPS: Select R4K timer lib for all MSP platforms
  MIPS: Loongson: Remove ad-hoc cmdline default
  MIPS: Clear the correct flag in sysmips(MIPS_FIXADE, ...).
  MIPS: Add an unreachable return statement to satisfy buggy GCCs.
  ...
Linus Torvalds 14 years ago
commit 2990821d0e

+ 4 - 0
arch/mips/Kconfig

@@ -4,6 +4,7 @@ config MIPS
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_ARCH_KGDB
@@ -208,6 +209,7 @@ config MACH_JZ4740
 	select ARCH_REQUIRE_GPIOLIB
 	select SYS_HAS_EARLY_PRINTK
 	select HAVE_PWM
+	select HAVE_CLK
 
 config LASAT
 	bool "LASAT Networks platforms"
@@ -333,6 +335,8 @@ config PNX8550_STB810
 config PMC_MSP
 	bool "PMC-Sierra MSP chipsets"
 	depends on EXPERIMENTAL
+	select CEVT_R4K
+	select CSRC_R4K
 	select DMA_NONCOHERENT
 	select SWAP_IO_SPACE
 	select NO_EXCEPT_FILL

+ 2 - 2
arch/mips/alchemy/mtx-1/board_setup.c

@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert);
 
 static void mtx1_reset(char *c)
 {
-	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-	au_writel(0x00000000, 0xAE00001C);
+	/* Jump to the reset vector */
+	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void mtx1_power_off(void)

+ 9 - 0
arch/mips/alchemy/mtx-1/platform.c

@@ -28,6 +28,8 @@
 #include <linux/mtd/physmap.h>
 #include <mtd/mtd-abi.h>
 
+#include <asm/mach-au1x00/au1xxx_eth.h>
+
 static struct gpio_keys_button mtx1_gpio_button[] = {
 	{
 		.gpio = 207,
@@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = {
 	&mtx1_mtd,
 };
 
+static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
+	.phy_search_highest_addr	= 1,
+	.phy1_search_mac0 		= 1,
+};
+
 static int __init mtx1_register_devices(void)
 {
 	int rc;
 
+	au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
+
 	rc = gpio_request(mtx1_gpio_button[0].gpio,
 					mtx1_gpio_button[0].desc);
 	if (rc < 0) {

+ 2 - 2
arch/mips/alchemy/xxs1500/board_setup.c

@@ -36,8 +36,8 @@
 
 static void xxs1500_reset(char *c)
 {
-	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-	au_writel(0x00000000, 0xAE00001C);
+	/* Jump to the reset vector */
+	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void xxs1500_power_off(void)
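Note: both Alchemy boards now reboot by jumping to the MIPS reset vector instead of writing a board-specific BCSR register. The target 0xbfc00000 is the uncached KSEG1 alias of the boot ROM at physical 0x1fc00000. A minimal stand-alone sketch of that address arithmetic (the macro names mirror the kernel's addrspace.h, but the program is only illustrative):

	#include <stdio.h>
	#include <stdint.h>

	/* MIPS32 KSEG1 maps physical 0x00000000-0x1fffffff uncached at 0xa0000000. */
	#define KSEG1		0xa0000000u
	#define KSEG1ADDR(pa)	((uint32_t)(pa) | KSEG1)

	int main(void)
	{
		uint32_t boot_rom_phys = 0x1fc00000;	/* MIPS reset/boot vector base */

		/* Prints 0xbfc00000, the address both reset handlers jump to. */
		printf("reset vector (KSEG1): 0x%08x\n",
		       (unsigned)KSEG1ADDR(boot_rom_phys));
		return 0;
	}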

+ 1 - 11
arch/mips/include/asm/perf_event.h

@@ -11,15 +11,5 @@
 
 #ifndef __MIPS_PERF_EVENT_H__
 #define __MIPS_PERF_EVENT_H__
-
-/*
- * MIPS performance counters do not raise NMI upon overflow, a regular
- * interrupt will be signaled. Hence we can do the pending perf event
- * work at the tail of the irq handler.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
+/* Leave it empty here. The file is required by linux/perf_event.h */
 #endif /* __MIPS_PERF_EVENT_H__ */

+ 95 - 84
arch/mips/kernel/ftrace.c

@@ -17,29 +17,13 @@
 #include <asm/cacheflush.h>
 #include <asm/uasm.h>
 
-/*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
- *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
- */
-
-static inline int in_module(unsigned long ip)
-{
-	return ip & 0x40000000;
-}
+#include <asm-generic/sections.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
 
-#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
 #define INSN_NOP 0x00000000	/* nop */
 #define INSN_JAL(addr)	\
 	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
 #endif
 }
 
+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+	if (ip >= (unsigned long)_stext &&
+	    ip <= (unsigned long)_etext)
+		return 1;
+	return 0;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 	return 0;
 }
 
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount		--> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ *  sub sp, sp, 8
+ *                                  1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ *  nop | move $12, ra_address | sub sp, sp, 8
+ *                                  1: offset = 4 instructions
+ */
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod,
 	unsigned long ip = rec->ip;
 
 	/*
-	 * We have compiled module with -mlong-calls, but compiled the kernel
-	 * without it, we need to cope with them respectively.
+	 * If ip is in kernel space, no long call, otherwise, long call is
+	 * needed.
 	 */
-	if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-		/*
-		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * move $12, ra_address
-		 * jalr v1
-		 *  sub sp, sp, 8
-		 *                                  1: offset = 5 instructions
-		 */
-		new = INSN_B_1F_5;
-#else
-		/*
-		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * jalr v1
-		 *  nop | move $12, ra_address | sub sp, sp, 8
-		 *                                  1: offset = 4 instructions
-		 */
-		new = INSN_B_1F_4;
-#endif
-	} else {
-		/*
-		 * move at, ra
-		 * jal _mcount		--> nop
-		 */
-		new = INSN_NOP;
-	}
+	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+
 	return ftrace_modify_code(ip, new);
 }
 
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	/* ip, module: 0xc0000000, kernel: 0x80000000 */
-	new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+		insn_lui_v1_hi16_mcount;
 
 	return ftrace_modify_code(ip, new);
 }
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void)
 #define S_R_SP	(0xafb0 << 16)  /* s{d,w} R, offset(sp) */
 #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
 
-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
-				     unsigned long parent,
-				     unsigned long parent_addr,
-				     unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
-	unsigned long sp, ip, ra;
+	unsigned long sp, ip, tmp;
 	unsigned int code;
 	int faulted;
 
 	/*
-	 * For module, move the ip from calling site of mcount to the
-	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
-	 * kernel, move to the instruction "move ra, at"(offset is 12)
+	 * For module, move the ip from the return address after the
+	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
+	 * kernel, move after the instruction "move ra, at"(offset is 16)
 	 */
 	 */
-	ip = self_addr - (in_module(self_addr) ? 20 : 12);
+	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
 
 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
 	 * ra, offset(sp)" instruction
 	 */
 	do {
-		ip -= 4;
-
 		/* get the code at "ip": code = *(unsigned int *)ip; */
 		safe_load_code(code, ip, faulted);
 
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 		 * store the ra on the stack
 		 */
 		if ((code & S_R_SP) != S_R_SP)
-			return parent_addr;
+			return parent_ra_addr;
 
-	} while (((code & S_RA_SP) != S_RA_SP));
+		/* Move to the next instruction */
+		ip -= 4;
+	} while ((code & S_RA_SP) != S_RA_SP);
 
 	sp = fp + (code & OFFSET_MASK);
 
-	/* ra = *(unsigned long *)sp; */
-	safe_load_stack(ra, sp, faulted);
+	/* tmp = *(unsigned long *)sp; */
+	safe_load_stack(tmp, sp, faulted);
 	if (unlikely(faulted))
 		return 0;
 
-	if (ra == parent)
+	if (tmp == old_parent_ra)
 		return sp;
 	return 0;
 }
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 			   unsigned long fp)
 {
-	unsigned long old;
+	unsigned long old_parent_ra;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 	    &return_to_handler;
-	int faulted;
+	int faulted, insns;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
 	/*
-	 * "parent" is the stack address saved the return address of the caller
-	 * of _mcount.
+	 * "parent_ra_addr" is the stack address saved the return address of
+	 * the caller of _mcount.
 	 *
 	 * if the gcc < 4.5, a leaf function does not save the return address
 	 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	 * do it in ftrace_graph_caller of mcount.S.
 	 */
 
-	/* old = *parent; */
-	safe_load_stack(old, parent, faulted);
+	/* old_parent_ra = *parent_ra_addr; */
+	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
-	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-			(unsigned long)parent, fp);
+	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+			old_parent_ra, (unsigned long)parent_ra_addr, fp);
 	/*
 	 * If fails when getting the stack address of the non-leaf function's
 	 * ra, stop function graph tracer and return
 	 */
-	if (parent == 0)
+	if (parent_ra_addr == 0)
 		goto out;
 #endif
-	/* *parent = return_hooker; */
-	safe_store_stack(return_hooker, parent, faulted);
+	/* *parent_ra_addr = return_hooker; */
+	safe_store_stack(return_hooker, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
-	    -EBUSY) {
-		*parent = old;
+	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+	    == -EBUSY) {
+		*parent_ra_addr = old_parent_ra;
 		return;
 	}
 
-	trace.func = self_addr;
+	/*
+	 * Get the recorded ip of the current mcount calling site in the
+	 * __mcount_loc section, which will be used to filter the function
+	 * entries configured through the tracing/set_graph_function interface.
+	 */
+
+	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
 	/* Only trace if the calling function expects to */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
-		*parent = old;
+		*parent_ra_addr = old_parent_ra;
 	}
 	return;
 out:
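Note on INSN_B_1F, which replaces the two hard-coded INSN_B_1F_4/INSN_B_1F_5 constants: a MIPS "b offset" is assembler shorthand for "beq $zero, $zero, offset", i.e. opcode 000100 in the top six bits (0x10000000) plus a 16-bit offset counted in instruction words relative to the instruction after the branch. That is why OR-ing in MCOUNT_OFFSET_INSNS reproduces the old constants. A stand-alone sketch of the encoding:

	#include <stdio.h>
	#include <stdint.h>

	/* "b offset" == "beq $zero, $zero, offset": opcode 0x10000000,
	 * 16-bit offset in instruction words past the branch. */
	static uint32_t insn_b(uint16_t word_offset)
	{
		return 0x10000000u | word_offset;
	}

	int main(void)
	{
		/* The constants the removed INSN_B_1F_4/INSN_B_1F_5 macros
		 * hard-coded; ftrace.c now derives them from MCOUNT_OFFSET_INSNS. */
		printf("b over 4 insns: 0x%08x\n", (unsigned)insn_b(4)); /* 0x10000004 */
		printf("b over 5 insns: 0x%08x\n", (unsigned)insn_b(5)); /* 0x10000005 */
		return 0;
	}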

+ 166 - 179
arch/mips/kernel/perf_event.c

@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
 	return ret;
 }
 
-static int mipspmu_enable(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx;
-	int err = 0;
-
-	/* To look for a free counter for this event. */
-	idx = mipspmu->alloc_counter(cpuc, hwc);
-	if (idx < 0) {
-		err = idx;
-		goto out;
-	}
-
-	/*
-	 * If there is an event in the counter we are going to use then
-	 * make sure it is disabled.
-	 */
-	event->hw.idx = idx;
-	mipspmu->disable_event(idx);
-	cpuc->events[idx] = event;
-
-	/* Set the period for the event. */
-	mipspmu_event_set_period(event, hwc, idx);
-
-	/* Enable the event. */
-	mipspmu->enable_event(hwc, idx);
-
-	/* Propagate our changes to the userspace mapping. */
-	perf_event_update_userpage(event);
-
-out:
-	return err;
-}
-
 static void mipspmu_event_update(struct perf_event *event,
 			struct hw_perf_event *hwc,
 			int idx)
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event,
 	unsigned long flags;
 	int shift = 64 - TOTAL_BITS;
 	s64 prev_raw_count, new_raw_count;
-	s64 delta;
+	u64 delta;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -231,32 +196,90 @@ again:
 	return;
 }
 
-static void mipspmu_disable(struct perf_event *event)
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+
+	/* Set the period for the event. */
+	mipspmu_event_set_period(event, hwc, hwc->idx);
+
+	/* Enable the event. */
+	mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		/* We are working on a local event. */
+		mipspmu->disable_event(hwc->idx);
+		barrier();
+		mipspmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
 {
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
 
 
 
 
+	/* To look for a free counter for this event. */
+	idx = mipspmu->alloc_counter(cpuc, hwc);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
 
 
+	/*
+	 * If there is an event in the counter we are going to use then
+	 * make sure it is disabled.
+	 */
+	event->hw.idx = idx;
 	mipspmu->disable_event(idx);
 	mipspmu->disable_event(idx);
 
 
-
-	mipspmu_event_update(event, hwc, idx);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		mipspmu_start(event, PERF_EF_RELOAD);
 
 
 	perf_event_update_userpage(event);
 	perf_event_update_userpage(event);
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
 }
 }
 
+static void mipspmu_del(struct perf_event *event, int flags)
 {
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct hw_perf_event *hwc = &event->hw;
 
 
+	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+	mipspmu_stop(event, PERF_EF_UPDATE);
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
 }
 }
 
 static void mipspmu_read(struct perf_event *event)
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
 	mipspmu_event_update(event, hwc, hwc->idx);
 }
 
-static struct pmu pmu = {
-	.enable		= mipspmu_enable,
-	.disable	= mipspmu_disable,
-	.unthrottle	= mipspmu_unthrottle,
-	.read		= mipspmu_read,
-};
+static void mipspmu_enable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->start();
+}
+
+static void mipspmu_disable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->stop();
+}
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
 		perf_irq = save_perf_irq;
 }
 
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	if (atomic_dec_and_mutex_lock(&active_events,
+				&pmu_reserve_mutex)) {
+		/*
+		 * We must not call the destroy function with interrupts
+		 * disabled.
+		 */
+		on_each_cpu(reset_counters,
+			(void *)(long)mipspmu->num_counters, 1);
+		mipspmu_free_irq();
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+	int err = 0;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
+	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+		(event->cpu >= 0 && !cpu_online(event->cpu)))
+		return -ENODEV;
+
+	if (!atomic_inc_not_zero(&active_events)) {
+		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+			atomic_dec(&active_events);
+			return -ENOSPC;
+		}
+
+		mutex_lock(&pmu_reserve_mutex);
+		if (atomic_read(&active_events) == 0)
+			err = mipspmu_get_irq();
+
+		if (!err)
+			atomic_inc(&active_events);
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static struct pmu pmu = {
+	.pmu_enable	= mipspmu_enable,
+	.pmu_disable	= mipspmu_disable,
+	.event_init	= mipspmu_event_init,
+	.add		= mipspmu_add,
+	.del		= mipspmu_del,
+	.start		= mipspmu_start,
+	.stop		= mipspmu_stop,
+	.read		= mipspmu_read,
+};
+
 static inline unsigned int
 mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 {
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc,
 {
 	struct hw_perf_event fake_hwc = event->hw;
 
-	if (event->pmu && event->pmu != &pmu)
-		return 0;
+	/* Allow mixed event group. So return 1 to pass validation. */
+	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+		return 1;
 
 	return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
 }
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event)
 	return 0;
 }
 
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
-	if (atomic_dec_and_mutex_lock(&active_events,
-				&pmu_reserve_mutex)) {
-		/*
-		 * We must not call the destroy function with interrupts
-		 * disabled.
-		 */
-		on_each_cpu(reset_counters,
-			(void *)(long)mipspmu->num_counters, 1);
-		mipspmu_free_irq();
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-}
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-	int err = 0;
-
-	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
-		(event->cpu >= 0 && !cpu_online(event->cpu)))
-		return ERR_PTR(-ENODEV);
-
-	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
-			atomic_dec(&active_events);
-			return ERR_PTR(-ENOSPC);
-		}
-
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0)
-			err = mipspmu_get_irq();
-
-		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-
-	if (err)
-		return ERR_PTR(err);
-
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err ? ERR_PTR(err) : &pmu;
-}
-
-void hw_perf_enable(void)
-{
-	if (mipspmu)
-		mipspmu->start();
-}
-
-void hw_perf_disable(void)
-{
-	if (mipspmu)
-		mipspmu->stop();
-}
-
 /* This is needed by specific irq handlers in perf_event_*.c */
 static void
 handle_associated_event(struct cpu_hw_events *cpuc,
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
 #include "perf_event_mipsxx.c"
 #include "perf_event_mipsxx.c"
 
 
 /* Callchain handling code. */
 /* Callchain handling code. */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-		u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
 
 /*
  * Leave userspace callchain empty for now. When we find a way to trace
  * the user stack callchains, we add here.
  */
-static void
-perf_callchain_user(struct pt_regs *regs,
-		    struct perf_callchain_entry *entry)
+void perf_callchain_user(struct perf_callchain_entry *entry,
+		    struct pt_regs *regs)
 {
 }
 
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
 	while (!kstack_end(sp)) {
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
-			callchain_store(entry, addr);
+			perf_callchain_store(entry, addr);
 			if (entry->nr >= PERF_MAX_STACK_DEPTH)
 				break;
 		}
 	}
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-		      struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+		      struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
 	unsigned long ra = regs->regs[31];
 	unsigned long pc = regs->cp0_epc;
 
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	if (raw_show_trace || !__kernel_text_address(pc)) {
 		unsigned long stack_page =
 			(unsigned long)task_stack_page(current);
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
 		return;
 	}
 	do {
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 		if (entry->nr >= PERF_MAX_STACK_DEPTH)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);
 #else
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	save_raw_perf_callchain(entry, sp);
 #endif
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-		  struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (!current || !current->pid)
-		return;
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	if (!is_user) {
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs)
-		perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-	perf_do_callchain(regs, entry);
-	return entry;
-}
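Note on the s64 to u64 change for delta in mipspmu_event_update(): the raw counter is only TOTAL_BITS wide and is widened by a shift-left/shift-right pair; with a signed delta the final right shift is arithmetic, so a wrapped counter sign-extends into a large negative delta that would wind event->count backwards. A stand-alone sketch, assuming 32-bit counters and gcc's arithmetic right shift of signed values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int shift = 64 - 32;		/* as if TOTAL_BITS == 32 */
		uint64_t prev = 0x00000010;
		uint64_t new  = 0x90000010;	/* counter advanced by 0x90000000 */

		uint64_t widened = (new - prev) << shift;
		int64_t  sdelta = (int64_t)widened >> shift;	/* arithmetic shift */
		uint64_t udelta = widened >> shift;		/* logical shift */

		/* The signed variant sign-extends bit 31 and goes negative;
		 * the unsigned one keeps the true increment. */
		printf("s64 delta: %lld\n", (long long)sdelta);		  /* -1879048192 */
		printf("u64 delta: %llu\n", (unsigned long long)udelta);  /* 2415919104 */
		return 0;
	}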

+ 3 - 1
arch/mips/kernel/perf_event_mipsxx.c

@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
 	 * interrupt, not NMI.
 	 */
 	if (handled == IRQ_HANDLED)
-		perf_event_do_pending();
+		irq_work_run();
 
 #ifdef CONFIG_MIPS_MT_SMP
 	read_unlock(&pmuint_rwlock);
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void)
 			"CPU, irq %d%s\n", mipspmu->name, counters, irq,
 			"CPU, irq %d%s\n", mipspmu->name, counters, irq,
 			irq < 0 ? " (share with timer interrupt)" : "");
 			irq < 0 ? " (share with timer interrupt)" : "");
 
 
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
 	return 0;
 }
 early_initcall(init_hw_perf_events);

+ 1 - 1
arch/mips/kernel/signal.c

@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
 
 static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
-	int err, tmp;
+	int err, tmp __maybe_unused;
 	while (1) {
 		lock_fpu_owner();
 		own_fpu_inatomic(0);
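Note: GCC 4.6 introduced -Wunused-but-set-variable, and tmp is only consumed by configuration-dependent macro expansions, so it is annotated rather than removed; the same annotation appears in signal32.c, ieee754int.h and mm/init.c below. A minimal stand-alone illustration (the macro expands the way the kernel's <linux/compiler-gcc.h> does):

	#include <stdio.h>

	#define __maybe_unused __attribute__((unused))

	int main(void)
	{
		int err = 0;
		int tmp __maybe_unused;

		/* tmp is assigned but never read on this path; gcc >= 4.6 with
		 * -Wall warns "variable 'tmp' set but not used" without the
		 * attribute. */
		tmp = 42;

		printf("err = %d\n", err);
		return 0;
	}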

+ 1 - 1
arch/mips/kernel/signal32.c

@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
 
 static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
-	int err, tmp;
+	int err, tmp __maybe_unused;
 	while (1) {
 		lock_fpu_owner();
 		own_fpu_inatomic(0);

+ 29 - 2
arch/mips/kernel/smp.c

@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void)
  */
  */
 static struct task_struct *cpu_idle_thread[NR_CPUS];
 static struct task_struct *cpu_idle_thread[NR_CPUS];
 
 
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	 * Linux can schedule processes on this slave.
 	 */
 	if (!cpu_idle_thread[cpu]) {
-		idle = fork_idle(cpu);
-		cpu_idle_thread[cpu] = idle;
+		/*
+		 * Schedule work item to avoid forking user task
+		 * Ported from arch/x86/kernel/smpboot.c
+		 */
+		struct create_idle c_idle = {
+			.cpu    = cpu,
+			.done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+		};
+
+		INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+		idle = cpu_idle_thread[cpu] = c_idle.idle;
 
 		if (IS_ERR(idle))
 			panic(KERN_ERR "Fork failed for CPU %d", cpu);
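Note: __cpu_up() runs in the context of whatever process triggered the hotplug (e.g. a shell writing to sysfs), and fork_idle() copies the calling task, so the fork is handed to a kernel worker and the caller blocks on a completion. A rough user-space analogue of that hand-off-and-wait shape, with a plain thread standing in for the workqueue (illustrative only; the names mirror the patch):

	#include <pthread.h>
	#include <stdio.h>

	struct create_idle {
		pthread_t worker;	/* stands in for the kernel workqueue */
		int cpu;
		long idle;		/* stands in for the task_struct pointer */
	};

	static void *do_fork_idle(void *arg)
	{
		struct create_idle *c_idle = arg;

		c_idle->idle = 1000 + c_idle->cpu;	/* "fork_idle(cpu)" */
		return NULL;	/* thread exit doubles as complete(&done) */
	}

	int main(void)
	{
		struct create_idle c_idle = { .cpu = 1 };

		pthread_create(&c_idle.worker, NULL, do_fork_idle, &c_idle);
		pthread_join(&c_idle.worker, NULL);	/* wait_for_completion() */
		printf("idle thread for cpu %d: %ld\n", c_idle.cpu, c_idle.idle);
		return 0;
	}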

+ 2 - 3
arch/mips/kernel/syscall.c

@@ -383,12 +383,11 @@ save_static_function(sys_sysmips);
 static int __used noinline
 _sys_sysmips(nabi_no_regargs struct pt_regs regs)
 {
-	long cmd, arg1, arg2, arg3;
+	long cmd, arg1, arg2;
 
 	cmd = regs.regs[4];
 	arg1 = regs.regs[5];
 	arg2 = regs.regs[6];
-	arg3 = regs.regs[7];
 
 	switch (cmd) {
 	case MIPS_ATOMIC_SET:
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
 		if (arg1 & 2)
 			set_thread_flag(TIF_LOGADE);
 		else
-			clear_thread_flag(TIF_FIXADE);
+			clear_thread_flag(TIF_LOGADE);
 
 		return 0;
 
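Note on the sysmips(MIPS_FIXADE, ...) fix: bit 0 of the argument drives TIF_FIXADE (transparently fix unaligned accesses) and bit 1 drives TIF_LOGADE (log them). The old else branch cleared TIF_FIXADE a second time, silently disabling fixups whenever logging was switched off. A stand-alone sketch of the flag logic (bit positions are illustrative, not the kernel's TIF numbering):

	#include <stdio.h>

	#define TIF_FIXADE (1u << 0)	/* fix up unaligned accesses */
	#define TIF_LOGADE (1u << 1)	/* log unaligned accesses */

	static unsigned apply(unsigned arg1, int buggy)
	{
		unsigned flags = 0;

		if (arg1 & 1)
			flags |= TIF_FIXADE;
		if (arg1 & 2)
			flags |= TIF_LOGADE;
		else	/* the buggy path clears the fixup flag instead */
			flags &= ~(buggy ? TIF_FIXADE : TIF_LOGADE);
		return flags;
	}

	int main(void)
	{
		/* Caller asks for fixups without logging: arg1 = 1. */
		printf("buggy: fixade=%d\n", !!(apply(1, 1) & TIF_FIXADE)); /* 0 */
		printf("fixed: fixade=%d\n", !!(apply(1, 0) & TIF_FIXADE)); /* 1 */
		return 0;
	}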

+ 2 - 2
arch/mips/kernel/vpe.c

@@ -148,9 +148,9 @@ struct {
 	spinlock_t tc_list_lock;
 	struct list_head tc_list;	/* Thread contexts */
 } vpecontrol = {
-	.vpe_list_lock	= SPIN_LOCK_UNLOCKED,
+	.vpe_list_lock	= __SPIN_LOCK_UNLOCKED(vpe_list_lock),
 	.vpe_list	= LIST_HEAD_INIT(vpecontrol.vpe_list),
-	.tc_list_lock	= SPIN_LOCK_UNLOCKED,
+	.tc_list_lock	= __SPIN_LOCK_UNLOCKED(tc_list_lock),
 	.tc_list	= LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 

+ 4 - 1
arch/mips/loongson/Kconfig

@@ -1,6 +1,7 @@
+if MACH_LOONGSON
+
 choice
 	prompt "Machine Type"
-	depends on MACH_LOONGSON
 
 config LEMOTE_FULOONG2E
 	bool "Lemote Fuloong(2e) mini-PC"
@@ -87,3 +88,5 @@ config LOONGSON_UART_BASE
 config LOONGSON_MC146818
 	bool
 	default n
+
+endif # MACH_LOONGSON

+ 0 - 5
arch/mips/loongson/common/cmdline.c

@@ -44,10 +44,5 @@ void __init prom_init_cmdline(void)
 		strcat(arcs_cmdline, " ");
 	}
 
-	if ((strstr(arcs_cmdline, "console=")) == NULL)
-		strcat(arcs_cmdline, " console=ttyS0,115200");
-	if ((strstr(arcs_cmdline, "root=")) == NULL)
-		strcat(arcs_cmdline, " root=/dev/hda1");
-
 	prom_init_machtype();
 }

+ 2 - 1
arch/mips/loongson/common/machtype.c

@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void)
 
 void __init prom_init_machtype(void)
 {
-	char *p, str[MACHTYPE_LEN];
+	char *p, str[MACHTYPE_LEN + 1];
 	int machtype = MACH_LEMOTE_FL2E;
 
 	mips_machtype = LOONGSON_MACHTYPE;
@@ -53,6 +53,7 @@ void __init prom_init_machtype(void)
 	}
 	p += strlen("machtype=");
 	strncpy(str, p, MACHTYPE_LEN);
+	str[MACHTYPE_LEN] = '\0';
 	p = strstr(str, " ");
 	if (p)
 		*p = '\0';
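Note: strncpy() appends no NUL terminator when the source is at least MACHTYPE_LEN characters long, which is why the buffer grows by one byte and is terminated explicitly before strstr() runs. A minimal stand-alone demonstration (MACHTYPE_LEN shrunk for illustration):

	#include <stdio.h>
	#include <string.h>

	#define MACHTYPE_LEN 8	/* illustrative; the kernel's value differs */

	int main(void)
	{
		char str[MACHTYPE_LEN + 1];	/* the extra byte the fix adds */
		const char *p = "lemote-fuloong-2f extra-args";

		strncpy(str, p, MACHTYPE_LEN);	/* copies 8 bytes, appends no NUL */
		str[MACHTYPE_LEN] = '\0';	/* the added terminator */

		printf("machtype: %s\n", str);	/* safe: prints "lemote-f" */
		return 0;
	}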

+ 2 - 2
arch/mips/math-emu/ieee754int.h

@@ -70,7 +70,7 @@
 
 
 #define COMPXSP \
-  unsigned xm; int xe; int xs; int xc
+  unsigned xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYSP \
   unsigned ym; int ye; int ys; int yc
@@ -104,7 +104,7 @@
 
 
 #define COMPXDP \
-u64 xm; int xe; int xs; int xc
+u64 xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYDP \
 u64 ym; int ye; int ys; int yc

+ 1 - 1
arch/mips/mm/init.c

@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr)
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
-	unsigned long lastpfn;
+	unsigned long lastpfn __maybe_unused;
 
 	pagetable_init();
 

+ 2 - 0
arch/mips/mm/tlbex.c

@@ -109,6 +109,8 @@ static bool scratchpad_available(void)
 static int scratchpad_offset(int i)
 {
 	BUG();
+	/* Really unreachable, but evidently some GCC want this. */
+	return 0;
 }
 #endif
 /*
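Note: BUG() is not visibly noreturn on every configuration, so a non-void function ending in it can trip GCC's -Wreturn-type warning ("control reaches end of non-void function"); the dead return silences it. A stand-alone sketch, with a looping stand-in for BUG() that carries no noreturn annotation:

	#include <stdio.h>

	/* Stand-in for BUG(): never returns, but gcc is not told so. */
	static void bug(void)
	{
		for (;;)
			;
	}

	static int scratchpad_offset(int i)
	{
		bug();
		/* Really unreachable, but without it gcc -Wall warns:
		 * "control reaches end of non-void function". */
		return 0;
	}

	int main(void)
	{
		(void)scratchpad_offset;
		puts("compiles cleanly with -Wall");
		return 0;
	}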

+ 2 - 2
arch/mips/pci/ops-pmcmsp.c

@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = {
  *  RETURNS:     PCIBIOS_SUCCESSFUL  - success
  *
  ****************************************************************************/
-static int bpci_interrupt(int irq, void *dev_id)
+static irqreturn_t bpci_interrupt(int irq, void *dev_id)
 {
 	struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
 	unsigned int stat = preg->if_status;
@@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id)
 	/* write to clear all asserted interrupts */
 	preg->if_status = stat;
 
-	return PCIBIOS_SUCCESSFUL;
+	return IRQ_HANDLED;
 }
 
 /*****************************************************************************

+ 0 - 4
arch/mips/pmc-sierra/Kconfig

@@ -4,15 +4,11 @@ choice
 
 config PMC_MSP4200_EVAL
 	bool "PMC-Sierra MSP4200 Eval Board"
-	select CEVT_R4K
-	select CSRC_R4K
 	select IRQ_MSP_SLP
 	select HW_HAS_PCI
 
 config PMC_MSP4200_GW
 	bool "PMC-Sierra MSP4200 VoIP Gateway"
-	select CEVT_R4K
-	select CSRC_R4K
 	select IRQ_MSP_SLP
 	select HW_HAS_PCI
 

+ 1 - 1
arch/mips/pmc-sierra/msp71xx/msp_time.c

@@ -81,7 +81,7 @@ void __init plat_time_init(void)
 	mips_hpt_frequency = cpu_rate/2;
 }
 
-unsigned int __init get_c0_compare_int(void)
+unsigned int __cpuinit get_c0_compare_int(void)
 {
 	return MSP_INT_VPE0_TIMER;
 }
 }