
Merge tag 'fixes-non-3.12' of git://git.infradead.org/linux-mvebu into next/fixes-non-critical

From Jason Cooper:
mvebu fixes-non-critical for v3.12

 - dove
    - fix section mismatch (all callers are already __init, so it's just a
      space issue)

* tag 'fixes-non-3.12' of git://git.infradead.org/linux-mvebu:
  ARM: dove: fix missing __init section of dove_mpp_gpio_mode
  + Linux 3.11-rc2

Signed-off-by: Olof Johansson <olof@lixom.net>
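
The change being pulled is a one-liner (shown under arch/arm/mach-dove/mpp.c below). A minimal sketch of the pattern, with a hypothetical caller, to make the "space issue" concrete; the GPIO_* constants are taken from the comment quoted in the diff:

#include <linux/init.h>
#include "mpp.h"	/* for GPIO_OUTPUT_OK / GPIO_INPUT_OK (per the diff's comment) */

/*
 * All callers of this helper run at boot and are themselves __init, so
 * the helper can live in .init.text as well and be discarded once boot
 * completes.  Leaving the annotation off is harmless at runtime but
 * keeps the code resident forever, hence "just a space issue".
 */
static void __init dove_mpp_gpio_mode(int start, int end, int gpio_mode)
{
	/* ... switch each pin in [start, end] to GPIO mode ... */
}

/* Hypothetical __init caller, as in a board setup path. */
static void __init dove_board_init(void)
{
	dove_mpp_gpio_mode(12, 17, GPIO_OUTPUT_OK | GPIO_INPUT_OK);
}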
Olof Johansson · committed 12 years ago · commit 6a33fc8cac
100 changed files with 158 additions and 246 deletions
 + 3 - 3    Documentation/cpu-hotplug.txt
 + 1 - 1    Makefile
 + 5 - 5    arch/alpha/kernel/smp.c
 + 2 - 2    arch/alpha/kernel/traps.c
 + 2 - 2    arch/arm/common/mcpm_platsmp.c
 + 1 - 1    arch/arm/include/asm/arch_timer.h
 + 0 - 1    arch/arm/kernel/head-common.S
 + 0 - 1    arch/arm/kernel/head-nommu.S
 + 0 - 1    arch/arm/kernel/head.S
 + 2 - 2    arch/arm/kernel/hw_breakpoint.c
 + 3 - 3    arch/arm/kernel/perf_event_cpu.c
 + 1 - 2    arch/arm/kernel/psci_smp.c
 + 9 - 9    arch/arm/kernel/smp.c
 + 3 - 3    arch/arm/kernel/smp_twd.c
 + 1 - 1    arch/arm/lib/delay.c
 + 1 - 1    arch/arm/mach-dove/mpp.c
 + 0 - 2    arch/arm/mach-exynos/headsmp.S
 + 2 - 2    arch/arm/mach-exynos/platsmp.c
 + 1 - 1    arch/arm/mach-highbank/platsmp.c
 + 1 - 1    arch/arm/mach-imx/platsmp.c
 + 1 - 1    arch/arm/mach-keystone/platsmp.c
 + 0 - 2    arch/arm/mach-msm/headsmp.S
 + 3 - 3    arch/arm/mach-msm/platsmp.c
 + 2 - 2    arch/arm/mach-msm/timer.c
 + 1 - 1    arch/arm/mach-mvebu/coherency.c
 + 0 - 2    arch/arm/mach-mvebu/headsmp.S
 + 2 - 3    arch/arm/mach-mvebu/platsmp.c
 + 0 - 2    arch/arm/mach-omap2/omap-headsmp.S
 + 1 - 1    arch/arm/mach-omap2/omap-mpuss-lowpower.c
 + 2 - 2    arch/arm/mach-omap2/omap-smp.c
 + 2 - 2    arch/arm/mach-omap2/omap-wakeupgen.c
 + 0 - 2    arch/arm/mach-prima2/headsmp.S
 + 2 - 2    arch/arm/mach-prima2/platsmp.c
 + 1 - 1    arch/arm/mach-s3c24xx/Kconfig
 + 0 - 1    arch/arm/mach-shmobile/headsmp-scu.S
 + 0 - 2    arch/arm/mach-shmobile/headsmp.S
 + 1 - 1    arch/arm/mach-shmobile/smp-emev2.c
 + 1 - 1    arch/arm/mach-shmobile/smp-r8a7779.c
 + 1 - 1    arch/arm/mach-shmobile/smp-sh73a0.c
 + 0 - 1    arch/arm/mach-socfpga/headsmp.S
 + 1 - 1    arch/arm/mach-socfpga/platsmp.c
 + 1 - 1    arch/arm/mach-spear/generic.h
 + 2 - 2    arch/arm/mach-spear/platsmp.c
 + 3 - 3    arch/arm/mach-sti/platsmp.c
 + 2 - 2    arch/arm/mach-tegra/platsmp.c
 + 1 - 1    arch/arm/mach-tegra/pm.c
 + 2 - 2    arch/arm/mach-ux500/platsmp.c
 + 1 - 1    arch/arm/mach-zynq/common.h
 + 0 - 2    arch/arm/mach-zynq/headsmp.S
 + 3 - 3    arch/arm/mach-zynq/platsmp.c
 + 0 - 2    arch/arm/mm/proc-arm1020.S
 + 0 - 2    arch/arm/mm/proc-arm1020e.S
 + 0 - 2    arch/arm/mm/proc-arm1022.S
 + 0 - 3    arch/arm/mm/proc-arm1026.S
 + 0 - 2    arch/arm/mm/proc-arm720.S
 + 0 - 2    arch/arm/mm/proc-arm740.S
 + 0 - 2    arch/arm/mm/proc-arm7tdmi.S
 + 0 - 2    arch/arm/mm/proc-arm920.S
 + 0 - 2    arch/arm/mm/proc-arm922.S
 + 0 - 2    arch/arm/mm/proc-arm925.S
 + 0 - 2    arch/arm/mm/proc-arm926.S
 + 0 - 2    arch/arm/mm/proc-arm940.S
 + 0 - 2    arch/arm/mm/proc-arm946.S
 + 0 - 2    arch/arm/mm/proc-arm9tdmi.S
 + 0 - 2    arch/arm/mm/proc-fa526.S
 + 0 - 2    arch/arm/mm/proc-feroceon.S
 + 0 - 2    arch/arm/mm/proc-mohawk.S
 + 0 - 2    arch/arm/mm/proc-sa110.S
 + 0 - 2    arch/arm/mm/proc-sa1100.S
 + 0 - 2    arch/arm/mm/proc-v6.S
 + 0 - 4    arch/arm/mm/proc-v7-2level.S
 + 0 - 4    arch/arm/mm/proc-v7-3level.S
 + 0 - 2    arch/arm/mm/proc-v7.S
 + 0 - 2    arch/arm/mm/proc-xsc3.S
 + 0 - 2    arch/arm/mm/proc-xscale.S
 + 3 - 3    arch/arm/plat-versatile/platsmp.c
 + 1 - 1    arch/arm64/include/asm/arch_timer.h
 + 0 - 7    arch/arm64/include/asm/debug-monitors.h
 + 2 - 1    arch/arm64/include/asm/system_misc.h
 + 3 - 3    arch/arm64/kernel/debug-monitors.c
 + 2 - 2    arch/arm64/kernel/hw_breakpoint.c
 + 1 - 1    arch/arm64/kernel/process.c
 + 12 - 11  arch/arm64/kernel/smp.c
 + 20 - 26  arch/arm64/mm/fault.c
 + 1 - 1    arch/blackfin/kernel/perf_event.c
 + 2 - 2    arch/blackfin/kernel/setup.c
 + 3 - 3    arch/blackfin/mach-bf561/smp.c
 + 2 - 2    arch/blackfin/mach-common/cache-c.c
 + 1 - 1    arch/blackfin/mach-common/ints-priority.c
 + 9 - 9    arch/blackfin/mach-common/smp.c
 + 1 - 1    arch/cris/arch-v32/kernel/smp.c
 + 1 - 1    arch/frv/kernel/setup.c
 + 1 - 1    arch/hexagon/kernel/setup.c
 + 2 - 2    arch/hexagon/kernel/smp.c
 + 1 - 1    arch/m32r/kernel/smpboot.c
 + 3 - 3    arch/metag/kernel/perf/perf_event.c
 + 10 - 12  arch/metag/kernel/smp.c
 + 1 - 1    arch/metag/kernel/traps.c
 + 1 - 0    arch/mips/Kconfig
 + 1 - 1    arch/mips/ath79/setup.c

+ 3 - 3
Documentation/cpu-hotplug.txt

@@ -267,8 +267,8 @@ Q: If i have some kernel code that needs to be aware of CPU arrival and
 A: This is what you would need in your kernel code to receive notifications.
 
 	#include <linux/cpu.h>
-	static int __cpuinit foobar_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action, void *hcpu)
+	static int foobar_cpu_callback(struct notifier_block *nfb,
+				       unsigned long action, void *hcpu)
 	{
 		unsigned int cpu = (unsigned long)hcpu;
 
@@ -285,7 +285,7 @@ A: This is what you would need in your kernel code to receive notifications.
 		return NOTIFY_OK;
 	}
 
-	static struct notifier_block __cpuinitdata foobar_cpu_notifer =
+	static struct notifier_block foobar_cpu_notifer =
 	{
 	   .notifier_call = foobar_cpu_callback,
 	};
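
The documentation example stops at the callback and notifier block. A minimal sketch of the registration step, assuming the v3.11-era register_cpu_notifier() API; the init function is hypothetical, and the struct name is kept exactly as spelled above:

	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/module.h>

	/*
	 * Hypothetical init routine: hooks the callback above into the CPU
	 * hotplug notifier chain so it receives CPU_ONLINE/CPU_DEAD events.
	 * With __cpuinit/__cpuinitdata gone, callback and notifier block are
	 * ordinary kernel text and data.
	 */
	static int __init foobar_init(void)
	{
		return register_cpu_notifier(&foobar_cpu_notifer);
	}
	module_init(foobar_init);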

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Linux for Workgroups
 
 # *DOCUMENTATION*

+ 5 - 5
arch/alpha/kernel/smp.c

@@ -116,7 +116,7 @@ wait_boot_cpu_to_stop(int cpuid)
 /*
  * Where secondaries begin a life of C.
  */
-void __cpuinit
+void
 smp_callin(void)
 {
 	int cpuid = hard_smp_processor_id();
@@ -194,7 +194,7 @@ wait_for_txrdy (unsigned long cpumask)
  * Send a message to a secondary's console.  "START" is one such
  * interesting message.  ;-)
  */
-static void __cpuinit
+static void
 send_secondary_console_msg(char *str, int cpuid)
 {
 	struct percpu_struct *cpu;
@@ -285,7 +285,7 @@ recv_secondary_console_msg(void)
 /*
  * Convince the console to have a secondary cpu begin execution.
  */
-static int __cpuinit
+static int
 secondary_cpu_start(int cpuid, struct task_struct *idle)
 {
 	struct percpu_struct *cpu;
@@ -356,7 +356,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
 /*
  * Bring one cpu online.
  */
-static int __cpuinit
+static int
 smp_boot_one_cpu(int cpuid, struct task_struct *idle)
 {
 	unsigned long timeout;
@@ -472,7 +472,7 @@ smp_prepare_boot_cpu(void)
 {
 }
 
-int __cpuinit
+int
 __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	smp_boot_one_cpu(cpu, tidle);

+ 2 - 2
arch/alpha/kernel/traps.c

@@ -32,7 +32,7 @@
 
 static int opDEC_fix;
 
-static void __cpuinit
+static void
 opDEC_check(void)
 {
 	__asm__ __volatile__ (
@@ -1059,7 +1059,7 @@ give_sigbus:
 	return;
 }
 
-void __cpuinit
+void
 trap_init(void)
 {
 	/* Tell PAL-code what global pointer we want in the kernel.  */

+ 2 - 2
arch/arm/common/mcpm_platsmp.c

@@ -19,7 +19,7 @@
 #include <asm/smp.h>
 #include <asm/smp_plat.h>
 
-static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned int mpidr, pcpu, pcluster, ret;
 	extern void secondary_startup(void);
@@ -40,7 +40,7 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i
 	return 0;
 }
 
-static void __cpuinit mcpm_secondary_init(unsigned int cpu)
+static void mcpm_secondary_init(unsigned int cpu)
 {
 	mcpm_cpu_powered_up();
 }

+ 1 - 1
arch/arm/include/asm/arch_timer.h

@@ -89,7 +89,7 @@ static inline u64 arch_counter_get_cntvct(void)
 	return cval;
 }
 
-static inline void __cpuinit arch_counter_set_user_access(void)
+static inline void arch_counter_set_user_access(void)
 {
 	u32 cntkctl;
 

+ 0 - 1
arch/arm/kernel/head-common.S

@@ -149,7 +149,6 @@ ENDPROC(lookup_processor_type)
  *	r5 = proc_info pointer in physical address space
  *	r9 = cpuid (preserved)
  */
-	__CPUINIT
 __lookup_processor_type:
 	adr	r3, __lookup_processor_type_data
 	ldmia	r3, {r4 - r6}

+ 0 - 1
arch/arm/kernel/head-nommu.S

@@ -87,7 +87,6 @@ ENTRY(stext)
 ENDPROC(stext)
 
 #ifdef CONFIG_SMP
-	__CPUINIT
 ENTRY(secondary_startup)
 	/*
 	 * Common entry point for secondary CPUs.

+ 0 - 1
arch/arm/kernel/head.S

@@ -343,7 +343,6 @@ __turn_mmu_on_loc:
 	.long	__turn_mmu_on_end
 
 #if defined(CONFIG_SMP)
-	__CPUINIT
 ENTRY(secondary_startup)
 	/*
 	 * Common entry point for secondary CPUs.

+ 2 - 2
arch/arm/kernel/hw_breakpoint.c

@@ -1020,7 +1020,7 @@ out_mdbgen:
 		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
 }
 
-static int __cpuinit dbg_reset_notify(struct notifier_block *self,
+static int dbg_reset_notify(struct notifier_block *self,
 				      unsigned long action, void *cpu)
 {
 	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata dbg_reset_nb = {
+static struct notifier_block dbg_reset_nb = {
 	.notifier_call = dbg_reset_notify,
 };
 

+ 3 - 3
arch/arm/kernel/perf_event_cpu.c

@@ -157,8 +157,8 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
-				    unsigned long action, void *hcpu)
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+			  void *hcpu)
 {
 	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
 		return NOTIFY_DONE;
@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
+static struct notifier_block cpu_pmu_hotplug_notifier = {
 	.notifier_call = cpu_pmu_notify,
 };
 

+ 1 - 2
arch/arm/kernel/psci_smp.c

@@ -46,8 +46,7 @@
 
 extern void secondary_startup(void);
 
-static int __cpuinit psci_boot_secondary(unsigned int cpu,
-					 struct task_struct *idle)
+static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	if (psci_ops.cpu_on)
 		return psci_ops.cpu_on(cpu_logical_map(cpu),

+ 9 - 9
arch/arm/kernel/smp.c

@@ -58,7 +58,7 @@ struct secondary_data secondary_data;
  * control for which core is the next to come out of the secondary
  * boot "holding pen"
  */
-volatile int __cpuinitdata pen_release = -1;
+volatile int pen_release = -1;
 
 enum ipi_msg_type {
 	IPI_WAKEUP,
@@ -86,7 +86,7 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
 	return pgdir >> ARCH_PGD_SHIFT;
 }
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
 
@@ -138,7 +138,7 @@ void __init smp_init_cpus(void)
 		smp_ops.smp_init_cpus();
 }
 
-int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	if (smp_ops.smp_boot_secondary)
 		return smp_ops.smp_boot_secondary(cpu, idle);
@@ -170,7 +170,7 @@ static int platform_cpu_disable(unsigned int cpu)
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpuinit __cpu_disable(void)
+int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
 	int ret;
@@ -216,7 +216,7 @@ static DECLARE_COMPLETION(cpu_died);
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpuinit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
 	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
 		pr_err("CPU%u: cpu didn't die\n", cpu);
@@ -306,7 +306,7 @@ void __ref cpu_die(void)
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
  */
-static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+static void smp_store_cpu_info(unsigned int cpuid)
 {
 	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
 
@@ -322,7 +322,7 @@ static void percpu_timer_setup(void);
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void __cpuinit secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;
@@ -521,7 +521,7 @@ static void broadcast_timer_set_mode(enum clock_event_mode mode,
 {
 }
 
-static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
+static void broadcast_timer_setup(struct clock_event_device *evt)
 {
 	evt->name	= "dummy_timer";
 	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
@@ -550,7 +550,7 @@ int local_timer_register(struct local_timer_ops *ops)
 }
 #endif
 
-static void __cpuinit percpu_timer_setup(void)
+static void percpu_timer_setup(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

+ 3 - 3
arch/arm/kernel/smp_twd.c

@@ -187,7 +187,7 @@ core_initcall(twd_cpufreq_init);
 
 #endif
 
-static void __cpuinit twd_calibrate_rate(void)
+static void twd_calibrate_rate(void)
 {
 	unsigned long count;
 	u64 waitjiffies;
@@ -265,7 +265,7 @@ static void twd_get_clock(struct device_node *np)
 /*
  * Setup the local clock events for a CPU.
  */
-static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
+static int twd_timer_setup(struct clock_event_device *clk)
 {
 	struct clock_event_device **this_cpu_clk;
 	int cpu = smp_processor_id();
@@ -308,7 +308,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	return 0;
 }
 
-static struct local_timer_ops twd_lt_ops __cpuinitdata = {
+static struct local_timer_ops twd_lt_ops = {
 	.setup	= twd_timer_setup,
 	.stop	= twd_timer_stop,
 };

+ 1 - 1
arch/arm/lib/delay.c

@@ -86,7 +86,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
 	}
 }
 
-unsigned long __cpuinit calibrate_delay_is_known(void)
+unsigned long calibrate_delay_is_known(void)
 {
 	delay_calibrated = true;
 	return lpj_fine;

+ 1 - 1
arch/arm/mach-dove/mpp.c

@@ -47,7 +47,7 @@ static const struct dove_mpp_grp dove_mpp_grp[] = {
 
 /* Enable gpio for a range of pins. mode should be a combination of
    GPIO_OUTPUT_OK | GPIO_INPUT_OK */
-static void dove_mpp_gpio_mode(int start, int end, int gpio_mode)
+static void __init dove_mpp_gpio_mode(int start, int end, int gpio_mode)
 {
 	int i;
 

+ 0 - 2
arch/arm/mach-exynos/headsmp.S

@@ -13,8 +13,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-	__CPUINIT
-
 /*
  * exynos4 specific entry point for secondary CPUs.  This provides
  * a "holding pen" into which all secondary cores are held until we're

+ 2 - 2
arch/arm/mach-exynos/platsmp.c

@@ -75,7 +75,7 @@ static void __iomem *scu_base_addr(void)
 
 static DEFINE_SPINLOCK(boot_lock);
 
-static void __cpuinit exynos_secondary_init(unsigned int cpu)
+static void exynos_secondary_init(unsigned int cpu)
 {
 	/*
 	 * let the primary processor know we're out of the
@@ -90,7 +90,7 @@ static void __cpuinit exynos_secondary_init(unsigned int cpu)
 	spin_unlock(&boot_lock);
 }
 
-static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
 	unsigned long phys_cpu = cpu_logical_map(cpu);

+ 1 - 1
arch/arm/mach-highbank/platsmp.c

@@ -24,7 +24,7 @@
 
 extern void secondary_startup(void);
 
-static int __cpuinit highbank_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int highbank_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	highbank_set_cpu_jump(cpu, secondary_startup);
 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

+ 1 - 1
arch/arm/mach-imx/platsmp.c

@@ -53,7 +53,7 @@ void imx_scu_standby_enable(void)
 	writel_relaxed(val, scu_base);
 }
 
-static int __cpuinit imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	imx_set_cpu_jump(cpu, v7_secondary_startup);
 	imx_enable_cpu(cpu, true);

+ 1 - 1
arch/arm/mach-keystone/platsmp.c

@@ -21,7 +21,7 @@
 
 #include "keystone.h"
 
-static int __cpuinit keystone_smp_boot_secondary(unsigned int cpu,
+static int keystone_smp_boot_secondary(unsigned int cpu,
 						struct task_struct *idle)
 {
 	unsigned long start = virt_to_phys(&secondary_startup);

+ 0 - 2
arch/arm/mach-msm/headsmp.S

@@ -11,8 +11,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-	__CPUINIT
-
 /*
  * MSM specific entry point for secondary CPUs.  This provides
  * a "holding pen" into which all secondary cores are held until we're

+ 3 - 3
arch/arm/mach-msm/platsmp.c

@@ -38,7 +38,7 @@ static inline int get_core_count(void)
 	return ((read_cpuid_id() >> 4) & 3) + 1;
 }
 
-static void __cpuinit msm_secondary_init(unsigned int cpu)
+static void msm_secondary_init(unsigned int cpu)
 {
 	/*
 	 * let the primary processor know we're out of the
@@ -54,7 +54,7 @@ static void __cpuinit msm_secondary_init(unsigned int cpu)
 	spin_unlock(&boot_lock);
 }
 
-static __cpuinit void prepare_cold_cpu(unsigned int cpu)
+static void prepare_cold_cpu(unsigned int cpu)
 {
 	int ret;
 	ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup),
@@ -73,7 +73,7 @@ static __cpuinit void prepare_cold_cpu(unsigned int cpu)
 				  "address\n");
 }
 
-static int __cpuinit msm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int msm_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
 	static int cold_boot_done;

+ 2 - 2
arch/arm/mach-msm/timer.c

@@ -139,7 +139,7 @@ static struct clocksource msm_clocksource = {
 };
 
 #ifdef CONFIG_LOCAL_TIMERS
-static int __cpuinit msm_local_timer_setup(struct clock_event_device *evt)
+static int msm_local_timer_setup(struct clock_event_device *evt)
 {
 	/* Use existing clock_event for cpu 0 */
 	if (!smp_processor_id())
@@ -164,7 +164,7 @@ static void msm_local_timer_stop(struct clock_event_device *evt)
 	disable_percpu_irq(evt->irq);
 }
 
-static struct local_timer_ops msm_local_timer_ops __cpuinitdata = {
+static struct local_timer_ops msm_local_timer_ops = {
 	.setup	= msm_local_timer_setup,
 	.stop	= msm_local_timer_stop,
 };

+ 1 - 1
arch/arm/mach-mvebu/coherency.c

@@ -28,7 +28,7 @@
 #include <asm/cacheflush.h>
 #include "armada-370-xp.h"
 
-unsigned long __cpuinitdata coherency_phys_base;
+unsigned long coherency_phys_base;
 static void __iomem *coherency_base;
 static void __iomem *coherency_cpu_base;
 

+ 0 - 2
arch/arm/mach-mvebu/headsmp.S

@@ -21,8 +21,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-	__CPUINIT
-
 /*
  * Armada XP specific entry point for secondary CPUs.
  * We add the CPU to the coherency fabric and then jump to secondary

+ 2 - 3
arch/arm/mach-mvebu/platsmp.c

@@ -71,13 +71,12 @@ void __init set_secondary_cpus_clock(void)
 	}
 }
 
-static void __cpuinit armada_xp_secondary_init(unsigned int cpu)
+static void armada_xp_secondary_init(unsigned int cpu)
 {
 	armada_xp_mpic_smp_cpu_init();
 }
 
-static int __cpuinit armada_xp_boot_secondary(unsigned int cpu,
-					      struct task_struct *idle)
+static int armada_xp_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	pr_info("Booting CPU %d\n", cpu);
 

+ 0 - 2
arch/arm/mach-omap2/omap-headsmp.S

@@ -20,8 +20,6 @@
 
 #include "omap44xx.h"
 
-	__CPUINIT
-
 /* Physical address needed since MMU not enabled yet on secondary core */
 #define AUX_CORE_BOOT0_PA			0x48281800
 

+ 1 - 1
arch/arm/mach-omap2/omap-mpuss-lowpower.c

@@ -291,7 +291,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
  * @cpu : CPU ID
  * @power_state: CPU low power state.
  */
-int __cpuinit omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
+int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
 {
 	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
 	unsigned int cpu_state = 0;

+ 2 - 2
arch/arm/mach-omap2/omap-smp.c

@@ -51,7 +51,7 @@ void __iomem *omap4_get_scu_base(void)
 	return scu_base;
 }
 
-static void __cpuinit omap4_secondary_init(unsigned int cpu)
+static void omap4_secondary_init(unsigned int cpu)
 {
 	/*
 	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
@@ -72,7 +72,7 @@ static void __cpuinit omap4_secondary_init(unsigned int cpu)
 	spin_unlock(&boot_lock);
 }
 
-static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	static struct clockdomain *cpu1_clkdm;
 	static bool booted;

+ 2 - 2
arch/arm/mach-omap2/omap-wakeupgen.c

@@ -323,8 +323,8 @@ static void irq_save_secure_context(void)
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
-					 unsigned long action, void *hcpu)
+static int irq_cpu_hotplug_notify(struct notifier_block *self,
+				  unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)hcpu;
 

+ 0 - 2
arch/arm/mach-prima2/headsmp.S

@@ -9,8 +9,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-	__CPUINIT
-
 /*
  * SIRFSOC specific entry point for secondary CPUs.  This provides
  * a "holding pen" into which all secondary cores are held until we're

+ 2 - 2
arch/arm/mach-prima2/platsmp.c

@@ -44,7 +44,7 @@ void __init sirfsoc_map_scu(void)
 	scu_base = (void __iomem *)SIRFSOC_VA(base);
 }
 
-static void __cpuinit sirfsoc_secondary_init(unsigned int cpu)
+static void sirfsoc_secondary_init(unsigned int cpu)
 {
 	/*
 	 * let the primary processor know we're out of the
@@ -65,7 +65,7 @@ static struct of_device_id rsc_ids[]  = {
 	{},
 };
 
-static int __cpuinit sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
 	struct device_node *np;

+ 1 - 1
arch/arm/mach-s3c24xx/Kconfig

@@ -208,7 +208,7 @@ config S3C24XX_GPIO_EXTRA128
 
 config S3C24XX_PLL
 	bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)"
-	depends on ARM_S3C24XX
+	depends on ARM_S3C24XX_CPUFREQ
 	help
 	  Compile in support for changing the PLL frequency from the
 	  S3C24XX series CPUfreq driver. The PLL takes time to settle

+ 0 - 1
arch/arm/mach-shmobile/headsmp-scu.S

@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <asm/memory.h>
 
-	__CPUINIT
 /*
  * Boot code for secondary CPUs.
  *

+ 0 - 2
arch/arm/mach-shmobile/headsmp.S

@@ -14,8 +14,6 @@
 #include <linux/init.h>
 #include <asm/memory.h>
 
-	__CPUINIT
-
 ENTRY(shmobile_invalidate_start)
 	bl	v7_invalidate_l1
 	b	secondary_startup

+ 1 - 1
arch/arm/mach-shmobile/smp-emev2.c

@@ -30,7 +30,7 @@
 
 #define EMEV2_SCU_BASE 0x1e000000
 
-static int __cpuinit emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu)));
 	return 0;

+ 1 - 1
arch/arm/mach-shmobile/smp-r8a7779.c

@@ -81,7 +81,7 @@ static int r8a7779_platform_cpu_kill(unsigned int cpu)
 	return ret ? ret : 1;
 }
 
-static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	struct r8a7779_pm_ch *ch = NULL;
 	int ret = -EIO;

+ 1 - 1
arch/arm/mach-shmobile/smp-sh73a0.c

@@ -48,7 +48,7 @@ void __init sh73a0_register_twd(void)
 }
 #endif
 
-static int __cpuinit sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	cpu = cpu_logical_map(cpu);
 

+ 0 - 1
arch/arm/mach-socfpga/headsmp.S

@@ -10,7 +10,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-	__CPUINIT
 	.arch	armv7-a
 
 ENTRY(secondary_trampoline)

+ 1 - 1
arch/arm/mach-socfpga/platsmp.c

@@ -29,7 +29,7 @@
 
 #include "core.h"
 
-static int __cpuinit socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
 

+ 1 - 1
arch/arm/mach-spear/generic.h

@@ -37,7 +37,7 @@ void __init spear13xx_l2x0_init(void);
 void spear_restart(enum reboot_mode, const char *);
 
 void spear13xx_secondary_startup(void);
-void __cpuinit spear13xx_cpu_die(unsigned int cpu);
+void spear13xx_cpu_die(unsigned int cpu);
 
 extern struct smp_operations spear13xx_smp_ops;
 

+ 2 - 2
arch/arm/mach-spear/platsmp.c

@@ -24,7 +24,7 @@ static DEFINE_SPINLOCK(boot_lock);
 
 static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
 
-static void __cpuinit spear13xx_secondary_init(unsigned int cpu)
+static void spear13xx_secondary_init(unsigned int cpu)
 {
 	/*
 	 * let the primary processor know we're out of the
@@ -40,7 +40,7 @@ static void __cpuinit spear13xx_secondary_init(unsigned int cpu)
 	spin_unlock(&boot_lock);
 }
 
-static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
 

+ 3 - 3
arch/arm/mach-sti/platsmp.c

@@ -27,7 +27,7 @@
 
 #include "smp.h"
 
-static void __cpuinit write_pen_release(int val)
+static void write_pen_release(int val)
 {
 	pen_release = val;
 	smp_wmb();
@@ -37,7 +37,7 @@ static void __cpuinit write_pen_release(int val)
 
 static DEFINE_SPINLOCK(boot_lock);
 
-void __cpuinit sti_secondary_init(unsigned int cpu)
+void sti_secondary_init(unsigned int cpu)
 {
 	trace_hardirqs_off();
 
@@ -54,7 +54,7 @@ void __cpuinit sti_secondary_init(unsigned int cpu)
 	spin_unlock(&boot_lock);
 }
 
-int __cpuinit sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
 

+ 2 - 2
arch/arm/mach-tegra/platsmp.c

@@ -35,7 +35,7 @@
 
 static cpumask_t tegra_cpu_init_mask;
 
-static void __cpuinit tegra_secondary_init(unsigned int cpu)
+static void tegra_secondary_init(unsigned int cpu)
 {
 	cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
 }
@@ -167,7 +167,7 @@ static int tegra114_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	return ret;
 }
 
-static int __cpuinit tegra_boot_secondary(unsigned int cpu,
+static int tegra_boot_secondary(unsigned int cpu,
 					  struct task_struct *idle)
 {
 	if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_chip_id == TEGRA20)

+ 1 - 1
arch/arm/mach-tegra/pm.c

@@ -191,7 +191,7 @@ static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
 	[TEGRA_SUSPEND_LP0] = "LP0",
 };
 
-static int __cpuinit tegra_suspend_enter(suspend_state_t state)
+static int tegra_suspend_enter(suspend_state_t state)
 {
 	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();
 

+ 2 - 2
arch/arm/mach-ux500/platsmp.c

@@ -54,7 +54,7 @@ static void __iomem *scu_base_addr(void)
 
 static DEFINE_SPINLOCK(boot_lock);
 
-static void __cpuinit ux500_secondary_init(unsigned int cpu)
+static void ux500_secondary_init(unsigned int cpu)
 {
 	/*
 	 * let the primary processor know we're out of the
@@ -69,7 +69,7 @@ static void __cpuinit ux500_secondary_init(unsigned int cpu)
 	spin_unlock(&boot_lock);
 }
 
-static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
 

+ 1 - 1
arch/arm/mach-zynq/common.h

@@ -27,7 +27,7 @@ extern void secondary_startup(void);
 extern char zynq_secondary_trampoline;
 extern char zynq_secondary_trampoline_jump;
 extern char zynq_secondary_trampoline_end;
-extern int __cpuinit zynq_cpun_start(u32 address, int cpu);
+extern int zynq_cpun_start(u32 address, int cpu);
 extern struct smp_operations zynq_smp_ops __initdata;
 #endif
 

+ 0 - 2
arch/arm/mach-zynq/headsmp.S

@@ -9,8 +9,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-	__CPUINIT
-
 ENTRY(zynq_secondary_trampoline)
 	ldr	r0, [pc]
 	bx	r0

+ 3 - 3
arch/arm/mach-zynq/platsmp.c

@@ -30,11 +30,11 @@
 /*
  * Store number of cores in the system
  * Because of scu_get_core_count() must be in __init section and can't
- * be called from zynq_cpun_start() because it is in __cpuinit section.
+ * be called from zynq_cpun_start() because it is not in __init section.
  */
 static int ncores;
 
-int __cpuinit zynq_cpun_start(u32 address, int cpu)
+int zynq_cpun_start(u32 address, int cpu)
 {
 	u32 trampoline_code_size = &zynq_secondary_trampoline_end -
 						&zynq_secondary_trampoline;
@@ -92,7 +92,7 @@ int __cpuinit zynq_cpun_start(u32 address, int cpu)
 }
 EXPORT_SYMBOL(zynq_cpun_start);
 
-static int __cpuinit zynq_boot_secondary(unsigned int cpu,
+static int zynq_boot_secondary(unsigned int cpu,
 						struct task_struct *idle)
 {
 	return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
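
The corrected comment above describes a pattern worth making explicit: a value that can only be computed by __init code is cached in a static variable so that non-init code may use it after boot. A sketch under that assumption; the probe helper's name and shape are illustrative, not the file's actual code:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/smp_scu.h>

static void __iomem *scu_base;	/* mapped elsewhere during boot */
static int ncores;		/* captured at boot, readable any time */

/* __init-only probe: scu_get_core_count() lives in the init section,
 * so it may only be called from __init code. */
static void __init zynq_count_cores(void)
{
	ncores = scu_get_core_count(scu_base);
}

/* May also run after boot (CPU hotplug), so it cannot call the __init
 * helper above; it consumes the cached value instead. */
int zynq_cpun_start(u32 address, int cpu)
{
	if (cpu >= ncores) {
		pr_warn("CPU%d: does not exist\n", cpu);
		return -EINVAL;
	}
	/* ... install the trampoline at 'address' and release the CPU ... */
	return 0;
}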

+ 0 - 2
arch/arm/mm/proc-arm1020.S

@@ -443,8 +443,6 @@ ENTRY(cpu_arm1020_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-	__CPUINIT
-
 	.type	__arm1020_setup, #function
 __arm1020_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm1020e.S

@@ -425,8 +425,6 @@ ENTRY(cpu_arm1020e_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-	__CPUINIT
-
 	.type	__arm1020e_setup, #function
 __arm1020e_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm1022.S

@@ -407,8 +407,6 @@ ENTRY(cpu_arm1022_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-	__CPUINIT
-
 	.type	__arm1022_setup, #function
 __arm1022_setup:
 	mov	r0, #0

+ 0 - 3
arch/arm/mm/proc-arm1026.S

@@ -396,9 +396,6 @@ ENTRY(cpu_arm1026_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-
-	__CPUINIT
-
 	.type	__arm1026_setup, #function
 __arm1026_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm720.S

@@ -116,8 +116,6 @@ ENTRY(cpu_arm720_reset)
 ENDPROC(cpu_arm720_reset)
 		.popsection
 
-	__CPUINIT
-
 	.type	__arm710_setup, #function
 __arm710_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm740.S

@@ -60,8 +60,6 @@ ENTRY(cpu_arm740_reset)
 ENDPROC(cpu_arm740_reset)
 	.popsection
 
-	__CPUINIT
-
 	.type	__arm740_setup, #function
 __arm740_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm7tdmi.S

@@ -51,8 +51,6 @@ ENTRY(cpu_arm7tdmi_reset)
 ENDPROC(cpu_arm7tdmi_reset)
 		.popsection
 
-		__CPUINIT
-
 		.type	__arm7tdmi_setup, #function
 __arm7tdmi_setup:
 		mov	pc, lr

+ 0 - 2
arch/arm/mm/proc-arm920.S

@@ -410,8 +410,6 @@ ENTRY(cpu_arm920_do_resume)
 ENDPROC(cpu_arm920_do_resume)
 #endif
 
-	__CPUINIT
-
 	.type	__arm920_setup, #function
 __arm920_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm922.S

@@ -388,8 +388,6 @@ ENTRY(cpu_arm922_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-	__CPUINIT
-
 	.type	__arm922_setup, #function
 __arm922_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm925.S

@@ -438,8 +438,6 @@ ENTRY(cpu_arm925_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-	__CPUINIT
-
 	.type	__arm925_setup, #function
 __arm925_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm926.S

@@ -425,8 +425,6 @@ ENTRY(cpu_arm926_do_resume)
 ENDPROC(cpu_arm926_do_resume)
 #endif
 
-	__CPUINIT
-
 	.type	__arm926_setup, #function
 __arm926_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm940.S

@@ -273,8 +273,6 @@ ENDPROC(arm940_dma_unmap_area)
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm940
 
-	__CPUINIT
-
 	.type	__arm940_setup, #function
 __arm940_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm946.S

@@ -326,8 +326,6 @@ ENTRY(cpu_arm946_dcache_clean_area)
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
-	__CPUINIT
-
 	.type	__arm946_setup, #function
 __arm946_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-arm9tdmi.S

@@ -51,8 +51,6 @@ ENTRY(cpu_arm9tdmi_reset)
 ENDPROC(cpu_arm9tdmi_reset)
 		.popsection
 
-		__CPUINIT
-
 		.type	__arm9tdmi_setup, #function
 __arm9tdmi_setup:
 		mov	pc, lr

+ 0 - 2
arch/arm/mm/proc-fa526.S

@@ -135,8 +135,6 @@ ENTRY(cpu_fa526_set_pte_ext)
 #endif
 	mov	pc, lr
 
-	__CPUINIT
-
 	.type	__fa526_setup, #function
 __fa526_setup:
 	/* On return of this routine, r0 must carry correct flags for CFG register */

+ 0 - 2
arch/arm/mm/proc-feroceon.S

@@ -514,8 +514,6 @@ ENTRY(cpu_feroceon_set_pte_ext)
 #endif
 	mov	pc, lr
 
-	__CPUINIT
-
 	.type	__feroceon_setup, #function
 __feroceon_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-mohawk.S

@@ -383,8 +383,6 @@ ENTRY(cpu_mohawk_do_resume)
 ENDPROC(cpu_mohawk_do_resume)
 #endif
 
-	__CPUINIT
-
 	.type	__mohawk_setup, #function
 __mohawk_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-sa110.S

@@ -159,8 +159,6 @@ ENTRY(cpu_sa110_set_pte_ext)
 #endif
 	mov	pc, lr
 
-	__CPUINIT
-
 	.type	__sa110_setup, #function
 __sa110_setup:
 	mov	r10, #0

+ 0 - 2
arch/arm/mm/proc-sa1100.S

@@ -198,8 +198,6 @@ ENTRY(cpu_sa1100_do_resume)
 ENDPROC(cpu_sa1100_do_resume)
 #endif
 
-	__CPUINIT
-
 	.type	__sa1100_setup, #function
 __sa1100_setup:
 	mov	r0, #0

+ 0 - 2
arch/arm/mm/proc-v6.S

@@ -180,8 +180,6 @@ ENDPROC(cpu_v6_do_resume)
 
 	.align
 
-	__CPUINIT
-
 /*
  *	__v6_setup
  *

+ 0 - 4
arch/arm/mm/proc-v7-2level.S

@@ -160,8 +160,6 @@ ENDPROC(cpu_v7_set_pte_ext)
 	mcr	p15, 0, \ttbr1, c2, c0, 1	@ load TTB1
 	.endm
 
-	__CPUINIT
-
 	/*   AT
 	 *  TFR   EV X F   I D LR    S
 	 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
@@ -172,5 +170,3 @@ ENDPROC(cpu_v7_set_pte_ext)
 	.type	v7_crval, #object
 v7_crval:
 	crval	clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
-
-	.previous

+ 0 - 4
arch/arm/mm/proc-v7-3level.S

@@ -140,8 +140,6 @@ ENDPROC(cpu_v7_set_pte_ext)
 	mcrr	p15, 0, \ttbr0, \zero, c2			@ load TTBR0
 	.endm
 
-	__CPUINIT
-
 	/*
 	 *   AT
 	 *  TFR   EV X F   IHD LR    S
@@ -153,5 +151,3 @@ ENDPROC(cpu_v7_set_pte_ext)
 	.type	v7_crval, #object
 v7_crval:
 	crval	clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
-
-	.previous

+ 0 - 2
arch/arm/mm/proc-v7.S

@@ -167,8 +167,6 @@ ENDPROC(cpu_pj4b_do_idle)
 
 #endif
 
-	__CPUINIT
-
 /*
  *	__v7_setup
  *

+ 0 - 2
arch/arm/mm/proc-xsc3.S

@@ -446,8 +446,6 @@ ENTRY(cpu_xsc3_do_resume)
 ENDPROC(cpu_xsc3_do_resume)
 #endif
 
-	__CPUINIT
-
 	.type	__xsc3_setup, #function
 __xsc3_setup:
 	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE

+ 0 - 2
arch/arm/mm/proc-xscale.S

@@ -558,8 +558,6 @@ ENTRY(cpu_xscale_do_resume)
 ENDPROC(cpu_xscale_do_resume)
 #endif
 
-	__CPUINIT
-
 	.type	__xscale_setup, #function
 __xscale_setup:
 	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB

+ 3 - 3
arch/arm/plat-versatile/platsmp.c

@@ -23,7 +23,7 @@
  * observers, irrespective of whether they're taking part in coherency
  * or not.  This is necessary for the hotplug code to work reliably.
  */
-static void __cpuinit write_pen_release(int val)
+static void write_pen_release(int val)
 {
 	pen_release = val;
 	smp_wmb();
@@ -33,7 +33,7 @@ static void __cpuinit write_pen_release(int val)
 
 static DEFINE_SPINLOCK(boot_lock);
 
-void __cpuinit versatile_secondary_init(unsigned int cpu)
+void versatile_secondary_init(unsigned int cpu)
 {
 	/*
 	 * let the primary processor know we're out of the
@@ -48,7 +48,7 @@ void __cpuinit versatile_secondary_init(unsigned int cpu)
 	spin_unlock(&boot_lock);
 }
 
-int __cpuinit versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
 

+ 1 - 1
arch/arm64/include/asm/arch_timer.h

@@ -97,7 +97,7 @@ static inline u32 arch_timer_get_cntfrq(void)
 	return val;
 }
 
-static inline void __cpuinit arch_counter_set_user_access(void)
+static inline void arch_counter_set_user_access(void)
 {
 	u32 cntkctl;
 

+ 0 - 7
arch/arm64/include/asm/debug-monitors.h

@@ -83,14 +83,7 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
 }
 #endif
 
-#ifdef CONFIG_COMPAT
 int aarch32_break_handler(struct pt_regs *regs);
-#else
-static int aarch32_break_handler(struct pt_regs *regs)
-{
-	return -EFAULT;
-}
-#endif
 
 #endif	/* __ASSEMBLY */
 #endif	/* __KERNEL__ */

+ 2 - 1
arch/arm64/include/asm/system_misc.h

@@ -23,6 +23,7 @@
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
+#include <linux/reboot.h>
 
 struct pt_regs;
 
@@ -41,7 +42,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
 extern void __show_regs(struct pt_regs *);
 
 void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(char str, const char *cmd);
+extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 
 #define UDBG_UNDEFINED	(1 << 0)
 #define UDBG_SYSCALL	(1 << 1)

+ 3 - 3
arch/arm64/kernel/debug-monitors.c

@@ -141,7 +141,7 @@ static void clear_os_lock(void *unused)
 	isb();
 }
 
-static int __cpuinit os_lock_notify(struct notifier_block *self,
+static int os_lock_notify(struct notifier_block *self,
 				    unsigned long action, void *data)
 {
 	int cpu = (unsigned long)data;
@@ -150,11 +150,11 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata os_lock_nb = {
+static struct notifier_block os_lock_nb = {
 	.notifier_call = os_lock_notify,
 };
 
-static int __cpuinit debug_monitors_init(void)
+static int debug_monitors_init(void)
 {
 	/* Clear the OS lock. */
 	smp_call_function(clear_os_lock, NULL, 1);

+ 2 - 2
arch/arm64/kernel/hw_breakpoint.c

@@ -821,7 +821,7 @@ static void reset_ctrl_regs(void *unused)
 	}
 }
 
-static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
+static int hw_breakpoint_reset_notify(struct notifier_block *self,
 						unsigned long action,
 						void *hcpu)
 {
@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
+static struct notifier_block hw_breakpoint_reset_nb = {
 	.notifier_call = hw_breakpoint_reset_notify,
 };
 

+ 1 - 1
arch/arm64/kernel/process.c

@@ -132,7 +132,7 @@ void machine_restart(char *cmd)
 
 	/* Now call the architecture specific reboot code. */
 	if (arm_pm_restart)
-		arm_pm_restart('h', cmd);
+		arm_pm_restart(reboot_mode, cmd);
 
 	/*
 	 * Whoops - the architecture was unable to reboot.

+ 12 - 11
arch/arm64/kernel/smp.c

@@ -71,7 +71,7 @@ static DEFINE_RAW_SPINLOCK(boot_lock);
  * in coherency or not.  This is necessary for the hotplug code to work
  * reliably.
  */
-static void __cpuinit write_pen_release(u64 val)
+static void write_pen_release(u64 val)
 {
 	void *start = (void *)&secondary_holding_pen_release;
 	unsigned long size = sizeof(secondary_holding_pen_release);
@@ -84,7 +84,7 @@ static void __cpuinit write_pen_release(u64 val)
  * Boot a secondary CPU, and assign it the specified idle task.
  * This also gives us the initial stack to use for this CPU.
  */
-static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
 
@@ -122,7 +122,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 
 static DECLARE_COMPLETION(cpu_running);
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
 
@@ -162,7 +162,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void __cpuinit secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu = smp_processor_id();
@@ -199,13 +199,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	raw_spin_lock(&boot_lock);
 	raw_spin_unlock(&boot_lock);
 
-	/*
-	 * Enable local interrupts.
-	 */
-	notify_cpu_starting(cpu);
-	local_irq_enable();
-	local_fiq_enable();
-
 	/*
 	 * OK, now it's safe to let the boot CPU continue.  Wait for
 	 * the CPU migration code to notice that the CPU is online
@@ -214,6 +207,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	set_cpu_online(cpu, true);
 	complete(&cpu_running);
 
+	/*
+	 * Enable GIC and timers.
+	 */
+	notify_cpu_starting(cpu);
+
+	local_irq_enable();
+	local_fiq_enable();
+
 	/*
 	 * OK, it's off to the idle thread for us
 	 */

+ 20 - 26
arch/arm64/mm/fault.c

@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 #define ESR_CM			(1 << 8)
 #define ESR_LNX_EXEC		(1 << 24)
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
-	if (esr & ESR_WRITE)
-		mask = VM_WRITE;
-	if (esr & ESR_LNX_EXEC)
-		mask = VM_EXEC;
-
-	return vma->vm_flags & mask ? false : true;
-}
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
-			   unsigned int esr, unsigned int flags,
+			   unsigned int mm_flags, unsigned long vm_flags,
 			   struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 	 * it.
 	 */
 good_area:
-	if (access_error(esr, vma)) {
+	/*
+	 * Check that the permissions on the VMA allow for the fault which
+	 * occurred. If we encountered a write or exec fault, we must have
+	 * appropriate permissions, otherwise we allow any permission.
+	 */
+	if (!(vma->vm_flags & vm_flags)) {
 		fault = VM_FAULT_BADACCESS;
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-		(write ? FAULT_FLAG_WRITE : 0);
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
 
 	tsk = current;
 	mm  = tsk->mm;
@@ -248,7 +242,7 @@ retry:
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, esr, flags, tsk);
+	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
 
 	/*
 	 * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ retry:
 	 */
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ retry:
 			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
 			 * starvation.
 			 */
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			goto retry;
 		}
 	}

+ 1 - 1
arch/blackfin/kernel/perf_event.c

@@ -468,7 +468,7 @@ static void bfin_pmu_setup(int cpu)
 	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
 }
 
-static int __cpuinit
+static int
 bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;

+ 2 - 2
arch/blackfin/kernel/setup.c

@@ -99,7 +99,7 @@ void __init generate_cplb_tables(void)
 }
 #endif
 
-void __cpuinit bfin_setup_caches(unsigned int cpu)
+void bfin_setup_caches(unsigned int cpu)
 {
 #ifdef CONFIG_BFIN_ICACHE
 	bfin_icache_init(icplb_tbl[cpu]);
@@ -165,7 +165,7 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
 #endif
 }
 
-void __cpuinit bfin_setup_cpudata(unsigned int cpu)
+void bfin_setup_cpudata(unsigned int cpu)
 {
 	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
 

+ 3 - 3
arch/blackfin/mach-bf561/smp.c

@@ -48,7 +48,7 @@ int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
 	return -EINVAL;
 }
 
-void __cpuinit platform_secondary_init(unsigned int cpu)
+void platform_secondary_init(unsigned int cpu)
 {
 	/* Clone setup for peripheral interrupt sources from CoreA. */
 	bfin_write_SICB_IMASK0(bfin_read_SIC_IMASK0());
@@ -73,7 +73,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
 	spin_unlock(&boot_lock);
 }
 
-int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle)
+int platform_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
 
@@ -154,7 +154,7 @@ void platform_clear_ipi(unsigned int cpu, int irq)
  * Setup core B's local core timer.
  * In SMP, core timer is used for clock event device.
  */
-void __cpuinit bfin_local_timer_setup(void)
+void bfin_local_timer_setup(void)
 {
 #if defined(CONFIG_TICKSOURCE_CORETMR)
 	struct irq_data *data = irq_get_irq_data(IRQ_CORETMR);

+ 2 - 2
arch/blackfin/mach-common/cache-c.c

@@ -52,7 +52,7 @@ bfin_cache_init(struct cplb_entry *cplb_tbl, unsigned long cplb_addr,
 }
 
 #ifdef CONFIG_BFIN_ICACHE
-void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
+void bfin_icache_init(struct cplb_entry *icplb_tbl)
 {
 	bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL,
 		(IMC | ENICPLB));
@@ -60,7 +60,7 @@ void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
 #endif
 
 #ifdef CONFIG_BFIN_DCACHE
-void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
+void bfin_dcache_init(struct cplb_entry *dcplb_tbl)
 {
 	/*
 	 *  Anomaly notes:

+ 1 - 1
arch/blackfin/mach-common/ints-priority.c

@@ -1281,7 +1281,7 @@ static struct irq_chip bfin_gpio_irqchip = {
 	.irq_set_wake = bfin_gpio_set_wake,
 };
 
-void __cpuinit init_exception_vectors(void)
+void init_exception_vectors(void)
 {
 	/* cannot program in software:
 	 * evt0 - emulation (jtag)

+ 9 - 9
arch/blackfin/mach-common/smp.c

@@ -46,7 +46,7 @@ struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
 unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 #endif
 
-struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
+struct blackfin_initial_pda initial_pda_coreb;
 
 enum ipi_message_type {
 	BFIN_IPI_NONE,
@@ -147,7 +147,7 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 	platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
 	bfin_ipi_data = &__get_cpu_var(bfin_ipi);
-	while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+	while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) {
 		msg = 0;
 		do {
 			msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
@@ -182,8 +182,8 @@ static void bfin_ipi_init(void)
 	struct ipi_data *bfin_ipi_data;
 	for_each_possible_cpu(cpu) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		bfin_ipi_data->bits = 0;
-		bfin_ipi_data->count = 0;
+		atomic_set(&bfin_ipi_data->bits, 0);
+		atomic_set(&bfin_ipi_data->count, 0);
 	}
 }
 
@@ -246,7 +246,7 @@ void smp_send_stop(void)
 	return;
 }
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
 
@@ -259,7 +259,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 	return ret;
 }
 
-static void __cpuinit setup_secondary(unsigned int cpu)
+static void setup_secondary(unsigned int cpu)
 {
 	unsigned long ilat;
 
@@ -277,7 +277,7 @@ static void __cpuinit setup_secondary(unsigned int cpu)
 	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
 }
 
-void __cpuinit secondary_start_kernel(void)
+void secondary_start_kernel(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct mm_struct *mm = &init_mm;
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(resync_core_dcache);
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -415,7 +415,7 @@ int __cpuexit __cpu_disable(void)
 
 static DECLARE_COMPLETION(cpu_killed);
 
-int __cpuexit __cpu_die(unsigned int cpu)
+int __cpu_die(unsigned int cpu)
 {
 	return wait_for_completion_timeout(&cpu_killed, 5000);
 }

+ 1 - 1
arch/cris/arch-v32/kernel/smp.c

@@ -197,7 +197,7 @@ int setup_profiling_timer(unsigned int multiplier)
  */
 unsigned long cache_decay_ticks = 1;
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	smp_boot_one_cpu(cpu, tidle);
 	return cpu_online(cpu) ? 0 : -ENOSYS;

+ 1 - 1
arch/frv/kernel/setup.c

@@ -709,7 +709,7 @@ static void __init reserve_dma_coherent(void)
 /*
  * calibrate the delay loop
  */
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
 {
 	loops_per_jiffy = __delay_loops_MHz * (1000000 / HZ);
 

+ 1 - 1
arch/hexagon/kernel/setup.c

@@ -41,7 +41,7 @@ static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
 
 int on_simulator;
 
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
 {
 	loops_per_jiffy = thread_freq_mhz * 1000000 / HZ;
 }

+ 2 - 2
arch/hexagon/kernel/smp.c

@@ -146,7 +146,7 @@ void __init smp_prepare_boot_cpu(void)
  * to point to current thread info
  */
 
-void __cpuinit start_secondary(void)
+void start_secondary(void)
 {
 	unsigned int cpu;
 	unsigned long thread_ptr;
@@ -194,7 +194,7 @@ void __cpuinit start_secondary(void)
  * maintains control until "cpu_online(cpu)" is set.
  */
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	struct thread_info *thread = (struct thread_info *)idle->stack;
 	void *stack_start;

+ 1 - 1
arch/m32r/kernel/smpboot.c

@@ -343,7 +343,7 @@ static void __init do_boot_cpu(int phys_id)
 	}
 }
 
-int __cpuinit __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
 {
 	int timeout;
 

+ 3 - 3
arch/metag/kernel/perf/perf_event.c

@@ -813,8 +813,8 @@ static struct metag_pmu _metag_pmu = {
 };
 
 /* PMU CPU hotplug notifier */
-static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
-		unsigned long action, void *hcpu)
+static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
+				void *hcpu)
 {
 	unsigned int cpu = (unsigned int)hcpu;
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -828,7 +828,7 @@ static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata metag_pmu_notifier = {
+static struct notifier_block metag_pmu_notifier = {
 	.notifier_call = metag_pmu_cpu_notify,
 };
 

+ 10 - 12
arch/metag/kernel/smp.c

@@ -68,7 +68,7 @@ static DECLARE_COMPLETION(cpu_running);
 /*
  * "thread" is assumed to be a valid Meta hardware thread ID.
  */
-int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
+int boot_secondary(unsigned int thread, struct task_struct *idle)
 {
 	u32 val;
 
@@ -118,11 +118,9 @@ int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
  * If the cache partition has changed, prints a message to the log describing
  * those changes.
  */
-static __cpuinit void describe_cachepart_change(unsigned int thread,
-						const char *label,
-						unsigned int sz,
-						unsigned int old,
-						unsigned int new)
+static void describe_cachepart_change(unsigned int thread, const char *label,
+				      unsigned int sz, unsigned int old,
+				      unsigned int new)
 {
 	unsigned int lor1, land1, gor1, gand1;
 	unsigned int lor2, land2, gor2, gand2;
@@ -170,7 +168,7 @@ static __cpuinit void describe_cachepart_change(unsigned int thread,
  * Ensures that coherency is enabled and that the threads share the same cache
  * partitions.
  */
-static __cpuinit void setup_smp_cache(unsigned int thread)
+static void setup_smp_cache(unsigned int thread)
 {
 	unsigned int this_thread, lflags;
 	unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
@@ -215,7 +213,7 @@ static __cpuinit void setup_smp_cache(unsigned int thread)
 				  icpart_old, icpart_new);
 }
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned int thread = cpu_2_hwthread_id[cpu];
 	int ret;
@@ -268,7 +266,7 @@ static DECLARE_COMPLETION(cpu_killed);
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -299,7 +297,7 @@ int __cpuexit __cpu_disable(void)
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpuexit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
 	if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
 		pr_err("CPU%u: unable to kill\n", cpu);
@@ -311,7 +309,7 @@ void __cpuexit __cpu_die(unsigned int cpu)
  * Note that we do not return from this function. If this cpu is
  * brought online again it will need to run secondary_startup().
  */
-void __cpuexit cpu_die(void)
+void cpu_die(void)
 {
 	local_irq_disable();
 	idle_task_exit();
@@ -326,7 +324,7 @@ void __cpuexit cpu_die(void)
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
  */
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+void smp_store_cpu_info(unsigned int cpuid)
 {
 	struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);
 

+ 1 - 1
arch/metag/kernel/traps.c

@@ -812,7 +812,7 @@ static void set_trigger_mask(unsigned int mask)
 }
 #endif
 
-void __cpuinit per_cpu_trap_init(unsigned long cpu)
+void per_cpu_trap_init(unsigned long cpu)
 {
 	TBIRES int_context;
 	unsigned int thread = cpu_2_hwthread_id[cpu];

+ 1 - 0
arch/mips/Kconfig

@@ -1702,6 +1702,7 @@ endchoice
 
 config KVM_GUEST
 	bool "KVM Guest Kernel"
+	depends on BROKEN_ON_SMP
 	help
 	  Select this option if building a guest kernel for KVM (Trap & Emulate) mode
 

+ 1 - 1
arch/mips/ath79/setup.c

@@ -182,7 +182,7 @@ const char *get_system_type(void)
 	return ath79_sys_type;
 }
 
-unsigned int __cpuinit get_c0_compare_int(void)
+unsigned int get_c0_compare_int(void)
 {
 	return CP0_LEGACY_COMPARE_IRQ;
 }

Some files were not shown because too many files changed in this diff