Merge branch 'for_3.3/pm/omap4-mpuss' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-omap-pm into omap4

Tony Lindgren 13 years ago
parent commit 4c89aad9f4

+ 21 - 0
arch/arm/mach-omap2/Kconfig

@@ -353,6 +353,27 @@ config OMAP3_SDRC_AC_TIMING
 	  wish to say no.  Selecting yes without understanding what is
 	  going on could result in system crashes;
 
+config OMAP4_ERRATA_I688
+	bool "OMAP4 errata: Async Bridge Corruption"
+	depends on ARCH_OMAP4
+	select ARCH_HAS_BARRIERS
+	help
+	  If data is stalled inside the asynchronous bridge because of back
+	  pressure, it may be accepted multiple times, creating a pointer
+	  misalignment that will corrupt the next transfers on that data path
+	  until the next system reset (there is no recovery procedure once
+	  the issue is hit, the path remains consistently broken). Async
+	  bridges can be found on the MPU-to-EMIF and MPU-to-L3-interconnect
+	  paths. This situation can happen only when the idle is initiated
+	  by a Master Request Disconnection (which is triggered by software
+	  when executing WFI on the CPU).
+	  The workaround for this erratum requires that all initiators
+	  connected through an async bridge drain the data path properly
+	  before issuing WFI. This condition is met if one strongly ordered
+	  access is performed to the target right before executing WFI. In
+	  the MPU case, the L3 T2ASYNC FIFO and the DDR T2ASYNC FIFO need to
+	  be drained. An IO barrier ensures that there is no synchronisation
+	  loss on initiators operating on both interconnect ports simultaneously.
 endmenu
 
 endif
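
In practice the workaround described in the help text above boils down to one strongly ordered access per interconnect port right before WFI. A minimal sketch of the idea, assuming dram_sync/sram_sync are placeholder pointers to pages remapped as strongly ordered (MT_MEMORY_SO) — the real implementation lands below as omap_bus_sync() in omap4-common.c:

	/*
	 * Sketch: drain the MPU-to-EMIF and MPU-to-L3 async bridges before
	 * WFI. One strongly ordered read/write per port applies back
	 * pressure and flushes the T2ASYNC FIFOs. dram_sync/sram_sync are
	 * assumed mappings, not names from this patch.
	 */
	static inline void i688_drain_interconnect(void __iomem *dram_sync,
						   void __iomem *sram_sync)
	{
		writel_relaxed(readl_relaxed(dram_sync), dram_sync);	/* DDR path */
		writel_relaxed(readl_relaxed(sram_sync), sram_sync);	/* L3 path */
		isb();	/* complete the accesses before WFI is issued */
	}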

+ 10 - 6
arch/arm/mach-omap2/Makefile

@@ -11,10 +11,11 @@ hwmod-common				= omap_hwmod.o \
 					  omap_hwmod_common_data.o
 clock-common				= clock.o clock_common_data.o \
 					  clkt_dpll.o clkt_clksel.o
+secure-common                          = omap-smc.o omap-secure.o
 
-obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
-obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common)
-obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common)
+obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
+obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
+obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common)
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
 
@@ -24,11 +25,13 @@ obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
 obj-$(CONFIG_SMP)			+= omap-smp.o omap-headsmp.o
 obj-$(CONFIG_LOCAL_TIMERS)		+= timer-mpu.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= omap-hotplug.o
-obj-$(CONFIG_ARCH_OMAP4)		+= omap44xx-smc.o omap4-common.o
+obj-$(CONFIG_ARCH_OMAP4)		+= omap4-common.o omap-wakeupgen.o \
+					   sleep44xx.o
 
 plus_sec := $(call as-instr,.arch_extension sec,+sec)
 AFLAGS_omap-headsmp.o			:=-Wa,-march=armv7-a$(plus_sec)
-AFLAGS_omap44xx-smc.o			:=-Wa,-march=armv7-a$(plus_sec)
+AFLAGS_omap-smc.o			:=-Wa,-march=armv7-a$(plus_sec)
+AFLAGS_sleep44xx.o			:=-Wa,-march=armv7-a$(plus_sec)
 
 # Functions loaded to SRAM
 obj-$(CONFIG_SOC_OMAP2420)		+= sram242x.o
@@ -62,7 +65,8 @@ obj-$(CONFIG_ARCH_OMAP2)		+= pm24xx.o
 obj-$(CONFIG_ARCH_OMAP2)		+= sleep24xx.o
 obj-$(CONFIG_ARCH_OMAP3)		+= pm34xx.o sleep34xx.o \
 					   cpuidle34xx.o
-obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o omap-mpuss-lowpower.o \
+					   cpuidle44xx.o
 obj-$(CONFIG_PM_DEBUG)			+= pm-debug.o
 obj-$(CONFIG_OMAP_SMARTREFLEX)          += sr_device.o smartreflex.o
 obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3)	+= smartreflex-class3.o

+ 53 - 11
arch/arm/mach-omap2/common.h

@@ -24,9 +24,11 @@
 
 #ifndef __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
 #define __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
+#ifndef __ASSEMBLER__
 
 #include <linux/delay.h>
 #include <plat/common.h>
+#include <asm/proc-fns.h>
 
 #ifdef CONFIG_SOC_OMAP2420
 extern void omap242x_map_common_io(void);
@@ -156,23 +158,23 @@ void omap3_intc_resume_idle(void);
 void omap2_intc_handle_irq(struct pt_regs *regs);
 void omap3_intc_handle_irq(struct pt_regs *regs);
 
-/*
- * wfi used in low power code. Directly opcode is used instead
- * of instruction to avoid mulit-omap build break
- */
-#ifdef CONFIG_THUMB2_KERNEL
-#define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory")
-#else
-#define do_wfi()			\
-		__asm__ __volatile__ (".word	0xe320f003" : : : "memory")
+#ifdef CONFIG_CACHE_L2X0
+extern void __iomem *omap4_get_l2cache_base(void);
 #endif
 
-#ifdef CONFIG_CACHE_L2X0
-extern void __iomem *l2cache_base;
+#ifdef CONFIG_SMP
+extern void __iomem *omap4_get_scu_base(void);
+#else
+static inline void __iomem *omap4_get_scu_base(void)
+{
+	return NULL;
+}
 #endif
 
 extern void __init gic_init_irq(void);
 extern void omap_smc1(u32 fn, u32 arg);
+extern void __iomem *omap4_get_sar_ram_base(void);
+extern void omap_do_wfi(void);
 
 #ifdef CONFIG_SMP
 /* Needed for secondary core boot */
@@ -182,4 +184,44 @@ extern void omap_auxcoreboot_addr(u32 cpu_addr);
 extern u32 omap_read_auxcoreboot0(void);
 #endif
 
+#if defined(CONFIG_SMP) && defined(CONFIG_PM)
+extern int omap4_mpuss_init(void);
+extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
+extern int omap4_finish_suspend(unsigned long cpu_state);
+extern void omap4_cpu_resume(void);
+extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_mpuss_read_prev_context_state(void);
+#else
+static inline int omap4_enter_lowpower(unsigned int cpu,
+					unsigned int power_state)
+{
+	cpu_do_idle();
+	return 0;
+}
+
+static inline int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
+{
+	cpu_do_idle();
+	return 0;
+}
+
+static inline int omap4_mpuss_init(void)
+{
+	return 0;
+}
+
+static inline int omap4_finish_suspend(unsigned long cpu_state)
+{
+	return 0;
+}
+
+static inline void omap4_cpu_resume(void)
+{}
+
+static inline u32 omap4_mpuss_read_prev_context_state(void)
+{
+	return 0;
+}
+#endif
+#endif /* __ASSEMBLER__ */
 #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */

+ 15 - 0
arch/arm/mach-omap2/cpuidle34xx.c

@@ -25,6 +25,7 @@
 #include <linux/sched.h>
 #include <linux/cpuidle.h>
 #include <linux/export.h>
+#include <linux/cpu_pm.h>
 
 #include <plat/prcm.h>
 #include <plat/irqs.h>
@@ -124,9 +125,23 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
 		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
 	}
 
+	/*
+	 * Call idle CPU PM enter notifier chain so that
+	 * VFP context is saved.
+	 */
+	if (mpu_state == PWRDM_POWER_OFF)
+		cpu_pm_enter();
+
 	/* Execute ARM wfi */
 	omap_sram_idle();
 
+	/*
+	 * Call idle CPU PM exit notifier chain to restore
+	 * VFP context.
+	 */
+	if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
+		cpu_pm_exit();
+
 	/* Re-allow idle for C1 */
 	if (index == 0) {
 		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);

+ 245 - 0
arch/arm/mach-omap2/cpuidle44xx.c

@@ -0,0 +1,245 @@
+/*
+ * OMAP4 CPU idle Routines
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/export.h>
+#include <linux/clockchips.h>
+
+#include <asm/proc-fns.h>
+
+#include "common.h"
+#include "pm.h"
+#include "prm.h"
+
+#ifdef CONFIG_CPU_IDLE
+
+/* Machine specific information to be recorded in the C-state driver_data */
+struct omap4_idle_statedata {
+	u32 cpu_state;
+	u32 mpu_logic_state;
+	u32 mpu_state;
+	u8 valid;
+};
+
+static struct cpuidle_params cpuidle_params_table[] = {
+	/* C1 - CPU0 ON + CPU1 ON + MPU ON */
+	{.exit_latency = 2 + 2 , .target_residency = 5, .valid = 1},
+	/* C2- CPU0 OFF + CPU1 OFF + MPU CSWR */
+	{.exit_latency = 328 + 440 , .target_residency = 960, .valid = 1},
+	/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
+	{.exit_latency = 460 + 518 , .target_residency = 1100, .valid = 1},
+};
+
+#define OMAP4_NUM_STATES ARRAY_SIZE(cpuidle_params_table)
+
+struct omap4_idle_statedata omap4_idle_data[OMAP4_NUM_STATES];
+static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
+
+/**
+ * omap4_enter_idle - Programs OMAP4 to enter the specified state
+ * @dev: cpuidle device
+ * @drv: cpuidle driver
+ * @index: the index of state to be entered
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified low power state selected by the governor.
+ * Returns the amount of time spent in the low power state.
+ */
+static int omap4_enter_idle(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv,
+			int index)
+{
+	struct omap4_idle_statedata *cx =
+			cpuidle_get_statedata(&dev->states_usage[index]);
+	struct timespec ts_preidle, ts_postidle, ts_idle;
+	u32 cpu1_state;
+	int idle_time;
+	int new_state_idx;
+	int cpu_id = smp_processor_id();
+
+	/* Used to keep track of the total time in idle */
+	getnstimeofday(&ts_preidle);
+
+	local_irq_disable();
+	local_fiq_disable();
+
+	/*
+	 * CPU0 has to stay ON (i.e. in C1) until CPU1 is in the OFF state.
+	 * This is necessary to honour the hardware recommendation
+	 * of triggering all the possible low power modes once CPU1 is
+	 * out of coherency and in OFF mode.
+	 * Update dev->last_state so that the governor stats reflect the
+	 * right data.
+	 */
+	cpu1_state = pwrdm_read_pwrst(cpu1_pd);
+	if (cpu1_state != PWRDM_POWER_OFF) {
+		new_state_idx = drv->safe_state_index;
+		cx = cpuidle_get_statedata(&dev->states_usage[new_state_idx]);
+	}
+
+	if (index > 0)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+
+	/*
+	 * Call idle CPU PM enter notifier chain so that
+	 * VFP and per CPU interrupt context is saved.
+	 */
+	if (cx->cpu_state == PWRDM_POWER_OFF)
+		cpu_pm_enter();
+
+	pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+	omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+
+	/*
+	 * Call idle CPU cluster PM enter notifier chain
+	 * to save GIC and wakeupgen context.
+	 */
+	if ((cx->mpu_state == PWRDM_POWER_RET) &&
+		(cx->mpu_logic_state == PWRDM_POWER_OFF))
+			cpu_cluster_pm_enter();
+
+	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+
+	/*
+	 * Call idle CPU PM exit notifier chain to restore
+	 * VFP and per CPU IRQ context. Only CPU0 state is
+	 * considered since CPU1 is managed by CPU hotplug.
+	 */
+	if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
+		cpu_pm_exit();
+
+	/*
+	 * Call idle CPU cluster PM exit notifier chain
+	 * to restore GIC and wakeupgen context.
+	 */
+	if (omap4_mpuss_read_prev_context_state())
+		cpu_cluster_pm_exit();
+
+	if (index > 0)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
+	getnstimeofday(&ts_postidle);
+	ts_idle = timespec_sub(ts_postidle, ts_preidle);
+
+	local_irq_enable();
+	local_fiq_enable();
+
+	idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
+								USEC_PER_SEC;
+
+	/* Update cpuidle counters */
+	dev->last_residency = idle_time;
+
+	return index;
+}
+
+DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
+
+struct cpuidle_driver omap4_idle_driver = {
+	.name =		"omap4_idle",
+	.owner =	THIS_MODULE,
+};
+
+static inline void _fill_cstate(struct cpuidle_driver *drv,
+					int idx, const char *descr)
+{
+	struct cpuidle_state *state = &drv->states[idx];
+
+	state->exit_latency	= cpuidle_params_table[idx].exit_latency;
+	state->target_residency	= cpuidle_params_table[idx].target_residency;
+	state->flags		= CPUIDLE_FLAG_TIME_VALID;
+	state->enter		= omap4_enter_idle;
+	sprintf(state->name, "C%d", idx + 1);
+	strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
+}
+
+static inline struct omap4_idle_statedata *_fill_cstate_usage(
+					struct cpuidle_device *dev,
+					int idx)
+{
+	struct omap4_idle_statedata *cx = &omap4_idle_data[idx];
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];
+
+	cx->valid		= cpuidle_params_table[idx].valid;
+	cpuidle_set_statedata(state_usage, cx);
+
+	return cx;
+}
+
+
+
+/**
+ * omap4_idle_init - Init routine for OMAP4 idle
+ *
+ * Registers the OMAP4 specific cpuidle driver to the cpuidle
+ * framework with the valid set of states.
+ */
+int __init omap4_idle_init(void)
+{
+	struct omap4_idle_statedata *cx;
+	struct cpuidle_device *dev;
+	struct cpuidle_driver *drv = &omap4_idle_driver;
+	unsigned int cpu_id = 0;
+
+	mpu_pd = pwrdm_lookup("mpu_pwrdm");
+	cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
+	cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
+	if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
+		return -ENODEV;
+
+
+	drv->safe_state_index = -1;
+	dev = &per_cpu(omap4_idle_dev, cpu_id);
+	dev->cpu = cpu_id;
+
+	/* C1 - CPU0 ON + CPU1 ON + MPU ON */
+	_fill_cstate(drv, 0, "MPUSS ON");
+	drv->safe_state_index = 0;
+	cx = _fill_cstate_usage(dev, 0);
+	cx->valid = 1;	/* C1 is always valid */
+	cx->cpu_state = PWRDM_POWER_ON;
+	cx->mpu_state = PWRDM_POWER_ON;
+	cx->mpu_logic_state = PWRDM_POWER_RET;
+
+	/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
+	_fill_cstate(drv, 1, "MPUSS CSWR");
+	cx = _fill_cstate_usage(dev, 1);
+	cx->cpu_state = PWRDM_POWER_OFF;
+	cx->mpu_state = PWRDM_POWER_RET;
+	cx->mpu_logic_state = PWRDM_POWER_RET;
+
+	/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
+	_fill_cstate(drv, 2, "MPUSS OSWR");
+	cx = _fill_cstate_usage(dev, 2);
+	cx->cpu_state = PWRDM_POWER_OFF;
+	cx->mpu_state = PWRDM_POWER_RET;
+	cx->mpu_logic_state = PWRDM_POWER_OFF;
+
+	drv->state_count = OMAP4_NUM_STATES;
+	cpuidle_register_driver(&omap4_idle_driver);
+
+	dev->state_count = OMAP4_NUM_STATES;
+	if (cpuidle_register_device(dev)) {
+		pr_err("%s: CPUidle register device failed\n", __func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+#else
+int __init omap4_idle_init(void)
+{
+	return 0;
+}
+#endif /* CONFIG_CPU_IDLE */

+ 31 - 0
arch/arm/mach-omap2/include/mach/barriers.h

@@ -0,0 +1,31 @@
+/*
+ * OMAP memory barrier header.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *  Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *  Richard Woodruff <r-woodruff2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __MACH_BARRIERS_H
+#define __MACH_BARRIERS_H
+
+extern void omap_bus_sync(void);
+
+#define rmb()		dsb()
+#define wmb()		do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
+#define mb()		wmb()
+
+#endif	/* __MACH_BARRIERS_H */
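
Because the Kconfig entry selects ARCH_HAS_BARRIERS, these definitions replace the generic ARM barriers kernel-wide, so every existing mb()/wmb() call site transparently picks up the interconnect drain. A hypothetical driver-side example (desc, DESC_READY, DOORBELL_GO and doorbell_reg are made-up names for illustration only):

	/* Publish a DMA descriptor, then ring the device doorbell. */
	desc->status = DESC_READY;
	wmb();	/* with CONFIG_OMAP4_ERRATA_I688 this expands to
		 * dsb(); outer_sync(); omap_bus_sync(); so the descriptor
		 * is drained to the L3/EMIF side before the doorbell write */
	writel_relaxed(DOORBELL_GO, doorbell_reg);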

+ 57 - 0
arch/arm/mach-omap2/include/mach/omap-secure.h

@@ -0,0 +1,57 @@
+/*
+ * omap-secure.h: OMAP Secure infrastructure header.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_OMAP_SECURE_H
+#define OMAP_ARCH_OMAP_SECURE_H
+
+/* Monitor error code */
+#define  API_HAL_RET_VALUE_NS2S_CONVERSION_ERROR	0xFFFFFFFE
+#define  API_HAL_RET_VALUE_SERVICE_UNKNWON		0xFFFFFFFF
+
+/* HAL API error codes */
+#define  API_HAL_RET_VALUE_OK		0x00
+#define  API_HAL_RET_VALUE_FAIL		0x01
+
+/* Secure HAL API flags */
+#define FLAG_START_CRITICAL		0x4
+#define FLAG_IRQFIQ_MASK		0x3
+#define FLAG_IRQ_ENABLE			0x2
+#define FLAG_FIQ_ENABLE			0x1
+#define NO_FLAG				0x0
+
+/* Maximum Secure memory storage size */
+#define OMAP_SECURE_RAM_STORAGE	(88 * SZ_1K)
+
+/* Secure low power HAL API index */
+#define OMAP4_HAL_SAVESECURERAM_INDEX	0x1a
+#define OMAP4_HAL_SAVEHW_INDEX		0x1b
+#define OMAP4_HAL_SAVEALL_INDEX		0x1c
+#define OMAP4_HAL_SAVEGIC_INDEX		0x1d
+
+/* Secure Monitor mode APIs */
+#define OMAP4_MON_SCU_PWR_INDEX		0x108
+#define OMAP4_MON_L2X0_DBG_CTRL_INDEX	0x100
+#define OMAP4_MON_L2X0_CTRL_INDEX	0x102
+#define OMAP4_MON_L2X0_AUXCTRL_INDEX	0x109
+#define OMAP4_MON_L2X0_PREFETCH_INDEX	0x113
+
+/* Secure PPA(Primary Protected Application) APIs */
+#define OMAP4_PPA_L2_POR_INDEX		0x23
+#define OMAP4_PPA_CPU_ACTRL_SMP_INDEX	0x25
+
+#ifndef __ASSEMBLER__
+
+extern u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs,
+				u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+extern u32 omap_smc2(u32 id, u32 flag, u32 pargs);
+extern phys_addr_t omap_secure_ram_mempool_base(void);
+
+#endif /* __ASSEMBLER__ */
+#endif /* OMAP_ARCH_OMAP_SECURE_H */

+ 39 - 0
arch/arm/mach-omap2/include/mach/omap-wakeupgen.h

@@ -0,0 +1,39 @@
+/*
+ * OMAP WakeupGen header file
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_WAKEUPGEN_H
+#define OMAP_ARCH_WAKEUPGEN_H
+
+#define OMAP_WKG_CONTROL_0			0x00
+#define OMAP_WKG_ENB_A_0			0x10
+#define OMAP_WKG_ENB_B_0			0x14
+#define OMAP_WKG_ENB_C_0			0x18
+#define OMAP_WKG_ENB_D_0			0x1c
+#define OMAP_WKG_ENB_SECURE_A_0			0x20
+#define OMAP_WKG_ENB_SECURE_B_0			0x24
+#define OMAP_WKG_ENB_SECURE_C_0			0x28
+#define OMAP_WKG_ENB_SECURE_D_0			0x2c
+#define OMAP_WKG_ENB_A_1			0x410
+#define OMAP_WKG_ENB_B_1			0x414
+#define OMAP_WKG_ENB_C_1			0x418
+#define OMAP_WKG_ENB_D_1			0x41c
+#define OMAP_WKG_ENB_SECURE_A_1			0x420
+#define OMAP_WKG_ENB_SECURE_B_1			0x424
+#define OMAP_WKG_ENB_SECURE_C_1			0x428
+#define OMAP_WKG_ENB_SECURE_D_1			0x42c
+#define OMAP_AUX_CORE_BOOT_0			0x800
+#define OMAP_AUX_CORE_BOOT_1			0x804
+#define OMAP_PTMSYNCREQ_MASK			0xc00
+#define OMAP_PTMSYNCREQ_EN			0xc04
+#define OMAP_TIMESTAMPCYCLELO			0xc08
+#define OMAP_TIMESTAMPCYCLEHI			0xc0c
+
+extern int __init omap_wakeupgen_init(void);
+#endif

+ 9 - 0
arch/arm/mach-omap2/io.c

@@ -237,6 +237,15 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
 		.length		= L4_EMU_44XX_SIZE,
 		.type		= MT_DEVICE,
 	},
+#ifdef CONFIG_OMAP4_ERRATA_I688
+	{
+		.virtual	= OMAP4_SRAM_VA,
+		.pfn		= __phys_to_pfn(OMAP4_SRAM_PA),
+		.length		= PAGE_SIZE,
+		.type		= MT_MEMORY_SO,
+	},
+#endif
+
 };
 #endif
 

+ 0 - 5
arch/arm/mach-omap2/omap-headsmp.S

@@ -18,11 +18,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-/* Physical address needed since MMU not enabled yet on secondary core */
-#define OMAP4_AUX_CORE_BOOT1_PA			0x48281804
-
-	__INIT
-
 /*
  * OMAP4 specific entry point for secondary CPU to jump from ROM
  * code.  This routine also provides a holding flag into which
  * code.  This routine also provides a holding flag into which

+ 9 - 5
arch/arm/mach-omap2/omap-hotplug.c

@@ -22,6 +22,8 @@
 
 #include "common.h"
 
+#include "powerdomain.h"
+
 int platform_cpu_kill(unsigned int cpu)
 {
 	return 1;
@@ -33,6 +35,8 @@ int platform_cpu_kill(unsigned int cpu)
  */
 void platform_cpu_die(unsigned int cpu)
 {
+	unsigned int this_cpu;
+
 	flush_cache_all();
 	dsb();
 
@@ -40,15 +44,15 @@ void platform_cpu_die(unsigned int cpu)
 	 * we're ready for shutdown now, so do it
 	 */
 	if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0)
-		printk(KERN_CRIT "Secure clear status failed\n");
+		pr_err("Secure clear status failed\n");
 
 	for (;;) {
 		/*
-		 * Execute WFI
+		 * Enter into low power state
 		 */
-		do_wfi();
-
-		if (omap_read_auxcoreboot0() == cpu) {
+		omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
+		this_cpu = smp_processor_id();
+		if (omap_read_auxcoreboot0() == this_cpu) {
 			/*
 			 * OK, proper wakeup, we're done
 			 */

+ 398 - 0
arch/arm/mach-omap2/omap-mpuss-lowpower.c

@@ -0,0 +1,398 @@
+/*
+ * OMAP MPUSS low power code
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
+ * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
+ * CPU0 and CPU1 LPRM modules.
+ * CPU0, CPU1 and MPUSS each have their own power domain and
+ * hence multiple low power combinations of MPUSS are possible.
+ *
+ * The CPU0 and CPU1 can't support Closed switch Retention (CSWR)
+ * because the mode is not supported by hw constraints of dormant
+ * mode. While waking up from the dormant mode, a reset signal
+ * to the Cortex-A9 processor must be asserted by the external
+ * power controller.
+ *
+ * With architectural inputs and hardware recommendations, only
+ * below modes are supported from power gain vs latency point of view.
+ *
+ *	CPU0		CPU1		MPUSS
+ *	----------------------------------------------
+ *	ON		ON		ON
+ *	ON(Inactive)	OFF		ON(Inactive)
+ *	OFF		OFF		CSWR
+ *	OFF		OFF		OSWR
+ *	OFF		OFF		OFF(Device OFF *TBD)
+ *	----------------------------------------------
+ *
+ * Note: CPU0 is the master core and it is the last CPU to go down
+ * and the first to wake up when MPUSS low power states are exercised.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/smp_scu.h>
+#include <asm/system.h>
+#include <asm/pgalloc.h>
+#include <asm/suspend.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <plat/omap44xx.h>
+
+#include "common.h"
+#include "omap4-sar-layout.h"
+#include "pm.h"
+#include "prcm_mpu44xx.h"
+#include "prminst44xx.h"
+#include "prcm44xx.h"
+#include "prm44xx.h"
+#include "prm-regbits-44xx.h"
+
+#ifdef CONFIG_SMP
+
+struct omap4_cpu_pm_info {
+	struct powerdomain *pwrdm;
+	void __iomem *scu_sar_addr;
+	void __iomem *wkup_sar_addr;
+	void __iomem *l2x0_sar_addr;
+};
+
+static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
+static struct powerdomain *mpuss_pd;
+static void __iomem *sar_base;
+
+/*
+ * Program the wakeup routine address for the CPU0 and CPU1
+ * used for OFF or DORMANT wakeup.
+ */
+static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
+{
+	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+	__raw_writel(addr, pm_info->wkup_sar_addr);
+}
+
+/*
+ * Set the CPUx powerdomain's next power state
+ */
+static inline void set_cpu_next_pwrst(unsigned int cpu_id,
+				unsigned int power_state)
+{
+	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
+}
+
+/*
+ * Read CPU's previous power state
+ */
+static inline unsigned int read_cpu_prev_pwrst(unsigned int cpu_id)
+{
+	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+	return pwrdm_read_prev_pwrst(pm_info->pwrdm);
+}
+
+/*
+ * Clear the CPUx powerdomain's previous power state
+ */
+static inline void clear_cpu_prev_pwrst(unsigned int cpu_id)
+{
+	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
+}
+
+/*
+ * Store the SCU power status value to scratchpad memory
+ */
+static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
+{
+	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+	u32 scu_pwr_st;
+
+	switch (cpu_state) {
+	case PWRDM_POWER_RET:
+		scu_pwr_st = SCU_PM_DORMANT;
+		break;
+	case PWRDM_POWER_OFF:
+		scu_pwr_st = SCU_PM_POWEROFF;
+		break;
+	case PWRDM_POWER_ON:
+	case PWRDM_POWER_INACTIVE:
+	default:
+		scu_pwr_st = SCU_PM_NORMAL;
+		break;
+	}
+
+	__raw_writel(scu_pwr_st, pm_info->scu_sar_addr);
+}
+
+/* Helper functions for MPUSS OSWR */
+static inline void mpuss_clear_prev_logic_pwrst(void)
+{
+	u32 reg;
+
+	reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
+	omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
+		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
+}
+
+static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
+{
+	u32 reg;
+
+	if (cpu_id) {
+		reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
+					OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
+		omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
+					OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
+	} else {
+		reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
+					OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
+		omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
+					OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
+	}
+}
+
+/**
+ * omap4_mpuss_read_prev_context_state:
+ * Function returns the MPUSS previous context state
+ */
+u32 omap4_mpuss_read_prev_context_state(void)
+{
+	u32 reg;
+
+	reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
+	reg &= OMAP4430_LOSTCONTEXT_DFF_MASK;
+	return reg;
+}
+
+/*
+ * Store the CPU cluster state for L2X0 low power operations.
+ */
+static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
+{
+	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
+
+	__raw_writel(save_state, pm_info->l2x0_sar_addr);
+}
+
+/*
+ * Save the L2X0 AUXCTRL and POR values to SAR memory. They are used
+ * in every MPUSS OFF restore path.
+ */
+#ifdef CONFIG_CACHE_L2X0
+static void save_l2x0_context(void)
+{
+	u32 val;
+	void __iomem *l2x0_base = omap4_get_l2cache_base();
+
+	val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
+	__raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
+	val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
+	__raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
+}
+#else
+static void save_l2x0_context(void)
+{}
+#endif
+
+/**
+ * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
+ * The purpose of this function is to manage low power programming
+ * of OMAP4 MPUSS subsystem
+ * @cpu : CPU ID
+ * @power_state: Low power state.
+ *
+ * MPUSS states for the context save:
+ * save_state =
+ *	0 - Nothing lost and no need to save: MPUSS INACTIVE
+ *	1 - CPUx L1 and logic lost: MPUSS CSWR
+ *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
+ *	3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
+ */
+int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
+{
+	unsigned int save_state = 0;
+	unsigned int wakeup_cpu;
+
+	if (omap_rev() == OMAP4430_REV_ES1_0)
+		return -ENXIO;
+
+	switch (power_state) {
+	case PWRDM_POWER_ON:
+	case PWRDM_POWER_INACTIVE:
+		save_state = 0;
+		break;
+	case PWRDM_POWER_OFF:
+		save_state = 1;
+		break;
+	case PWRDM_POWER_RET:
+	default:
+		/*
+		 * CPUx CSWR is an invalid hardware state. Also CPUx OSWR
+		 * doesn't make much sense, since logic is lost and L1
+		 * needs to be cleaned because of coherency. This makes
+		 * CPUx OSWR equivalent to CPUx OFF and hence not supported.
+		 */
+		WARN_ON(1);
+		return -ENXIO;
+	}
+
+	pwrdm_pre_transition();
+
+	/*
+	 * Check MPUSS next state and save interrupt controller if needed.
+	 * In MPUSS OSWR or device OFF, the interrupt controller context is lost.
+	 */
+	mpuss_clear_prev_logic_pwrst();
+	pwrdm_clear_all_prev_pwrst(mpuss_pd);
+	if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
+		(pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
+		save_state = 2;
+
+	clear_cpu_prev_pwrst(cpu);
+	cpu_clear_prev_logic_pwrst(cpu);
+	set_cpu_next_pwrst(cpu, power_state);
+	set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume));
+	scu_pwrst_prepare(cpu, power_state);
+	l2x0_pwrst_prepare(cpu, save_state);
+
+	/*
+	 * Call the low level function with the targeted low power state.
+	 */
+	cpu_suspend(save_state, omap4_finish_suspend);
+
+	/*
+	 * Restore the CPUx power state to ON otherwise the CPUx
+	 * power domain can transition to the programmed low power
+	 * state while doing WFI outside the low power code. On
+	 * secure devices, CPUx does WFI which can result in a
+	 * domain transition.
+	 */
+	wakeup_cpu = smp_processor_id();
+	set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON);
+
+	pwrdm_post_transition();
+
+	return 0;
+}
+
+/**
+ * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
+ * @cpu : CPU ID
+ * @power_state: CPU low power state.
+ */
+int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
+{
+	unsigned int cpu_state = 0;
+
+	if (omap_rev() == OMAP4430_REV_ES1_0)
+		return -ENXIO;
+
+	if (power_state == PWRDM_POWER_OFF)
+		cpu_state = 1;
+
+	clear_cpu_prev_pwrst(cpu);
+	set_cpu_next_pwrst(cpu, power_state);
+	set_cpu_wakeup_addr(cpu, virt_to_phys(omap_secondary_startup));
+	scu_pwrst_prepare(cpu, power_state);
+
+	/*
+	 * The CPU never returns if the targeted power state is OFF mode.
+	 * CPU ONLINE follows the normal CPU ONLINE path via
+	 * omap_secondary_startup().
+	 */
+	omap4_finish_suspend(cpu_state);
+
+	set_cpu_next_pwrst(cpu, PWRDM_POWER_ON);
+	return 0;
+}
+
+
+/*
+ * Initialise OMAP4 MPUSS
+ */
+int __init omap4_mpuss_init(void)
+{
+	struct omap4_cpu_pm_info *pm_info;
+
+	if (omap_rev() == OMAP4430_REV_ES1_0) {
+		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
+		return -ENODEV;
+	}
+
+	sar_base = omap4_get_sar_ram_base();
+
+	/* Initialise per-CPU PM information */
+	pm_info = &per_cpu(omap4_pm_info, 0x0);
+	pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
+	pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
+	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
+	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
+	if (!pm_info->pwrdm) {
+		pr_err("Lookup failed for CPU0 pwrdm\n");
+		return -ENODEV;
+	}
+
+	/* Clear CPU previous power domain state */
+	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
+	cpu_clear_prev_logic_pwrst(0);
+
+	/* Initialise CPU0 power domain state to ON */
+	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
+
+	pm_info = &per_cpu(omap4_pm_info, 0x1);
+	pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
+	pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
+	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
+	if (!pm_info->pwrdm) {
+		pr_err("Lookup failed for CPU1 pwrdm\n");
+		return -ENODEV;
+	}
+
+	/* Clear CPU previous power domain state */
+	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
+	cpu_clear_prev_logic_pwrst(1);
+
+	/* Initialise CPU1 power domain state to ON */
+	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
+
+	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
+	if (!mpuss_pd) {
+		pr_err("Failed to lookup MPUSS power domain\n");
+		return -ENODEV;
+	}
+	pwrdm_clear_all_prev_pwrst(mpuss_pd);
+	mpuss_clear_prev_logic_pwrst();
+
+	/* Save device type on scratchpad for low level code to use */
+	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
+		__raw_writel(1, sar_base + OMAP_TYPE_OFFSET);
+	else
+		__raw_writel(0, sar_base + OMAP_TYPE_OFFSET);
+
+	save_l2x0_context();
+
+	return 0;
+}
+
+#endif

+ 81 - 0
arch/arm/mach-omap2/omap-secure.c

@@ -0,0 +1,81 @@
+/*
+ * OMAP Secure API infrastructure.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/cacheflush.h>
+
+#include <mach/omap-secure.h>
+
+static phys_addr_t omap_secure_memblock_base;
+
+/**
+ * omap_secure_dispatcher: Routine to dispatch low power secure
+ * service routines
+ * @idx: The HAL API index
+ * @flag: The flag indicating criticality of operation
+ * @nargs: Number of valid arguments out of four.
+ * @arg1, @arg2, @arg3, @arg4: Parameters passed to the secure API
+ *
+ * Returns a non-zero error value on failure.
+ */
+u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
+							 u32 arg3, u32 arg4)
+{
+	u32 ret;
+	u32 param[5];
+
+	param[0] = nargs;
+	param[1] = arg1;
+	param[2] = arg2;
+	param[3] = arg3;
+	param[4] = arg4;
+
+	/*
+	 * Secure API needs physical address
+	 * pointer for the parameters
+	 */
+	flush_cache_all();
+	outer_clean_range(__pa(param), __pa(param + 5));
+	ret = omap_smc2(idx, flag, __pa(param));
+
+	return ret;
+}
+
+/* Allocate the memory to save secure ram */
+int __init omap_secure_ram_reserve_memblock(void)
+{
+	phys_addr_t paddr;
+	u32 size = OMAP_SECURE_RAM_STORAGE;
+
+	size = ALIGN(size, SZ_1M);
+	paddr = memblock_alloc(size, SZ_1M);
+	if (!paddr) {
+		pr_err("%s: failed to reserve %x bytes\n",
+				__func__, size);
+		return -ENOMEM;
+	}
+	memblock_free(paddr, size);
+	memblock_remove(paddr, size);
+
+	omap_secure_memblock_base = paddr;
+
+	return 0;
+}
+
+phys_addr_t omap_secure_ram_mempool_base(void)
+{
+	return omap_secure_memblock_base;
+}
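
For reference, callers hand omap_secure_dispatcher() the HAL index, a criticality flag and up to four arguments; the GIC/WakeupGen save path added later in this commit uses it exactly like this:

	/* From irq_save_secure_context() in omap-wakeupgen.c below: */
	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				FLAG_START_CRITICAL,
				0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");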

+ 23 - 0
arch/arm/mach-omap2/omap44xx-smc.S → arch/arm/mach-omap2/omap-smc.S

@@ -31,6 +31,29 @@ ENTRY(omap_smc1)
 	ldmfd   sp!, {r2-r12, pc}
 ENDPROC(omap_smc1)
 
+/**
+ * u32 omap_smc2(u32 id, u32 flag, u32 pargs)
+ * Low level common routine for secure HAL and PPA APIs.
+ * @id: Application ID of HAL APIs
+ * @flag: Flag to indicate the criticality of operation
+ * @pargs: Physical address of parameter list starting
+ *	    with number of parameters
+ */
+ENTRY(omap_smc2)
+	stmfd   sp!, {r4-r12, lr}
+	mov	r3, r2
+	mov	r2, r1
+	mov	r1, #0x0	@ Process ID
+	mov	r6, #0xff
+	mov	r12, #0x00	@ Secure Service ID
+	mov	r7, #0
+	mcr	p15, 0, r7, c7, c5, 6
+	dsb
+	dmb
+	smc	#0
+	ldmfd   sp!, {r4-r12, pc}
+ENDPROC(omap_smc2)
+
 ENTRY(omap_modify_auxcoreboot0)
 	stmfd   sp!, {r1-r12, lr}
 	ldr	r12, =0x104

+ 45 - 0
arch/arm/mach-omap2/omap-smp.c

@@ -24,16 +24,36 @@
 #include <asm/hardware/gic.h>
 #include <asm/smp_scu.h>
 #include <mach/hardware.h>
+#include <mach/omap-secure.h>
 
 #include "common.h"
 
+#include "clockdomain.h"
+
 /* SCU base address */
 static void __iomem *scu_base;
 
 static DEFINE_SPINLOCK(boot_lock);
 
+void __iomem *omap4_get_scu_base(void)
+{
+	return scu_base;
+}
+
 void __cpuinit platform_secondary_init(unsigned int cpu)
 {
+	/*
+	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
+	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
+	 * init and for CPU1, a secure PPA API provided. CPU0 must be ON
+	 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
+	 * OMAP443X GP devices- SMP bit isn't accessible.
+	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
+	 */
+	if (cpu_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
+		omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
+							4, 0, 0, 0, 0, 0);
+
 	/*
 	 * If any interrupts are already enabled for the primary
 	 * core (e.g. timer irq), then they will not have been enabled
 	 * core (e.g. timer irq), then they will not have been enabled
@@ -50,6 +70,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
 
 
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 {
+	static struct clockdomain *cpu1_clkdm;
+	static bool booted;
 	/*
 	/*
 	 * Set synchronisation state between this boot processor
 	 * Set synchronisation state between this boot processor
 	 * and the secondary one
 	 * and the secondary one
@@ -65,6 +87,29 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 	omap_modify_auxcoreboot0(0x200, 0xfffffdff);
 	omap_modify_auxcoreboot0(0x200, 0xfffffdff);
 	flush_cache_all();
 	flush_cache_all();
 	smp_wmb();
 	smp_wmb();
+
+	if (!cpu1_clkdm)
+		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
+
+	/*
+	 * The SGI(Software Generated Interrupts) are not wakeup capable
+	 * from low power states. This is known limitation on OMAP4 and
+	 * needs to be worked around by using software forced clockdomain
+	 * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
+	 * software force wakeup. The clockdomain is then put back to
+	 * hardware supervised mode.
+	 * More details can be found in OMAP4430 TRM - Version J
+	 * Section :
+	 *	4.3.4.2 Power States of CPU0 and CPU1
+	 */
+	if (booted) {
+		clkdm_wakeup(cpu1_clkdm);
+		clkdm_allow_idle(cpu1_clkdm);
+	} else {
+		dsb_sev();
+		booted = true;
+	}
+
 	gic_raise_softirq(cpumask_of(cpu), 1);
 	gic_raise_softirq(cpumask_of(cpu), 1);
 
 
 	/*
 	/*

+ 389 - 0
arch/arm/mach-omap2/omap-wakeupgen.c

@@ -0,0 +1,389 @@
+/*
+ * OMAP WakeupGen Source file
+ *
+ * OMAP WakeupGen is the interrupt controller extension used along
+ * with ARM GIC to wake the CPU out from low power states on
+ * external interrupts. It is responsible for generating wakeup
+ * event from the incoming interrupts and enable bits. It is
+ * implemented in MPU always ON power domain. During normal operation,
+ * WakeupGen delivers external interrupts directly to the GIC.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu_pm.h>
+
+#include <asm/hardware/gic.h>
+
+#include <mach/omap-wakeupgen.h>
+#include <mach/omap-secure.h>
+
+#include "omap4-sar-layout.h"
+#include "common.h"
+
+#define NR_REG_BANKS		4
+#define MAX_IRQS		128
+#define WKG_MASK_ALL		0x00000000
+#define WKG_UNMASK_ALL		0xffffffff
+#define CPU_ENA_OFFSET		0x400
+#define CPU0_ID			0x0
+#define CPU1_ID			0x1
+
+static void __iomem *wakeupgen_base;
+static void __iomem *sar_base;
+static DEFINE_PER_CPU(u32 [NR_REG_BANKS], irqmasks);
+static DEFINE_SPINLOCK(wakeupgen_lock);
+static unsigned int irq_target_cpu[NR_IRQS];
+
+/*
+ * Static helper functions.
+ */
+static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
+{
+	return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 +
+				(cpu * CPU_ENA_OFFSET) + (idx * 4));
+}
+
+static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
+{
+	__raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
+				(cpu * CPU_ENA_OFFSET) + (idx * 4));
+}
+
+static inline void sar_writel(u32 val, u32 offset, u8 idx)
+{
+	__raw_writel(val, sar_base + offset + (idx * 4));
+}
+
+static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
+{
+	u8 i;
+
+	for (i = 0; i < NR_REG_BANKS; i++)
+		wakeupgen_writel(reg, i, cpu);
+}
+
+static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
+{
+	unsigned int spi_irq;
+
+	/*
+	 * PPIs and SGIs are not supported.
+	 */
+	if (irq < OMAP44XX_IRQ_GIC_START)
+		return -EINVAL;
+
+	/*
+	 * Subtract the GIC offset.
+	 */
+	spi_irq = irq - OMAP44XX_IRQ_GIC_START;
+	if (spi_irq > MAX_IRQS) {
+		pr_err("omap wakeupGen: Invalid IRQ%d\n", irq);
+		return -EINVAL;
+	}
+
+	/*
+	 * Each WakeupGen register controls 32 interrupts,
+	 * i.e. 1 bit per SPI IRQ
+	 */
+	*reg_index = spi_irq >> 5;
+	*bit_posn = spi_irq %= 32;
+
+	return 0;
+}
+
+static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
+{
+	u32 val, bit_number;
+	u8 i;
+
+	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
+		return;
+
+	val = wakeupgen_readl(i, cpu);
+	val &= ~BIT(bit_number);
+	wakeupgen_writel(val, i, cpu);
+}
+
+static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
+{
+	u32 val, bit_number;
+	u8 i;
+
+	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
+		return;
+
+	val = wakeupgen_readl(i, cpu);
+	val |= BIT(bit_number);
+	wakeupgen_writel(val, i, cpu);
+}
+
+static void _wakeupgen_save_masks(unsigned int cpu)
+{
+	u8 i;
+
+	for (i = 0; i < NR_REG_BANKS; i++)
+		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
+}
+
+static void _wakeupgen_restore_masks(unsigned int cpu)
+{
+	u8 i;
+
+	for (i = 0; i < NR_REG_BANKS; i++)
+		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
+}
+
+/*
+ * Architecture specific Mask extension
+ */
+static void wakeupgen_mask(struct irq_data *d)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeupgen_lock, flags);
+	_wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
+	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+}
+
+/*
+ * Architecture specific Unmask extension
+ */
+static void wakeupgen_unmask(struct irq_data *d)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeupgen_lock, flags);
+	_wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
+	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+}
+
+/*
+ * Mask or unmask all interrupts on given CPU.
+ *	0 = Mask all interrupts on the 'cpu'
+ *	1 = Unmask all interrupts on the 'cpu'
+ * Ensure that the initial mask is maintained. This is faster than
+ * iterating through GIC registers to arrive at the correct masks.
+ */
+static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeupgen_lock, flags);
+	if (set) {
+		_wakeupgen_save_masks(cpu);
+		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
+	} else {
+		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
+		_wakeupgen_restore_masks(cpu);
+	}
+	spin_unlock_irqrestore(&wakeupgen_lock, flags);
+}
+
+#ifdef CONFIG_CPU_PM
+/*
+ * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
+ * ROM code. WakeupGen IP is integrated along with GIC to manage the
+ * interrupt wakeups from CPU low power states. It manages
+ * masking/unmasking of Shared peripheral interrupts(SPI). So the
+ * interrupt enable/disable control should be in sync and consistent
+ * at WakeupGen and GIC so that interrupts are not lost.
+ */
+static void irq_save_context(void)
+{
+	u32 i, val;
+
+	if (omap_rev() == OMAP4430_REV_ES1_0)
+		return;
+
+	if (!sar_base)
+		sar_base = omap4_get_sar_ram_base();
+
+	for (i = 0; i < NR_REG_BANKS; i++) {
+		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
+		val = wakeupgen_readl(i, 0);
+		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
+		val = wakeupgen_readl(i, 1);
+		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);
+
+		/*
+		 * Disable the secure interrupts for CPUx. The restore
+		 * code blindly restores secure and non-secure interrupt
+		 * masks from SAR RAM. Secure interrupts are not supposed
+		 * to be enabled from HLOS. So overwrite the SAR location
+		 * so that the secure interrupt remains disabled.
+		 */
+		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
+		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
+	}
+
+	/* Save AuxBoot* registers */
+	val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
+	__raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET);
+	val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
+	__raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET);
+
+	/* Save SyncReq generation logic */
+	val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
+	__raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
+	val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
+	__raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET);
+
+	/* Set the Backup Bit Mask status */
+	val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
+	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
+	__raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
+}
+
+/*
+ * Clear WakeupGen SAR backup status.
+ */
+void irq_sar_clear(void)
+{
+	u32 val;
+	val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
+	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
+	__raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
+}
+
+/*
+ * Save GIC and Wakeupgen interrupt context using secure API
+ * for HS/EMU devices.
+ */
+static void irq_save_secure_context(void)
+{
+	u32 ret;
+	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
+				FLAG_START_CRITICAL,
+				0, 0, 0, 0, 0);
+	if (ret != API_HAL_RET_VALUE_OK)
+		pr_err("GIC and Wakeupgen context save failed\n");
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
+					 unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned int)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+		wakeupgen_irqmask_all(cpu, 0);
+		break;
+	case CPU_DEAD:
+		wakeupgen_irqmask_all(cpu, 1);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata irq_hotplug_notifier = {
+	.notifier_call = irq_cpu_hotplug_notify,
+};
+
+static void __init irq_hotplug_init(void)
+{
+	register_hotcpu_notifier(&irq_hotplug_notifier);
+}
+#else
+static void __init irq_hotplug_init(void)
+{}
+#endif
+
+#ifdef CONFIG_CPU_PM
+static int irq_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
+{
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
+			irq_save_context();
+		else
+			irq_save_secure_context();
+		break;
+	case CPU_CLUSTER_PM_EXIT:
+		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
+			irq_sar_clear();
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block irq_notifier_block = {
+	.notifier_call = irq_notifier,
+};
+
+static void __init irq_pm_init(void)
+{
+	cpu_pm_register_notifier(&irq_notifier_block);
+}
+#else
+static void __init irq_pm_init(void)
+{}
+#endif
+
+/*
+ * Initialise the wakeupgen module.
+ */
+int __init omap_wakeupgen_init(void)
+{
+	int i;
+	unsigned int boot_cpu = smp_processor_id();
+
+	/* Not supported on OMAP4 ES1.0 silicon */
+	if (omap_rev() == OMAP4430_REV_ES1_0) {
+		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
+		return -EPERM;
+	}
+
+	/* Static mapping, never released */
+	wakeupgen_base = ioremap(OMAP44XX_WKUPGEN_BASE, SZ_4K);
+	if (WARN_ON(!wakeupgen_base))
+		return -ENOMEM;
+
+	/* Clear all IRQ bitmasks at wakeupGen level */
+	for (i = 0; i < NR_REG_BANKS; i++) {
+		wakeupgen_writel(0, i, CPU0_ID);
+		wakeupgen_writel(0, i, CPU1_ID);
+	}
+
+	/*
+	 * Override GIC architecture specific functions to add
+	 * OMAP WakeupGen interrupt controller along with GIC
+	 */
+	gic_arch_extn.irq_mask = wakeupgen_mask;
+	gic_arch_extn.irq_unmask = wakeupgen_unmask;
+	gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
+
+	/*
+	 * FIXME: Add support to set_smp_affinity() once the core
+	 * GIC code has necessary hooks in place.
+	 */
+
+	/* Associate all the IRQs to boot CPU like GIC init does. */
+	for (i = 0; i < NR_IRQS; i++)
+		irq_target_cpu[i] = boot_cpu;
+
+	irq_hotplug_init();
+	irq_pm_init();
+
+	return 0;
+}

+ 92 - 2
arch/arm/mach-omap2/omap4-common.c

@@ -15,18 +15,73 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/memblock.h>
 
 #include <asm/hardware/gic.h>
 #include <asm/hardware/cache-l2x0.h>
+#include <asm/mach/map.h>
 
 #include <plat/irqs.h>
+#include <plat/sram.h>
 
 #include <mach/hardware.h>
+#include <mach/omap-wakeupgen.h>
 
 #include "common.h"
+#include "omap4-sar-layout.h"
 
 #ifdef CONFIG_CACHE_L2X0
-void __iomem *l2cache_base;
+static void __iomem *l2cache_base;
+#endif
+
+static void __iomem *sar_ram_base;
+
+#ifdef CONFIG_OMAP4_ERRATA_I688
+/* Used to implement memory barrier on DRAM path */
+#define OMAP4_DRAM_BARRIER_VA			0xfe600000
+
+void __iomem *dram_sync, *sram_sync;
+
+void omap_bus_sync(void)
+{
+	if (dram_sync && sram_sync) {
+		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
+		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
+		isb();
+	}
+}
+
+static int __init omap_barriers_init(void)
+{
+	struct map_desc dram_io_desc[1];
+	phys_addr_t paddr;
+	u32 size;
+
+	if (!cpu_is_omap44xx())
+		return -ENODEV;
+
+	size = ALIGN(PAGE_SIZE, SZ_1M);
+	paddr = memblock_alloc(size, SZ_1M);
+	if (!paddr) {
+		pr_err("%s: failed to reserve 4 Kbytes\n", __func__);
+		return -ENOMEM;
+	}
+	memblock_free(paddr, size);
+	memblock_remove(paddr, size);
+	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
+	dram_io_desc[0].pfn = __phys_to_pfn(paddr);
+	dram_io_desc[0].length = size;
+	dram_io_desc[0].type = MT_MEMORY_SO;
+	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
+	dram_sync = (void __iomem *) dram_io_desc[0].virtual;
+	sram_sync = (void __iomem *) OMAP4_SRAM_VA;
+
+	pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n",
+		(long long) paddr, dram_io_desc[0].virtual);
+
+	return 0;
+}
+core_initcall(omap_barriers_init);
 #endif
 
 void __init gic_init_irq(void)
@@ -42,11 +97,18 @@ void __init gic_init_irq(void)
 	omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
 	BUG_ON(!omap_irq_base);
 
+	omap_wakeupgen_init();
+
 	gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
 }
 
 #ifdef CONFIG_CACHE_L2X0
 
+void __iomem *omap4_get_l2cache_base(void)
+{
+	return l2cache_base;
+}
+
 static void omap4_l2x0_disable(void)
 {
 	/* Disable PL310 L2 Cache controller */
@@ -72,7 +134,8 @@ static int __init omap_l2_cache_init(void)
 
 	/* Static mapping, never released */
 	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
-	BUG_ON(!l2cache_base);
+	if (WARN_ON(!l2cache_base))
+		return -ENOMEM;
 
 	/*
 	 * 16-way associativity, parity disabled
@@ -112,3 +175,30 @@ static int __init omap_l2_cache_init(void)
 }
 early_initcall(omap_l2_cache_init);
 #endif
+
+void __iomem *omap4_get_sar_ram_base(void)
+{
+	return sar_ram_base;
+}
+
+/*
+ * SAR RAM used to save and restore the HW
+ * context in low power modes
+ */
+static int __init omap4_sar_ram_init(void)
+{
+	/*
+	 * To avoid code running on other OMAPs in
+	 * multi-omap builds
+	 */
+	if (!cpu_is_omap44xx())
+		return -ENOMEM;
+
+	/* Static mapping, never released */
+	sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
+	if (WARN_ON(!sar_ram_base))
+		return -ENOMEM;
+
+	return 0;
+}
+early_initcall(omap4_sar_ram_init);

+ 50 - 0
arch/arm/mach-omap2/omap4-sar-layout.h

@@ -0,0 +1,50 @@
+/*
+ * omap4-sar-layout.h: OMAP4 SAR RAM layout header file
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_OMAP4_SAR_LAYOUT_H
+#define OMAP_ARCH_OMAP4_SAR_LAYOUT_H
+
+/*
+ * SAR BANK offsets from base address OMAP44XX_SAR_RAM_BASE
+ */
+#define SAR_BANK1_OFFSET		0x0000
+#define SAR_BANK2_OFFSET		0x1000
+#define SAR_BANK3_OFFSET		0x2000
+#define SAR_BANK4_OFFSET		0x3000
+
+/* Scratch pad memory offsets from SAR_BANK1 */
+#define SCU_OFFSET0				0xd00
+#define SCU_OFFSET1				0xd04
+#define OMAP_TYPE_OFFSET			0xd10
+#define L2X0_SAVE_OFFSET0			0xd14
+#define L2X0_SAVE_OFFSET1			0xd18
+#define L2X0_AUXCTRL_OFFSET			0xd1c
+#define L2X0_PREFETCH_CTRL_OFFSET		0xd20
+
+/* CPUx Wakeup Non-Secure Physical Address offsets in SAR_BANK3 */
+#define CPU0_WAKEUP_NS_PA_ADDR_OFFSET		0xa04
+#define CPU1_WAKEUP_NS_PA_ADDR_OFFSET		0xa08
+
+#define SAR_BACKUP_STATUS_OFFSET		(SAR_BANK3_OFFSET + 0x500)
+#define SAR_SECURE_RAM_SIZE_OFFSET		(SAR_BANK3_OFFSET + 0x504)
+#define SAR_SECRAM_SAVED_AT_OFFSET		(SAR_BANK3_OFFSET + 0x508)
+
+/* WakeUpGen save restore offset from OMAP44XX_SAR_RAM_BASE */
+#define WAKEUPGENENB_OFFSET_CPU0		(SAR_BANK3_OFFSET + 0x684)
+#define WAKEUPGENENB_SECURE_OFFSET_CPU0		(SAR_BANK3_OFFSET + 0x694)
+#define WAKEUPGENENB_OFFSET_CPU1		(SAR_BANK3_OFFSET + 0x6a4)
+#define WAKEUPGENENB_SECURE_OFFSET_CPU1		(SAR_BANK3_OFFSET + 0x6b4)
+#define AUXCOREBOOT0_OFFSET			(SAR_BANK3_OFFSET + 0x6c4)
+#define AUXCOREBOOT1_OFFSET			(SAR_BANK3_OFFSET + 0x6c8)
+#define PTMSYNCREQ_MASK_OFFSET			(SAR_BANK3_OFFSET + 0x6cc)
+#define PTMSYNCREQ_EN_OFFSET			(SAR_BANK3_OFFSET + 0x6d0)
+#define SAR_BACKUP_STATUS_WAKEUPGEN		0x10
+
+#endif

+ 1 - 0
arch/arm/mach-omap2/pm.h

@@ -21,6 +21,7 @@ extern void omap_sram_idle(void);
 extern int omap3_can_sleep(void);
 extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state);
 extern int omap3_idle_init(void);
+extern int omap4_idle_init(void);
 
 #if defined(CONFIG_PM_OPP)
 extern int omap3_opp_init(void);

+ 149 - 4
arch/arm/mach-omap2/pm44xx.c

@@ -1,8 +1,9 @@
 /*
  * OMAP4 Power Management Routines
  *
- * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
  * Rajendra Nayak <rnayak@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -17,13 +18,16 @@
 #include <linux/slab.h>
 
 #include "common.h"
+#include "clockdomain.h"
 #include "powerdomain.h"
+#include "pm.h"
 
 struct power_state {
 	struct powerdomain *pwrdm;
 	u32 next_state;
 #ifdef CONFIG_SUSPEND
 	u32 saved_state;
+	u32 saved_logic_state;
 #endif
 	struct list_head node;
 };
@@ -33,7 +37,50 @@ static LIST_HEAD(pwrst_list);
 #ifdef CONFIG_SUSPEND
 static int omap4_pm_suspend(void)
 {
-	do_wfi();
+	struct power_state *pwrst;
+	int state, ret = 0;
+	u32 cpu_id = smp_processor_id();
+
+	/* Save current powerdomain state */
+	list_for_each_entry(pwrst, &pwrst_list, node) {
+		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
+		pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm);
+	}
+
+	/* Set targeted power domain states by suspend */
+	list_for_each_entry(pwrst, &pwrst_list, node) {
+		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
+		pwrdm_set_logic_retst(pwrst->pwrdm, PWRDM_POWER_OFF);
+	}
+
+	/*
+	 * For MPUSS to hit power domain retention (CSWR or OSWR),
+	 * the CPU0 and CPU1 power domains need to be in OFF or DORMANT
+	 * state, since CPU power domain CSWR is not supported by
+	 * hardware. Only the master CPU follows the suspend path; all
+	 * other CPUs follow the CPU hotplug path in system-wide suspend.
+	 * More details can be found in OMAP4430 TRM section 4.3.4.2.
+	 */
+	omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF);
+
+	/* Restore next powerdomain state */
+	list_for_each_entry(pwrst, &pwrst_list, node) {
+		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
+		if (state > pwrst->next_state) {
+			pr_info("Powerdomain (%s) didn't enter "
+			       "target state %d\n",
+			       pwrst->pwrdm->name, pwrst->next_state);
+			ret = -1;
+		}
+		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
+		pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state);
+	}
+	if (ret)
+		pr_crit("Could not enter target state in pm_suspend\n");
+	else
+		pr_info("Successfully put all powerdomains to target state\n");
+
 	return 0;
 }
 
@@ -73,6 +120,22 @@ static const struct platform_suspend_ops omap_pm_ops = {
 };
 #endif /* CONFIG_SUSPEND */
 
+/*
+ * Enable hardware-supervised mode for all clockdomains where it is
+ * supported. Initiate a sleep transition for the other clockdomains
+ * if they are not in use.
+ */
+static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
+{
+	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
+		clkdm_allow_idle(clkdm);
+	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
+			atomic_read(&clkdm->usecount) == 0)
+		clkdm_sleep(clkdm);
+	return 0;
+}
+
+
 static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
 {
 	struct power_state *pwrst;
@@ -80,14 +143,48 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
 	if (!pwrdm->pwrsts)
 		return 0;
 
+	/*
+	 * Skip the CPU0 and CPU1 power domains. CPU1 is programmed
+	 * through the hotplug path, and CPU0 is programmed explicitly
+	 * further down this code path.
+	 */
+	if (!strncmp(pwrdm->name, "cpu", 3))
+		return 0;
+
+	/*
+	 * FIXME: Remove this check when core retention is supported.
+	 * Only the MPUSS power domain is added to the list for now.
+	 */
+	if (strcmp(pwrdm->name, "mpu_pwrdm"))
+		return 0;
+
 	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
 	if (!pwrst)
 		return -ENOMEM;
+
 	pwrst->pwrdm = pwrdm;
-	pwrst->next_state = PWRDM_POWER_ON;
+	pwrst->next_state = PWRDM_POWER_RET;
 	list_add(&pwrst->node, &pwrst_list);
 
-	return pwrdm_set_next_pwrst(pwrst->pwrdm, pwrst->next_state);
+	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
+}
+
+/**
+ * omap_default_idle - OMAP4 default idle routine
+ *
+ * Implements the OMAP4 memory and I/O ordering requirements which can't be
+ * addressed with the default arch_idle() hook. Used by all CPUs with
+ * !CONFIG_CPUIDLE and by the secondary CPU with CONFIG_CPUIDLE.
+ */
+static void omap_default_idle(void)
+{
+	local_irq_disable();
+	local_fiq_disable();
+
+	omap_do_wfi();
+
+	local_fiq_enable();
+	local_irq_enable();
 }
 
 /**
@@ -99,10 +196,17 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
 static int __init omap4_pm_init(void)
 {
 	int ret;
+	struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm;
+	struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
 
 	if (!cpu_is_omap44xx())
 		return -ENODEV;
 
+	if (omap_rev() == OMAP4430_REV_ES1_0) {
+		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
+		return -ENODEV;
+	}
+
 	pr_err("Power Management for TI OMAP4.\n");
 	pr_err("Power Management for TI OMAP4.\n");
 
 
 	ret = pwrdm_for_each(pwrdms_setup, NULL);
 	ret = pwrdm_for_each(pwrdms_setup, NULL);
@@ -111,10 +215,51 @@ static int __init omap4_pm_init(void)
 		goto err2;
 	}
 
+	/*
+	 * The dynamic dependency between MPUSS -> MEMIF and
+	 * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
+	 * expected. The hardware recommendation is to enable static
+	 * dependencies for these to avoid system lockups or random crashes.
+	 */
+	mpuss_clkdm = clkdm_lookup("mpuss_clkdm");
+	emif_clkdm = clkdm_lookup("l3_emif_clkdm");
+	l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
+	l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
+	l4_per_clkdm = clkdm_lookup("l4_per_clkdm");
+	ducati_clkdm = clkdm_lookup("ducati_clkdm");
+	if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) ||
+		(!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm))
+		goto err2;
+
+	ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm);
+	ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
+	ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
+	ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm);
+	ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
+	ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
+	if (ret) {
+		pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 "
+				"wakeup dependency\n");
+		goto err2;
+	}
+
+	ret = omap4_mpuss_init();
+	if (ret) {
+		pr_err("Failed to initialise OMAP4 MPUSS\n");
+		goto err2;
+	}
+
+	(void) clkdm_for_each(clkdms_setup, NULL);
+
 #ifdef CONFIG_SUSPEND
 	suspend_set_ops(&omap_pm_ops);
 #endif /* CONFIG_SUSPEND */
 
+	/* Overwrite the default arch_idle() */
+	pm_idle = omap_default_idle;
+
+	omap4_idle_init();
+
 err2:
 	return ret;
 }
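
With omap_pm_ops registered through suspend_set_ops(), the omap4_pm_suspend() path above is exercised by an ordinary suspend-to-mem request, e.g. "echo mem > /sys/power/state" from userspace. The kernel-side equivalent is a one-liner (the function name here is illustrative):

    #include <linux/suspend.h>

    /* Request suspend-to-mem; this reaches omap4_pm_suspend()
     * through the suspend ops registered above. */
    static int omap4_suspend_smoke_test(void)
    {
    	return pm_suspend(PM_SUSPEND_MEM);
    }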

+ 379 - 0
arch/arm/mach-omap2/sleep44xx.S

@@ -0,0 +1,379 @@
+/*
+ * OMAP44xx sleep code.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * 	Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/system.h>
+#include <asm/smp_scu.h>
+#include <asm/memory.h>
+#include <asm/hardware/cache-l2x0.h>
+
+#include <plat/omap44xx.h>
+#include <mach/omap-secure.h>
+
+#include "common.h"
+#include "omap4-sar-layout.h"
+
+#if defined(CONFIG_SMP) && defined(CONFIG_PM)
+
+.macro	DO_SMC
+	dsb
+	smc	#0
+	dsb
+.endm
+
+ppa_zero_params:
+	.word		0x0
+
+ppa_por_params:
+	.word		1, 0
+
+/*
+ * =============================
+ * == CPU suspend finisher ==
+ * =============================
+ *
+ * void omap4_finish_suspend(unsigned long cpu_state)
+ *
+ * This function saves the CPU context and performs the CPU
+ * power-down sequence. Calling WFI effectively changes the CPU
+ * power domain state to the desired target power state.
+ *
+ * @cpu_state : contains context save state (r0)
+ *	0 - No context lost
+ * 	1 - CPUx L1 and logic lost: MPUSS CSWR
+ * 	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
+ *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
+ * @return: This function never returns for CPU OFF and DORMANT power states.
+ * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
+ * from this follows a full CPU reset path via ROM code to CPU restore code.
+ * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
+ * It returns to the caller for CPU INACTIVE and ON power states or in case
+ * CPU failed to transition to targeted OFF/DORMANT state.
+ */
+ENTRY(omap4_finish_suspend)
+	stmfd	sp!, {lr}
+	cmp	r0, #0x0
+	beq	do_WFI				@ No lowpower state, jump to WFI
+
+	/*
+	 * Flush all data from the L1 data cache before disabling
+	 * SCTLR.C bit.
+	 */
+	bl	omap4_get_sar_ram_base
+	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
+	cmp	r9, #0x1			@ Check for HS device
+	bne	skip_secure_l1_clean
+	mov	r0, #SCU_PM_NORMAL
+	mov	r1, #0xFF			@ clean secure L1
+	stmfd   r13!, {r4-r12, r14}
+	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
+	DO_SMC
+	ldmfd   r13!, {r4-r12, r14}
+skip_secure_l1_clean:
+	bl	v7_flush_dcache_all
+
+	/*
+	 * Clear the SCTLR.C bit to prevent further data cache
+	 * allocation. Clearing SCTLR.C would make all the data accesses
+	 * strongly ordered and would not hit the cache.
+	 */
+	mrc	p15, 0, r0, c1, c0, 0
+	bic	r0, r0, #(1 << 2)		@ Disable the C bit
+	mcr	p15, 0, r0, c1, c0, 0
+	isb
+
+	/*
+	 * Invalidate the L1 data cache. Even though only an invalidate
+	 * is necessary, the exported flush API is used here; a clean of
+	 * an already-clean cache is almost a NOP.
+	 */
+	bl	v7_flush_dcache_all
+
+	/*
+	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
+	 * to Asymmetric Multiprocessing (AMP) mode by programming
+	 * the SCU power status to DORMANT or OFF mode.
+	 * This enables the CPU to be taken out of coherency by
+	 * preventing the CPU from receiving cache, TLB, or BTB
+	 * maintenance operations broadcast by other CPUs in the cluster.
+	 */
+	bl	omap4_get_sar_ram_base
+	mov	r8, r0
+	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
+	cmp	r9, #0x1			@ Check for HS device
+	bne	scu_gp_set
+	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
+	ands	r0, r0, #0x0f
+	ldreq	r0, [r8, #SCU_OFFSET0]
+	ldrne	r0, [r8, #SCU_OFFSET1]
+	mov	r1, #0x00
+	stmfd   r13!, {r4-r12, r14}
+	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
+	DO_SMC
+	ldmfd   r13!, {r4-r12, r14}
+	b	skip_scu_gp_set
+scu_gp_set:
+	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
+	ands	r0, r0, #0x0f
+	ldreq	r1, [r8, #SCU_OFFSET0]
+	ldrne	r1, [r8, #SCU_OFFSET1]
+	bl	omap4_get_scu_base
+	bl	scu_power_mode
+skip_scu_gp_set:
+	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
+	tst	r0, #(1 << 18)
+	mrcne	p15, 0, r0, c1, c0, 1
+	bicne	r0, r0, #(1 << 6)		@ Disable SMP bit
+	mcrne	p15, 0, r0, c1, c0, 1
+	isb
+	dsb
+#ifdef CONFIG_CACHE_L2X0
+	/*
+	 * Clean and invalidate the L2 cache.
+	 * The common cache-l2x0.c functions can't be used here since
+	 * they take spinlocks: at this point the CPU is out of coherency
+	 * with its data cache disabled, and the spinlock implementation
+	 * uses exclusive load/store instructions which can fail without
+	 * the data cache enabled. OMAP4 hardware has no external
+	 * exclusive monitor to overcome this, so taking the lock could
+	 * deadlock the CPU.
+	 */
+	bl	omap4_get_sar_ram_base
+	mov	r8, r0
+	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
+	ands	r5, r5, #0x0f
+	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
+	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
+	cmp	r0, #3
+	bne	do_WFI
+#ifdef CONFIG_PL310_ERRATA_727915
+	mov	r0, #0x03
+	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
+	DO_SMC
+#endif
+	bl	omap4_get_l2cache_base
+	mov	r2, r0
+	ldr	r0, =0xffff
+	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
+wait:
+	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
+	ldr	r1, =0xffff
+	ands	r0, r0, r1
+	bne	wait
+#ifdef CONFIG_PL310_ERRATA_727915
+	mov	r0, #0x00
+	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
+	DO_SMC
+#endif
+l2x_sync:
+	bl	omap4_get_l2cache_base
+	mov	r2, r0
+	mov	r0, #0x0
+	str	r0, [r2, #L2X0_CACHE_SYNC]
+sync:
+	ldr	r0, [r2, #L2X0_CACHE_SYNC]
+	ands	r0, r0, #0x1
+	bne	sync
+#endif
+
+do_WFI:
+	bl	omap_do_wfi
+
+	/*
+	 * CPU is here when it failed to enter OFF/DORMANT or
+	 * no low power state was attempted.
+	 */
+	mrc	p15, 0, r0, c1, c0, 0
+	tst	r0, #(1 << 2)			@ Check C bit enabled?
+	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
+	mcreq	p15, 0, r0, c1, c0, 0
+	isb
+
+	/*
+	 * Ensure the CPU power state is set to NORMAL in
+	 * SCU power state so that CPU is back in coherency.
+	 * In non-coherent mode CPU can lock-up and lead to
+	 * system deadlock.
+	 */
+	mrc	p15, 0, r0, c1, c0, 1
+	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
+	orreq	r0, r0, #(1 << 6)
+	mcreq	p15, 0, r0, c1, c0, 1
+	isb
+	bl	omap4_get_sar_ram_base
+	mov	r8, r0
+	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
+	cmp	r9, #0x1			@ Check for HS device
+	bne	scu_gp_clear
+	mov	r0, #SCU_PM_NORMAL
+	mov	r1, #0x00
+	stmfd   r13!, {r4-r12, r14}
+	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
+	DO_SMC
+	ldmfd   r13!, {r4-r12, r14}
+	b	skip_scu_gp_clear
+scu_gp_clear:
+	bl	omap4_get_scu_base
+	mov	r1, #SCU_PM_NORMAL
+	bl	scu_power_mode
+skip_scu_gp_clear:
+	isb
+	dsb
+	ldmfd	sp!, {pc}
+ENDPROC(omap4_finish_suspend)
+
+/*
+ * ============================
+ * == CPU resume entry point ==
+ * ============================
+ *
+ * void omap4_cpu_resume(void)
+ *
+ * ROM code jumps to this function while waking up from CPU
+ * OFF or DORMANT state. Physical address of the function is
+ * stored in the SAR RAM while entering to OFF or DORMANT mode.
+ * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
+ */
+ENTRY(omap4_cpu_resume)
+	/*
+	 * Configure ACTLR and enable NS SMP bit access on CPU1 on HS device.
+	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
+	 * init; for CPU1, a secure PPA API is provided. CPU0 must be ON
+	 * while executing the NS_SMP API on CPU1, and the PPA version must
+	 * be 1.4.0+.
+	 * OMAP443X GP devices- SMP bit isn't accessible.
+	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
+	 */
+	ldr	r8, =OMAP44XX_SAR_RAM_BASE
+	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
+	cmp	r9, #0x1			@ Skip if GP device
+	bne	skip_ns_smp_enable
+	mrc     p15, 0, r0, c0, c0, 5
+	ands    r0, r0, #0x0f
+	beq	skip_ns_smp_enable
+ppa_actrl_retry:
+	mov     r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
+	adr	r3, ppa_zero_params		@ Pointer to parameters
+	mov	r1, #0x0			@ Process ID
+	mov	r2, #0x4			@ Flag
+	mov	r6, #0xff
+	mov	r12, #0x00			@ Secure Service ID
+	DO_SMC
+	cmp	r0, #0x0			@ API returns 0 on success.
+	beq	enable_smp_bit
+	b	ppa_actrl_retry
+enable_smp_bit:
+	mrc	p15, 0, r0, c1, c0, 1
+	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
+	orreq	r0, r0, #(1 << 6)
+	mcreq	p15, 0, r0, c1, c0, 1
+	isb
+skip_ns_smp_enable:
+#ifdef CONFIG_CACHE_L2X0
+	/*
+	 * Restore the L2 AUXCTRL and enable the L2 cache.
+	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = program the L2X0 AUXCTRL;
+	 * OMAP4_MON_L2X0_CTRL_INDEX = enable the L2 using L2X0 CTRL.
+	 * Register r0 contains the value to be programmed.
+	 * The L2 cache is already invalidated by ROM code as part
+	 * of the MPUSS OFF wakeup path.
+	 */
+	ldr	r2, =OMAP44XX_L2CACHE_BASE
+	ldr	r0, [r2, #L2X0_CTRL]
+	and	r0, #0x0f
+	cmp	r0, #1
+	beq	skip_l2en			@ Skip if already enabled
+	ldr	r3, =OMAP44XX_SAR_RAM_BASE
+	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
+	cmp	r1, #0x1			@ Check for HS device
+	bne     set_gp_por
+	ldr     r0, =OMAP4_PPA_L2_POR_INDEX
+	ldr     r1, =OMAP44XX_SAR_RAM_BASE
+	ldr     r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
+	adr     r3, ppa_por_params
+	str     r4, [r3, #0x04]
+	mov	r1, #0x0			@ Process ID
+	mov	r2, #0x4			@ Flag
+	mov	r6, #0xff
+	mov	r12, #0x00			@ Secure Service ID
+	DO_SMC
+	b	set_aux_ctrl
+set_gp_por:
+	ldr     r1, =OMAP44XX_SAR_RAM_BASE
+	ldr     r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
+	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
+	DO_SMC
+set_aux_ctrl:
+	ldr     r1, =OMAP44XX_SAR_RAM_BASE
+	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
+	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
+	DO_SMC
+	mov	r0, #0x1
+	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX		@ Enable L2 cache
+	DO_SMC
+skip_l2en:
+#endif
+
+	b	cpu_resume			@ Jump to generic resume
+ENDPROC(omap4_cpu_resume)
+#endif
+
+#ifndef CONFIG_OMAP4_ERRATA_I688
+ENTRY(omap_bus_sync)
+	mov	pc, lr
+ENDPROC(omap_bus_sync)
+#endif
+
+ENTRY(omap_do_wfi)
+	stmfd	sp!, {lr}
+	/* Drain interconnect write buffers. */
+	bl omap_bus_sync
+
+	/*
+	 * Execute an ISB instruction to ensure that all of the
+	 * CP15 register changes have been committed.
+	 */
+	isb
+
+	/*
+	 * Execute a barrier instruction to ensure that all cache,
+	 * TLB and branch predictor maintenance operations issued
+	 * by any CPU in the cluster have completed.
+	 */
+	dsb
+	dmb
+
+	/*
+	 * Execute a WFI instruction and wait until the
+	 * STANDBYWFI output is asserted to indicate that the
+	 * CPU is in an idle and low power state. The CPU can
+	 * speculatively prefetch instructions, so add NOPs after
+	 * WFI; sixteen NOPs as per the Cortex-A9 pipeline.
+	 */
+	wfi					@ Wait For Interrupt
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	ldmfd	sp!, {pc}
+ENDPROC(omap_do_wfi)
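
Since omap4_finish_suspend() never returns for OFF/DORMANT, C code enters it through the generic ARM cpu_suspend() helper, which saves the kernel context and tolerates a non-returning finisher. The actual call site is omap-mpuss-lowpower.c from this same series (not part of this excerpt); a hedged sketch of the shape, with the wrapper name invented for illustration:

    #include <asm/suspend.h>		/* cpu_suspend() */
    #include "common.h"			/* omap4_finish_suspend() */
    #include "powerdomain.h"		/* PWRDM_POWER_* */

    /* Sketch: pick the cpu_state argument documented above, then enter
     * the finisher. cpu_suspend() saves the kernel context, passes
     * save_state to omap4_finish_suspend() in r0, and returns only if
     * the finisher does (INACTIVE/ON, or a failed OFF/DORMANT attempt). */
    static void omap4_enter_lowpower_sketch(unsigned int power_state)
    {
    	unsigned long save_state = (power_state == PWRDM_POWER_OFF) ? 1 : 0;

    	cpu_suspend(save_state, omap4_finish_suspend);
    }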

+ 3 - 0
arch/arm/plat-omap/common.c

@@ -22,6 +22,8 @@
 #include <plat/vram.h>
 #include <plat/dsp.h>
 
+#include <plat/omap-secure.h>
+
 
 #define NO_LENGTH_CHECK 0xffffffff
 
@@ -66,6 +68,7 @@ void __init omap_reserve(void)
 	omapfb_reserve_sdram_memblock();
 	omap_vram_reserve_sdram_memblock();
 	omap_dsp_reserve_sdram_memblock();
+	omap_secure_ram_reserve_memblock();
 }
 
 void __init omap_init_consistent_dma_size(void)

+ 13 - 0
arch/arm/plat-omap/include/plat/omap-secure.h

@@ -0,0 +1,13 @@
+#ifndef __OMAP_SECURE_H__
+#define __OMAP_SECURE_H__
+
+#include <linux/types.h>
+
+#ifdef CONFIG_ARCH_OMAP2PLUS
+extern int omap_secure_ram_reserve_memblock(void);
+#else
+static inline void omap_secure_ram_reserve_memblock(void)
+{ }
+#endif
+
+#endif /* __OMAP_SECURE_H__ */
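
The OMAP2PLUS implementation behind this prototype lives in mach-omap2/omap-secure.c, added elsewhere in this series. It carves the secure-side context-save area out of the memblock allocator early in boot; a hedged sketch, with the storage size shown as an assumption:

    #include <linux/kernel.h>
    #include <linux/memblock.h>
    #include <asm/sizes.h>

    /* Assumed size of the secure RAM context-save area. */
    #define OMAP_SECURE_RAM_STORAGE	(88 * SZ_1K)

    static phys_addr_t omap_secure_memblock_base;

    /* Sketch: set aside a 1MB-aligned chunk that the kernel will not
     * map or allocate from, so the secure side can save context there. */
    int __init omap_secure_ram_reserve_memblock(void)
    {
    	phys_addr_t size = ALIGN(OMAP_SECURE_RAM_STORAGE, SZ_1M);

    	omap_secure_memblock_base = memblock_alloc(size, SZ_1M);
    	memblock_free(omap_secure_memblock_base, size);
    	memblock_remove(omap_secure_memblock_base, size);

    	return 0;
    }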

+ 1 - 0
arch/arm/plat-omap/include/plat/omap44xx.h

@@ -45,6 +45,7 @@
 #define OMAP44XX_WKUPGEN_BASE		0x48281000
 #define OMAP44XX_MCPDM_BASE		0x40132000
 #define OMAP44XX_MCPDM_L3_BASE		0x49032000
+#define OMAP44XX_SAR_RAM_BASE		0x4a326000
 
 #define OMAP44XX_MAILBOX_BASE		(L4_44XX_BASE + 0xF4000)
 #define OMAP44XX_HSUSB_OTG_BASE		(L4_44XX_BASE + 0xAB000)

+ 5 - 1
arch/arm/plat-omap/include/plat/sram.h

@@ -95,6 +95,10 @@ static inline void omap_push_sram_idle(void) {}
  */
 #define OMAP2_SRAM_PA		0x40200000
 #define OMAP3_SRAM_PA           0x40200000
+#ifdef CONFIG_OMAP4_ERRATA_I688
+#define OMAP4_SRAM_PA		0x40304000
+#define OMAP4_SRAM_VA		0xfe404000
+#else
 #define OMAP4_SRAM_PA		0x40300000
-
+#endif
 #endif
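
The page carved out here gives the i688 workaround a strongly-ordered SRAM target: omap_do_wfi in sleep44xx.S above drains the interconnect write buffers through omap_bus_sync before executing WFI. The CONFIG_OMAP4_ERRATA_I688 implementation lives in omap4-common.c (not part of this excerpt); a hedged sketch, assuming dram_sync and sram_sync are strongly-ordered mappings of one DRAM page and this SRAM page set up at init:

    #include <linux/io.h>
    #include <asm/system.h>		/* isb() */

    static void __iomem *dram_sync, *sram_sync;	/* mapped at init */

    /* Drain the async bridges on the MPU->EMIF and MPU->L3 paths by
     * issuing a read followed by a write through both strongly-ordered
     * windows. */
    void omap_bus_sync(void)
    {
    	if (dram_sync && sram_sync) {
    		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
    		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
    		isb();
    	}
    }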

+ 8 - 0
arch/arm/plat-omap/sram.c

@@ -40,7 +40,11 @@
 #define OMAP1_SRAM_PA		0x20000000
 #define OMAP2_SRAM_PUB_PA	(OMAP2_SRAM_PA + 0xf800)
 #define OMAP3_SRAM_PUB_PA       (OMAP3_SRAM_PA + 0x8000)
+#ifdef CONFIG_OMAP4_ERRATA_I688
+#define OMAP4_SRAM_PUB_PA	OMAP4_SRAM_PA
+#else
 #define OMAP4_SRAM_PUB_PA	(OMAP4_SRAM_PA + 0x4000)
+#endif
 
 #if defined(CONFIG_ARCH_OMAP2PLUS)
 #define SRAM_BOOTLOADER_SZ	0x00
@@ -163,6 +167,10 @@ static void __init omap_map_sram(void)
 	if (omap_sram_size == 0)
 		return;
 
+#ifdef CONFIG_OMAP4_ERRATA_I688
+	omap_sram_start += PAGE_SIZE;
+	omap_sram_size -= SZ_16K;
+#endif
 	if (cpu_is_omap34xx()) {
 		/*
 		 * SRAM must be marked as non-cached on OMAP3 since the