/* sleep44xx.S */
/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>

#include "omap-secure.h"
#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"

#if defined(CONFIG_SMP) && defined(CONFIG_PM)
/*
 * DO_SMC: enter the secure monitor (SMC #0).
 * The DSB before the call ensures all outstanding memory accesses have
 * completed before switching to the secure side; the DSB after ensures
 * any memory effects of the secure service are visible on return.
 * Inputs/outputs (r0-r12 usage) are defined by the individual monitor
 * service being invoked; callers save what they need around this macro.
 */
.macro DO_SMC
	dsb
	smc	#0
	dsb
.endm

/*
 * Parameter blocks handed to the secure PPA services below (their
 * address is formed with adr, so they must stay close to the callers).
 * ppa_zero_params - all-zero parameter word for the ACTRL/SMP service.
 * ppa_por_params  - {count, value} pair for the L2 POR service; word 1
 *                   is filled in at runtime by omap4_cpu_resume.
 * NOTE(review): these words live in .text and ppa_por_params is written
 * at runtime — with read-only kernel text (strict RWX) that store would
 * fault; later kernels moved these params to .data. Confirm against the
 * kernel configuration in use.
 */
ppa_zero_params:
	.word	0x0

ppa_por_params:
	.word	1, 0
#ifdef CONFIG_ARCH_OMAP4
/*
 * =============================
 * == CPU suspend finisher ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function code saves the CPU context and performs the CPU
 * power down sequence. Calling WFI effectively changes the CPU
 * power domains states to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *	0 - No context lost
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for CPU OFF and DORMANT power states.
 * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
 * from this follows a full CPU reset path via ROM code to CPU restore code.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 * It returns to the caller for CPU INACTIVE and ON power states or in case
 * CPU failed to transition to targeted OFF/DORMANT state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
 * stack frame and it expects the caller to take care of it. Hence the entire
 * stack frame is saved to avoid possible stack corruption.
 */
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}
	cmp	r0, #0x0
	beq	do_WFI				@ No lowpower state, jump to WFI

	/*
	 * Flush all data from the L1 data cache before disabling
	 * SCTLR.C bit.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]	@ r9 = device type from SAR RAM
	cmp	r9, #0x1			@ Check for HS device
	bne	skip_secure_l1_clean
	/* HS device: ask the secure monitor to clean the secure L1 lines. */
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF			@ clean secure L1
	stmfd	r13!, {r4-r12, r14}		@ monitor may clobber; save all
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
skip_secure_l1_clean:
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0		@ read SCTLR
	bic	r0, r0, #(1 << 2)		@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate L1 data cache. Even though only invalidate is
	 * necessary exported flush API is used here. Doing clean
	 * on already clean cache would be almost NOP.
	 */
	bl	v7_flush_dcache_all

	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0				@ r8 = SAR RAM base (kept live)
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_set
	/* HS device: SCU power state must be set through the monitor. */
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f			@ r0 = CPU id (0 or 1)
	ldreq	r0, [r8, #SCU_OFFSET0]		@ CPU0 target SCU power state
	ldrne	r0, [r8, #SCU_OFFSET1]		@ CPU1 target SCU power state
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_set
scu_gp_set:
	/* GP device: program the SCU power status register directly. */
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r1, [r8, #SCU_OFFSET0]		@ r1 = mode arg for scu_power_mode
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base		@ r0 = SCU base address
	@ NOTE(review): assumes omap4_get_scu_base preserves r1 — confirm.
	bl	scu_power_mode
skip_scu_gp_set:
	/* If NS access to ACTLR is granted, drop out of SMP coherency. */
	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
	tst	r0, #(1 << 18)
	mrcne	p15, 0, r0, c1, c0, 1		@ read ACTLR
	bicne	r0, r0, #(1 << 6)		@ Disable SMP bit
	mcrne	p15, 0, r0, c1, c0, 1
	isb
	dsb
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * Common cache-l2x0.c functions can't be used here since it
	 * uses spinlocks. We are out of coherency here with data cache
	 * disabled. The spinlock implementation uses exclusive load/store
	 * instruction which can fail without data cache being enabled.
	 * OMAP4 hardware doesn't support exclusive monitor which can
	 * overcome exclusive access issue. Because of this, CPU can
	 * lead to deadlock.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
	ands	r5, r5, #0x0f
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
	cmp	r0, #3				@ only flush L2 for MPUSS OFF
	bne	do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
	/* Errata 727915 workaround: toggle PL310 debug ctrl around the
	 * clean+invalidate-by-way (register is secure, hence the SMC). */
	mov	r0, #0x03
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
	bl	omap4_get_l2cache_base
	mov	r2, r0				@ r2 = PL310 base
	ldr	r0, =0xffff			@ all 16 ways
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
	/* Poll until the clean+invalidate-by-way operation completes. */
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, =0xffff
	ands	r0, r0, r1
	bne	wait
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
l2x_sync:
	/* Drain PL310 buffers via CACHE_SYNC and wait for completion. */
	bl	omap4_get_l2cache_base
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1
	bne	sync
#endif

do_WFI:
	bl	omap_do_wfi

	/*
	 * CPU is here when it failed to enter OFF/DORMANT or
	 * no low power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)			@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Ensure the CPU power state is set to NORMAL in
	 * SCU power state so that CPU is back in coherency.
	 * In non-coherent mode CPU can lock-up and lead to
	 * system deadlock.
	 */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_clear
	/* HS device: restore SCU_PM_NORMAL through the monitor. */
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_clear
scu_gp_clear:
	/* GP device: restore SCU_PM_NORMAL directly. */
	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL
	bl	scu_power_mode
skip_scu_gp_clear:
	isb
	dsb
	ldmfd	sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)
/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. Physical address of the function is
 * stored in the SAR RAM while entering to OFF or DORMANT mode.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1, a secure PPA API provided. CPU0 must be ON
	 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc	p15, 0, r0, c0, c0, 5		@ MPIDR: CPU id in low nibble
	ands	r0, r0, #0x0f
	beq	skip_ns_smp_enable		@ CPU0: handled by PPA init
ppa_actrl_retry:
	/* Invoke the PPA ACTRL/SMP service; retry until it returns 0. */
	mov	r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
	adr	r3, ppa_zero_params		@ Pointer to parameters
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	cmp	r0, #0x0			@ API returns 0 on success.
	beq	enable_smp_bit
	b	ppa_actrl_retry
enable_smp_bit:
	mrc	p15, 0, r0, c1, c0, 1		@ read ACTLR
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
	 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL
	 * register r0 contains value to be programmed.
	 * L2 cache is already invalidate by ROM code as part
	 * of MPUSS OFF wakeup path.
	 */
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en			@ Skip if already enabled
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1			@ Check for HS device
	bne	set_gp_por
	/* HS device: set L2 POR via the PPA service using ppa_por_params. */
	ldr	r0, =OMAP4_PPA_L2_POR_INDEX
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	adr	r3, ppa_por_params
	@ NOTE(review): stores into .text (ppa_por_params) — faults if kernel
	@ text is mapped read-only; later kernels moved the params to .data.
	str	r4, [r3, #0x04]
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	b	set_aux_ctrl
set_gp_por:
	/* GP device: program L2 prefetch control via the monitor index. */
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
	DO_SMC
set_aux_ctrl:
	/* Restore AUXCTRL from SAR RAM, then enable the L2 controller. */
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
	DO_SMC
	mov	r0, #0x1
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX	@ Enable L2 cache
	DO_SMC
skip_l2en:
#endif

	b	cpu_resume			@ Jump to generic resume
ENDPROC(omap4_cpu_resume)
#endif	/* CONFIG_ARCH_OMAP4 */
#endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */
#ifndef CONFIG_OMAP4_ERRATA_I688
/*
 * omap_bus_sync: interconnect write-buffer drain hook.
 * Without the i688 erratum workaround enabled this is a no-op stub that
 * simply returns; omap_do_wfi below calls it unconditionally.
 */
ENTRY(omap_bus_sync)
	mov	pc, lr
ENDPROC(omap_bus_sync)
#endif
/*
 * omap_do_wfi: drain write buffers, issue barriers, then enter WFI.
 * Returns to the caller when the CPU wakes (or fails to enter the
 * targeted low-power state).
 */
ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
	/* Drain interconnect write buffers. */
	bl	omap_bus_sync

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. CPU can speculatively
	 * prefetch the instructions so add NOPs after WFI. Sixteen
	 * NOPs as per Cortex-A9 pipeline.
	 */
	wfi					@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	ldmfd	sp!, {pc}
ENDPROC(omap_do_wfi)