/* sleep44xx.S */
  1. /*
  2. * OMAP44xx sleep code.
  3. *
  4. * Copyright (C) 2011 Texas Instruments, Inc.
  5. * Santosh Shilimkar <santosh.shilimkar@ti.com>
  6. *
* This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/linkage.h>
  12. #include <asm/system.h>
  13. #include <asm/smp_scu.h>
  14. #include <asm/memory.h>
  15. #include <asm/hardware/cache-l2x0.h>
  16. #include <plat/omap44xx.h>
  17. #include <mach/omap-secure.h>
  18. #include "common.h"
  19. #include "omap4-sar-layout.h"
  20. #if defined(CONFIG_SMP) && defined(CONFIG_PM)
/*
 * DO_SMC: issue a Secure Monitor Call (SMC #0) into the OMAP secure
 * ROM/PPA. Per the OMAP secure API, r12 holds the service index and
 * r0-r3 hold the arguments. The DSB before the call ensures all prior
 * memory accesses have completed before entering the secure side; the
 * DSB after ensures the secure side's effects are visible on return.
 * NOTE(review): the secure side may clobber r4-r12 — callers here
 * save/restore them around DO_SMC.
 */
.macro DO_SMC
dsb
smc #0
dsb
.endm
/*
 * Zero-filled parameter word handed to the secure PPA: its address is
 * loaded into r3 (via "adr r3, ppa_zero_params") before DO_SMC in
 * omap4_cpu_resume.
 */
ppa_zero_params:
.word 0x0
  28. /*
  29. * =============================
  30. * == CPU suspend finisher ==
  31. * =============================
  32. *
  33. * void omap4_finish_suspend(unsigned long cpu_state)
  34. *
  35. * This function code saves the CPU context and performs the CPU
  36. * power down sequence. Calling WFI effectively changes the CPU
  37. * power domains states to the desired target power state.
  38. *
  39. * @cpu_state : contains context save state (r0)
  40. * 0 - No context lost
  41. * 1 - CPUx L1 and logic lost: MPUSS CSWR
  42. * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
  43. * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
  44. * @return: This function never returns for CPU OFF and DORMANT power states.
  45. * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
  46. * from this follows a full CPU reset path via ROM code to CPU restore code.
  47. * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
  48. * It returns to the caller for CPU INACTIVE and ON power states or in case
  49. * CPU failed to transition to targeted OFF/DORMANT state.
  50. */
/*
 * omap4_finish_suspend(cpu_state):
 * r0 = requested context-loss level (see the header comment above).
 * Flushes L1, exits SMP coherency, programs the SCU power status
 * (via the secure monitor on HS devices, directly on GP devices),
 * then executes WFI. Returns to the caller only if the targeted
 * OFF/DORMANT state was not actually entered.
 */
ENTRY(omap4_finish_suspend)
stmfd sp!, {lr}
cmp r0, #0x0
beq do_WFI @ No low-power state requested, jump straight to WFI
/*
 * Flush all data from the L1 data cache before disabling
 * SCTLR.C bit.
 */
bl omap4_get_sar_ram_base @ r0 = SAR RAM base on return
ldr r9, [r0, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS (high-security) device
bne skip_secure_l1_clean
mov r0, #SCU_PM_NORMAL
mov r1, #0xFF @ clean secure L1
stmfd r13!, {r4-r12, r14} @ secure call may clobber r4-r12
ldr r12, =OMAP4_MON_SCU_PWR_INDEX @ secure monitor service index
DO_SMC
ldmfd r13!, {r4-r12, r14}
skip_secure_l1_clean:
bl v7_flush_dcache_all
/*
 * Clear the SCTLR.C bit to prevent further data cache
 * allocation. Clearing SCTLR.C would make all the data accesses
 * strongly ordered and would not hit the cache.
 */
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
/*
 * Invalidate L1 data cache. Even though only invalidate is
 * necessary, the exported flush API is used here. Doing clean
 * on an already-clean cache is almost a NOP.
 */
bl v7_flush_dcache_all
/*
 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
 * to Asymmetric Multiprocessing (AMP) mode by programming
 * the SCU power status to DORMANT or OFF mode.
 * This enables the CPU to be taken out of coherency by
 * preventing the CPU from receiving cache, TLB, or BTB
 * maintenance operations broadcast by other CPUs in the cluster.
 */
bl omap4_get_sar_ram_base
mov r8, r0 @ r8 = SAR RAM base, kept live across calls below
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne scu_gp_set
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f @ extract CPU id (0 or 1)
ldreq r0, [r8, #SCU_OFFSET0] @ CPU0: SCU target state saved in SAR RAM
ldrne r0, [r8, #SCU_OFFSET1] @ CPU1: likewise
mov r1, #0x00
stmfd r13!, {r4-r12, r14}
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
DO_SMC @ HS device: SCU power status is set via secure monitor
ldmfd r13!, {r4-r12, r14}
b skip_scu_gp_set
scu_gp_set:
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f
ldreq r1, [r8, #SCU_OFFSET0] @ GP device: fetch mode for scu_power_mode()
ldrne r1, [r8, #SCU_OFFSET1]
bl omap4_get_scu_base @ r0 = SCU base for scu_power_mode(base, mode)
bl scu_power_mode
skip_scu_gp_set:
mrc p15, 0, r0, c1, c1, 2 @ Read NSACR data
tst r0, #(1 << 18) @ NS access to the ACTLR SMP bit allowed?
mrcne p15, 0, r0, c1, c0, 1
bicne r0, r0, #(1 << 6) @ Disable SMP bit
mcrne p15, 0, r0, c1, c0, 1
isb
dsb
do_WFI:
bl omap_do_wfi
/*
 * CPU is here when it failed to enter OFF/DORMANT or
 * no low power state was attempted.
 */
mrc p15, 0, r0, c1, c0, 0
tst r0, #(1 << 2) @ Check C bit enabled?
orreq r0, r0, #(1 << 2) @ Re-enable the C bit if it was cleared above
mcreq p15, 0, r0, c1, c0, 0
isb
/*
 * Ensure the CPU power state is set to NORMAL in
 * SCU power state so that CPU is back in coherency.
 * In non-coherent mode CPU can lock-up and lead to
 * system deadlock.
 */
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ Check SMP bit enabled?
orreq r0, r0, #(1 << 6)
mcreq p15, 0, r0, c1, c0, 1
isb
bl omap4_get_sar_ram_base
mov r8, r0
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne scu_gp_clear
mov r0, #SCU_PM_NORMAL
mov r1, #0x00
stmfd r13!, {r4-r12, r14}
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
DO_SMC @ HS device: restore SCU power status via secure monitor
ldmfd r13!, {r4-r12, r14}
b skip_scu_gp_clear
scu_gp_clear:
bl omap4_get_scu_base
mov r1, #SCU_PM_NORMAL
bl scu_power_mode
skip_scu_gp_clear:
isb
dsb
ldmfd sp!, {pc} @ return: low-power state was not entered (ON/INACTIVE)
ENDPROC(omap4_finish_suspend)
  167. /*
  168. * ============================
  169. * == CPU resume entry point ==
  170. * ============================
  171. *
  172. * void omap4_cpu_resume(void)
  173. *
  174. * ROM code jumps to this function while waking up from CPU
  175. * OFF or DORMANT state. Physical address of the function is
  176. * stored in the SAR RAM while entering to OFF or DORMANT mode.
  177. * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
  178. */
/*
 * omap4_cpu_resume: non-boot entry point jumped to by ROM code on
 * wake-up from CPU OFF/DORMANT. On HS devices, CPU1 must first ask
 * the secure PPA to grant non-secure access to the ACTLR SMP bit;
 * then it re-enables SMP mode and tail-jumps to the generic ARM
 * cpu_resume path. Never returns.
 */
ENTRY(omap4_cpu_resume)
/*
 * Configure ACTLR and enable NS SMP bit access on CPU1 on HS device.
 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
 * init and for CPU1, a secure PPA API provided. CPU0 must be ON
 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
 * OMAP443X GP devices- SMP bit isn't accessible.
 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
 */
ldr r8, =OMAP44XX_SAR_RAM_BASE
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Skip if GP device
bne skip_ns_smp_enable
mrc p15, 0, r0, c0, c0, 5 @ MPIDR: which CPU is this?
ands r0, r0, #0x0f
beq skip_ns_smp_enable @ CPU0: SMP bit access already enabled in PPA init
ppa_actrl_retry:
mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX @ secure PPA service index
adr r3, ppa_zero_params @ Pointer to parameters
mov r1, #0x0 @ Process ID
mov r2, #0x4 @ Flag
mov r6, #0xff
mov r12, #0x00 @ Secure Service ID
DO_SMC
cmp r0, #0x0 @ API returns 0 on success.
beq enable_smp_bit
b ppa_actrl_retry @ busy-retry until the PPA call succeeds
enable_smp_bit:
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ Check SMP bit enabled?
orreq r0, r0, #(1 << 6)
mcreq p15, 0, r0, c1, c0, 1
isb
skip_ns_smp_enable:
b cpu_resume @ Jump to generic ARM resume code (never returns)
ENDPROC(omap4_cpu_resume)
  215. #endif
/*
 * omap_do_wfi: drain all outstanding CP15/cache/TLB/BTB maintenance,
 * then execute WFI to put the CPU into its targeted idle state.
 * Returns to the caller when the CPU wakes up without having lost
 * context (OFF/DORMANT wake-ups instead re-enter via ROM code).
 */
ENTRY(omap_do_wfi)
stmfd sp!, {lr}
/*
 * Execute an ISB instruction to ensure that all of the
 * CP15 register changes have been committed.
 */
isb
/*
 * Execute a barrier instruction to ensure that all cache,
 * TLB and branch predictor maintenance operations issued
 * by any CPU in the cluster have completed.
 */
dsb
dmb
/*
 * Execute a WFI instruction and wait until the
 * STANDBYWFI output is asserted to indicate that the
 * CPU is in idle and low power state. The CPU can speculatively
 * prefetch instructions, so add NOPs after WFI. Sixteen
 * NOPs as per the Cortex-A9 pipeline depth.
 */
wfi @ Wait For Interrupt
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
ldmfd sp!, {pc}
ENDPROC(omap_do_wfi)