sleep34xx.S

/*
 * linux/arch/arm/mach-omap2/sleep.S
 *
 * (C) Copyright 2007
 * Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * (C) Copyright 2004
 * Texas Instruments, <www.ti.com>
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/io.h>
#include <plat/control.h>

#include "prm.h"
#include "sdrc.h"

#define PM_PREPWSTST_CORE_V	OMAP34XX_PRM_REGADDR(CORE_MOD, \
				OMAP3430_PM_PREPWSTST)
#define PM_PREPWSTST_MPU_V	OMAP34XX_PRM_REGADDR(MPU_MOD, \
				OMAP3430_PM_PREPWSTST)
#define PM_PWSTCTRL_MPU_P	OMAP3430_PRM_BASE + MPU_MOD + PM_PWSTCTRL
#define SRAM_BASE_P		0x40200000
#define CONTROL_STAT		0x480022F0
#define SCRATCHPAD_MEM_OFFS	0x310	/* Move this once a correct place
					 * is available */
#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE + OMAP343X_CONTROL_MEM_WKUP \
				 + SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V		OMAP34XX_SDRC_REGADDR(SDRC_POWER)
        .text
/* Function call to get the restore pointer for resume from OFF */
ENTRY(get_restore_pointer)
        stmfd    sp!, {lr}              @ save registers on stack
        adr      r0, restore
        ldmfd    sp!, {pc}              @ restore regs and return
ENTRY(get_restore_pointer_sz)
        .word    . - get_restore_pointer
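
/*
 * Note: the address returned above is that of the "restore" entry point
 * further down in this file.  The PM code saves it away (via the
 * scratchpad area) so that, on a wakeup from OFF, execution resumes at
 * "restore" with the MMU and caches still disabled.
 */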
/* Function to call rom code to save secure ram context */
ENTRY(save_secure_ram_context)
        stmfd    sp!, {r1-r12, lr}      @ save registers on stack
save_secure_ram_debug:
        /* b save_secure_ram_debug */   @ enable to debug save code
        adr      r3, api_params         @ r3 points to parameters
        str      r0, [r3,#0x4]          @ r0 has sdram address
        ldr      r12, high_mask
        and      r3, r3, r12
        ldr      r12, sram_phy_addr_mask
        orr      r3, r3, r12
        mov      r0, #25                @ set service ID for PPA
        mov      r12, r0                @ copy secure service ID in r12
        mov      r1, #0                 @ set task id for ROM code in r1
        mov      r2, #7                 @ set some flags in r2, r6
        mov      r6, #0xff
        mcr      p15, 0, r0, c7, c10, 4 @ data write barrier
        mcr      p15, 0, r0, c7, c10, 5 @ data memory barrier
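        /*
         * The .word below is the hand-assembled encoding of "smc #1"
         * (SMI), emitted directly because the assembler may not accept
         * the mnemonic for this build.
         */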
        .word    0xE1600071             @ call SMI monitor (smi #1)
        nop
        nop
        nop
        nop
        ldmfd    sp!, {r1-r12, pc}
sram_phy_addr_mask:
        .word    SRAM_BASE_P
high_mask:
        .word    0xffff
api_params:
        .word    0x4, 0x0, 0x0, 0x1, 0x1
ENTRY(save_secure_ram_context_sz)
        .word    . - save_secure_ram_context

/*
 * Forces OMAP into idle state
 *
 * omap34xx_suspend() - This bit of code just executes the WFI
 * for normal idles.
 *
 * Note: This code gets copied to internal SRAM at boot.  When the OMAP
 * wakes up it continues execution at the point it went to sleep.
 */
ENTRY(omap34xx_cpu_suspend)
        stmfd    sp!, {r0-r12, lr}      @ save registers on stack
loop:
        /*b loop*/                      @ Enable to debug by stepping through code
        /* r0 contains restore pointer in sdram */
        /* r1 contains information about saving context */
        ldr      r4, sdrc_power         @ read the SDRC_POWER register
        ldr      r5, [r4]               @ read the contents of SDRC_POWER
        orr      r5, r5, #0x40          @ enable self refresh on idle req
        str      r5, [r4]               @ write back to SDRC_POWER register
        cmp      r1, #0x0
        /* If context save is required, do that and execute wfi */
        bne      save_context_wfi
        /* Data memory barrier and Data sync barrier */
        mov      r1, #0
        mcr      p15, 0, r1, c7, c10, 4
        mcr      p15, 0, r1, c7, c10, 5
        wfi                             @ wait for interrupt
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        bl       i_dll_wait
        ldmfd    sp!, {r0-r12, pc}      @ restore regs and return
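
/*
 * Wakeup-from-OFF entry point: the restore pointer returned by
 * get_restore_pointer() brings execution back here with the MMU and
 * caches disabled.  The code below works out how much state was lost,
 * restores the saved CP15 context and re-enables the MMU before
 * returning to the original caller.
 */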
restore:
        /* b restore*/                  @ Enable to debug restore code
        /* Check what was the reason for mpu reset and store the reason in r9:
         *  1 - Only L1 and logic lost
         *  2 - Only L2 lost - In this case, we won't be here
         *  3 - Both L1 and L2 lost
         */
        ldr      r1, pm_pwstctrl_mpu
        ldr      r2, [r1]
        and      r2, r2, #0x3
        cmp      r2, #0x0       @ Check if target power state was OFF or RET
        moveq    r9, #0x3       @ MPU OFF => L1 and L2 lost
        movne    r9, #0x1       @ Only L1 and logic lost => avoid L2 invalidation
        bne      logic_l1_restore
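        /*
         * Both L1 and L2 were lost: check the device type in
         * CONTROL_STATUS.  GP devices invalidate L2 with the generic SMI
         * at l2_inv_gp below, while non-GP (HS/EMU) devices go through
         * the PPA services instead.
         */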
        ldr      r0, control_stat
        ldr      r1, [r0]
        and      r1, #0x700
        cmp      r1, #0x300
        beq      l2_inv_gp
        mov      r0, #40                @ set service ID for PPA
        mov      r12, r0                @ copy secure Service ID in r12
        mov      r1, #0                 @ set task id for ROM code in r1
        mov      r2, #4                 @ set some flags in r2, r6
        mov      r6, #0xff
        adr      r3, l2_inv_api_params  @ r3 points to dummy parameters
        mcr      p15, 0, r0, c7, c10, 4 @ data write barrier
        mcr      p15, 0, r0, c7, c10, 5 @ data memory barrier
        .word    0xE1600071             @ call SMI monitor (smi #1)
        /* Write to Aux control register to set some bits */
        mov      r0, #42                @ set service ID for PPA
        mov      r12, r0                @ copy secure Service ID in r12
        mov      r1, #0                 @ set task id for ROM code in r1
        mov      r2, #4                 @ set some flags in r2, r6
        mov      r6, #0xff
        adr      r3, write_aux_control_params   @ r3 points to parameters
        mcr      p15, 0, r0, c7, c10, 4 @ data write barrier
        mcr      p15, 0, r0, c7, c10, 5 @ data memory barrier
        .word    0xE1600071             @ call SMI monitor (smi #1)
        b        logic_l1_restore
l2_inv_api_params:
        .word    0x1, 0x00
write_aux_control_params:
        .word    0x1, 0x72
l2_inv_gp:
        /* Execute smi to invalidate L2 cache */
        mov      r12, #0x1              @ set up to invalidate L2
smi:    .word    0xE1600070             @ Call SMI monitor (smieq)
        /* Write to Aux control register to set some bits */
        mov      r0, #0x72
        mov      r12, #0x3
        .word    0xE1600070             @ Call SMI monitor (smieq)
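
/*
 * Restore the CP15 context stored by save_context_wfi: the scratchpad
 * word at offset 0xBC holds a pointer (set up by the PM code) to the
 * saved register area in SDRAM, and the registers are reloaded in the
 * same order they were saved.
 */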
logic_l1_restore:
        mov      r1, #0
        /* Invalidate all instruction caches to PoU
         * and flush branch target cache */
        mcr      p15, 0, r1, c7, c5, 0

        ldr      r4, scratchpad_base
        ldr      r3, [r4,#0xBC]
        ldmia    r3!, {r4-r6}
        mov      sp, r4
        msr      spsr_cxsf, r5
        mov      lr, r6

        ldmia    r3!, {r4-r9}
        /* Coprocessor access Control Register */
        mcr      p15, 0, r4, c1, c0, 2
        /* TTBR0 */
        mcr      p15, 0, r5, c2, c0, 0
        /* TTBR1 */
        mcr      p15, 0, r6, c2, c0, 1
        /* Translation table base control register */
        mcr      p15, 0, r7, c2, c0, 2
        /* Domain access Control Register */
        mcr      p15, 0, r8, c3, c0, 0
        /* Data Fault Status Register */
        mcr      p15, 0, r9, c5, c0, 0

        ldmia    r3!, {r4-r8}
        /* Instruction Fault Status Register */
        mcr      p15, 0, r4, c5, c0, 1
        /* Data Auxiliary Fault Status Register */
        mcr      p15, 0, r5, c5, c1, 0
        /* Instruction Auxiliary Fault Status Register */
        mcr      p15, 0, r6, c5, c1, 1
        /* Data Fault Address Register */
        mcr      p15, 0, r7, c6, c0, 0
        /* Instruction Fault Address Register */
        mcr      p15, 0, r8, c6, c0, 2

        ldmia    r3!, {r4-r7}
        /* User r/w thread and process ID */
        mcr      p15, 0, r4, c13, c0, 2
        /* User ro thread and process ID */
        mcr      p15, 0, r5, c13, c0, 3
        /* Privileged only thread and process ID */
        mcr      p15, 0, r6, c13, c0, 4
        /* Cache size selection */
        mcr      p15, 2, r7, c0, c0, 0

        ldmia    r3!, {r4-r8}
        /* Data TLB lockdown registers */
        mcr      p15, 0, r4, c10, c0, 0
        /* Instruction TLB lockdown registers */
        mcr      p15, 0, r5, c10, c0, 1
        /* Secure or Nonsecure Vector Base Address */
        mcr      p15, 0, r6, c12, c0, 0
        /* FCSE PID */
        mcr      p15, 0, r7, c13, c0, 0
        /* Context PID */
        mcr      p15, 0, r8, c13, c0, 1

        ldmia    r3!, {r4-r5}
        /* Primary memory remap register */
        mcr      p15, 0, r4, c10, c2, 0
        /* Normal memory remap register */
        mcr      p15, 0, r5, c10, c2, 1

        /* Restore cpsr */
        ldmia    r3!, {r4}              /* load CPSR from SDRAM */
        msr      cpsr, r4               /* write it back */

        /* Enabling MMU here */
        mrc      p15, 0, r7, c2, c0, 2  /* Read TTBRControl */
        /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
        and      r7, #0x7
        cmp      r7, #0x0
        beq      usettbr0
ttbr_error:
        /* More work needs to be done to support N[0:2] values other than 0,
         * so loop here so that the error can be detected.
         */
        b        ttbr_error
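/*
 * With N == 0, TTBR0 covers the whole translation table.  Build a
 * temporary 1MB section entry that identity-maps the section containing
 * the current (physical) PC, so execution can continue once the MMU is
 * switched on.  The original descriptor, its address and the original
 * control register value are parked in the scratchpad (offsets 0xC0,
 * 0xC4 and 0xC8) so the mapping can be undone once the MMU is back on.
 */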
usettbr0:
        mrc      p15, 0, r2, c2, c0, 0
        ldr      r5, ttbrbit_mask
        and      r2, r5
        mov      r4, pc
        ldr      r5, table_index_mask
        and      r4, r5                 /* r4 = bits 31 to 20 of pc */
        /* Extract the value to be written to the table entry */
        ldr      r1, table_entry
        add      r1, r1, r4             /* r1 has the value to be written to the table entry */
        /* Get the address of the table entry to modify */
        lsr      r4, #18
        add      r2, r4                 /* r2 has the location which needs to be modified */
        /* Store the previous entry of the location being modified */
        ldr      r5, scratchpad_base
        ldr      r4, [r2]
        str      r4, [r5, #0xC0]
        /* Modify the table entry */
        str      r1, [r2]
        /* Store the address of the entry being modified
         * - will be restored after enabling MMU */
        ldr      r5, scratchpad_base
        str      r2, [r5, #0xC4]

        mov      r0, #0
        mcr      p15, 0, r0, c7, c5, 4  @ Flush prefetch buffer
        mcr      p15, 0, r0, c7, c5, 6  @ Invalidate branch predictor array
        mcr      p15, 0, r0, c8, c5, 0  @ Invalidate instruction TLB
        mcr      p15, 0, r0, c8, c6, 0  @ Invalidate data TLB
        /* Restore the control register but don't enable caches here.
         * Caches will be enabled after restoring the MMU table entry. */
        ldmia    r3!, {r4}
        /* Store the previous value of the control register in the scratchpad */
        str      r4, [r5, #0xC8]
        ldr      r2, cache_pred_disable_mask
        and      r4, r2
        mcr      p15, 0, r4, c1, c0, 0
        ldmfd    sp!, {r0-r12, pc}      @ restore regs and return
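
/*
 * Suspend path with context save: write the ARM CP15 state to the SDRAM
 * buffer passed in r0 (kept in r8), optionally clean the caches, and
 * then execute WFI.  If the target power state is actually entered,
 * execution does not return here; it restarts at "restore" above on
 * wakeup.
 */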
save_context_wfi:
        /*b save_context_wfi*/          @ enable to debug save code
        mov      r8, r0                 /* Store SDRAM address in r8 */
        /* Check what the target sleep state is (stored in r1):
         *  1 - Only L1 and logic lost
         *  2 - Only L2 lost
         *  3 - Both L1 and L2 lost
         */
        cmp      r1, #0x2               /* Only L2 lost */
        beq      clean_l2
        cmp      r1, #0x1               /* L2 retained */
        /* r9 stores whether to clean L2 or not */
        moveq    r9, #0x0               /* Don't clean L2 */
        movne    r9, #0x1               /* Clean L2 */
l1_logic_lost:
        /* Store sp and spsr to SDRAM */
        mov      r4, sp
        mrs      r5, spsr
        mov      r6, lr
        stmia    r8!, {r4-r6}
        /* Save all ARM registers */
        /* Coprocessor access control register */
        mrc      p15, 0, r6, c1, c0, 2
        stmia    r8!, {r6}
        /* TTBR0, TTBR1 and Translation table base control */
        mrc      p15, 0, r4, c2, c0, 0
        mrc      p15, 0, r5, c2, c0, 1
        mrc      p15, 0, r6, c2, c0, 2
        stmia    r8!, {r4-r6}
        /* Domain access control register, data fault status register,
         * and instruction fault status register */
        mrc      p15, 0, r4, c3, c0, 0
        mrc      p15, 0, r5, c5, c0, 0
        mrc      p15, 0, r6, c5, c0, 1
        stmia    r8!, {r4-r6}
        /* Data aux fault status register, instruction aux fault status,
         * data fault address register and instruction fault address register */
        mrc      p15, 0, r4, c5, c1, 0
        mrc      p15, 0, r5, c5, c1, 1
        mrc      p15, 0, r6, c6, c0, 0
        mrc      p15, 0, r7, c6, c0, 2
        stmia    r8!, {r4-r7}
        /* User r/w thread and process ID, user r/o thread and process ID,
         * priv only thread and process ID, cache size selection */
        mrc      p15, 0, r4, c13, c0, 2
        mrc      p15, 0, r5, c13, c0, 3
        mrc      p15, 0, r6, c13, c0, 4
        mrc      p15, 2, r7, c0, c0, 0
        stmia    r8!, {r4-r7}
        /* Data TLB lockdown, instruction TLB lockdown registers */
        mrc      p15, 0, r5, c10, c0, 0
        mrc      p15, 0, r6, c10, c0, 1
        stmia    r8!, {r5-r6}
        /* Secure or non secure vector base address, FCSE PID, Context PID */
        mrc      p15, 0, r4, c12, c0, 0
        mrc      p15, 0, r5, c13, c0, 0
        mrc      p15, 0, r6, c13, c0, 1
        stmia    r8!, {r4-r6}
        /* Primary remap, normal remap registers */
        mrc      p15, 0, r4, c10, c2, 0
        mrc      p15, 0, r5, c10, c2, 1
        stmia    r8!, {r4-r5}
        /* Store current cpsr */
        mrs      r2, cpsr
        stmia    r8!, {r2}
        mrc      p15, 0, r4, c1, c0, 0
        /* Save control register */
        stmia    r8!, {r4}
clean_caches:
        /* Clean data or unified cache to PoU */
        /* How to invalidate only L1 cache???? - #FIX_ME# */
        /* mcr p15, 0, r11, c7, c11, 1 */
        cmp      r9, #1                 /* Check whether L2 inval is required or not */
        bne      skip_l2_inval
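/*
 * Standard ARMv7 clean-by-set/way walk: iterate over the cache levels
 * reported in CLIDR up to the Level of Coherency and, for each data or
 * unified cache, clean every set/way using the geometry read back from
 * CCSIDR.
 */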
clean_l2:
        /* Read CLIDR */
        mrc      p15, 1, r0, c0, c0, 1
        /* Extract LoC from CLIDR */
        ands     r3, r0, #0x7000000
        /* Left align LoC bit field */
        mov      r3, r3, lsr #23
        /* If LoC is 0, then no need to clean */
        beq      finished
        /* Start clean at cache level 0 */
        mov      r10, #0
loop1:
        /* Work out 3x current cache level */
        add      r2, r10, r10, lsr #1
        /* Extract cache type bits from CLIDR */
        mov      r1, r0, lsr r2
        /* Mask off the bits for the current cache only */
        and      r1, r1, #7
        /* See what cache we have at this level */
        cmp      r1, #2
        /* Skip if no cache, or just i-cache */
        blt      skip
        /* Select current cache level in CSSELR */
        mcr      p15, 2, r10, c0, c0, 0
        /* isb to sync the new CSSELR and CCSIDR */
        isb
        /* Read the new CCSIDR */
        mrc      p15, 1, r1, c0, c0, 0
        /* Extract the length of the cache lines */
        and      r2, r1, #7
        /* Add 4 (line length offset) */
        add      r2, r2, #4
        ldr      r4, assoc_mask
        /* Find maximum way number (associativity - 1) */
        ands     r4, r4, r1, lsr #3
        /* Find bit position of way size increment */
        clz      r5, r4
        ldr      r7, numset_mask
        /* Extract maximum set (index) number */
        ands     r7, r7, r1, lsr #13
loop2:
        /* Create working copy of max way size */
        mov      r9, r4
loop3:
        /* Factor way and cache number into r11 */
        orr      r11, r10, r9, lsl r5
        /* Factor index number into r11 */
        orr      r11, r11, r7, lsl r2
        /* Clean by set/way */
        mcr      p15, 0, r11, c7, c10, 2
        /* Decrement the way */
        subs     r9, r9, #1
        bge      loop3
        /* Decrement the index */
        subs     r7, r7, #1
        bge      loop2
skip:
        /* Increment cache number */
        add      r10, r10, #2
        cmp      r3, r10
        bgt      loop1
finished:
        /* Switch back to cache level 0 */
        mov      r10, #0
        /* Select current cache level in CSSELR */
        mcr      p15, 2, r10, c0, c0, 0
        isb
skip_l2_inval:
        /* Data memory barrier and Data sync barrier */
        mov      r1, #0
        mcr      p15, 0, r1, c7, c10, 4
        mcr      p15, 0, r1, c7, c10, 5
        wfi                             @ wait for interrupt
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        bl       i_dll_wait
        /* Restore regs and return */
        ldmfd    sp!, {r0-r12, pc}
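
/*
 * After wakeup, spin for a fixed delay so the SDRC DLL can stabilize,
 * then clear the self-refresh-on-idle request bit that was set in
 * SDRC_POWER before executing WFI.
 */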
i_dll_wait:
        ldr      r4, clk_stabilize_delay
i_dll_delay:
        subs     r4, r4, #0x1
        bne      i_dll_delay
        ldr      r4, sdrc_power
        ldr      r5, [r4]
        bic      r5, r5, #0x40
        str      r5, [r4]
        bx       lr

pm_prepwstst_core:
        .word    PM_PREPWSTST_CORE_V
pm_prepwstst_mpu:
        .word    PM_PREPWSTST_MPU_V
pm_pwstctrl_mpu:
        .word    PM_PWSTCTRL_MPU_P
scratchpad_base:
        .word    SCRATCHPAD_BASE_P
sdrc_power:
        .word    SDRC_POWER_V
clk_stabilize_delay:
        .word    0x000001FF
assoc_mask:
        .word    0x3ff
numset_mask:
        .word    0x7fff
ttbrbit_mask:
        .word    0xFFFFC000
table_index_mask:
        .word    0xFFF00000
table_entry:
        .word    0x00000C02
cache_pred_disable_mask:
        .word    0xFFFFE7FB
control_stat:
        .word    CONTROL_STAT
ENTRY(omap34xx_cpu_suspend_sz)
        .word    . - omap34xx_cpu_suspend