
/*
 * Low-level PXA250/210 sleep/wakeUp support
 *
 * Initial SA1110 code:
 * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
 *
 * Adapted for PXA by Nicolas Pitre:
 * Copyright (c) 2002 Monta Vista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License.
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/hardware.h>

#include <asm/arch/pxa-regs.h>

#ifdef CONFIG_PXA27x			// workaround for Errata 50
#define MDREFR_KDIV	0x200a4000	// all banks
#define CCCR_SLEEP	0x00000107	// L=7 2N=2 A=0 PPDIS=0 CPDIS=0
#endif
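
/*
 * With L=7 the run clock works out to 7 x 13 MHz = 91 MHz (and 2N=2 keeps
 * turbo at run x1), i.e. the highest core speed at which Errata 50 (see
 * below) still permits entering sleep mode.
 */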

		.text

/*
 * pxa_cpu_suspend()
 *
 * Forces CPU into sleep state
 */

ENTRY(pxa_cpu_suspend)

#ifndef CONFIG_IWMMXT
	mra	r2, r3, acc0
#endif
	stmfd	sp!, {r2 - r12, lr}		@ save registers on stack
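	@ without iWMMXt support, r2/r3 carry the acc0 value read just above;
	@ the matching ldmfd + mar in resume_after_mmu restores acc0 on wakeup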

	@ get coprocessor registers
	mrc	p14, 0, r3, c6, c0, 0		@ clock configuration, for turbo mode
	mrc	p15, 0, r4, c15, c1, 0		@ CP access reg
	mrc	p15, 0, r5, c13, c0, 0		@ PID
	mrc	p15, 0, r6, c3, c0, 0		@ domain ID
	mrc	p15, 0, r7, c2, c0, 0		@ translation table base addr
	mrc	p15, 0, r8, c1, c1, 0		@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0		@ control reg

	bic	r3, r3, #2			@ clear frequency change bit

	@ store them plus current virtual stack ptr on stack
	mov	r10, sp
	stmfd	sp!, {r3 - r10}

	@ preserve phys address of stack
	mov	r0, sp
	bl	sleep_phys_sp
	ldr	r1, =sleep_save_sp
	str	r0, [r1]
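	@ the value stored above is deliberately a *physical* address:
	@ pxa_cpu_resume below runs with the MMU still disabled, so it must be
	@ able to find and reload this stack frame without any translation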

	@ clean data cache
	bl	xscale_flush_kern_cache_all
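	@ (cache contents are not preserved across sleep; dirty lines have to
	@ reach SDRAM now so they survive in self-refresh)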

	@ Put the processor to sleep
	@ (also workaround for sighting 28071)

	@ prepare value for sleep mode
	mov	r1, #3				@ sleep mode

	@ prepare pointer to physical address 0 (virtual mapping in generic.c)
	mov	r2, #UNCACHED_PHYS_0

	@ prepare SDRAM refresh settings
	ldr	r4, =MDREFR
	ldr	r5, [r4]

	@ enable SDRAM self-refresh mode
	orr	r5, r5, #MDREFR_SLFRSH

#ifdef CONFIG_PXA27x
	@ set SDCLKx divide-by-2 bits (this is part of a workaround for Errata 50)
	ldr	r6, =MDREFR_KDIV
	orr	r5, r5, r6
#endif

#ifdef CONFIG_PXA25x
	@ Intel PXA255 Specification Update notes problems
	@ about suspending with PXBus operating above 133MHz
	@ (see Errata 31, "GPIO output signals ... unpredictable in sleep")
	@
	@ We keep the change-down as close to the actual suspend of SDRAM
	@ as possible, to eliminate messing about with the refresh clock,
	@ as the system will restore with the original speed settings
	@
	@ Ben Dooks, 13-Sep-2004

	ldr	r6, =CCCR
	ldr	r8, [r6]			@ keep original value for resume

	@ ensure x1 for run and turbo mode with memory clock
	bic	r7, r8, #CCCR_M_MASK | CCCR_N_MASK
	orr	r7, r7, #(1<<5) | (2<<7)

	@ check that the memory frequency is within limits
	and	r14, r7, #CCCR_L_MASK
	teq	r14, #1
	bicne	r7, r7, #CCCR_L_MASK
	orrne	r7, r7, #1			@@ 99.53MHz
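	@ here (1<<5) selects run = memory clock x1 and (2<<7) selects
	@ turbo = run x1; if L was not already 1 it is forced to 1, giving a
	@ 99.53MHz memory clock, comfortably below the 133MHz limit above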

	@ get ready for the change

	@ note, turbo is not preserved over sleep so there is no
	@ point in preserving it here. we save it on the stack with the
	@ other CP registers instead.
	mov	r0, #0
	mcr	p14, 0, r0, c6, c0, 0
	orr	r0, r0, #2			@ initiate change bit
#endif
#ifdef CONFIG_PXA27x
	@ Intel PXA270 Specification Update notes problems sleeping
	@ with core operating above 91 MHz
	@ (see Errata 50, ...processor does not exit from sleep...)

	ldr	r6, =CCCR
	ldr	r8, [r6]			@ keep original value for resume

	ldr	r7, =CCCR_SLEEP			@ prepare CCCR sleep value
	mov	r0, #0x2			@ prepare value for CLKCFG
#endif

	@ align execution to a cache line
	b	1f

	.ltorg
	.align	5
1:

	@ All needed values are now in registers.
	@ These last instructions should be in cache

#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
	@ initiate the frequency change...
	str	r7, [r6]
	mcr	p14, 0, r0, c6, c0, 0

	@ restore the original cpu speed value for resume
	str	r8, [r6]

	@ need 6 13-MHz cycles before changing PWRMODE
	@ just set frequency to 91-MHz... 6*91/13 = 42

	mov	r0, #42
10:	subs	r0, r0, #1
	bne	10b
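
	@ 42 iterations give at least 42 core cycles; at the 91MHz ceiling set
	@ above that is at least the 6 periods of the 13MHz clock required
	@ before PWRMODE may be written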
#endif

	@ Do not reorder...
	@ Intel PXA270 Specification Update notes problems performing
	@ external accesses after SDRAM is put in self-refresh mode
	@ (see Errata 39 ...hangs when entering self-refresh mode)

	@ force address lines low by reading at physical address 0
	ldr	r3, [r2]

	@ put SDRAM into self-refresh
	str	r5, [r4]

	@ enter sleep mode
	mcr	p14, 0, r1, c7, c0, 0		@ PWRMODE

20:	b	20b				@ loop waiting for sleep
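
	@ the PWRMODE write above (r1 = 3) requests sleep; the branch-to-self
	@ merely spins until power is actually removed.  Wakeup re-enters the
	@ kernel through the bootloader at pxa_cpu_resume below.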

/*
 * pxa_cpu_resume()
 *
 * entry point from bootloader into kernel during resume
 *
 * Note: Yes, part of the following code is located in the .data section.
 *       This is to allow sleep_save_sp to be accessed with a relative load
 *       while we can't rely on any MMU translation.  We could have put
 *       sleep_save_sp in the .text section as well, but some setups might
 *       insist on it being truly read-only.
 */

	.data
	.align	5
ENTRY(pxa_cpu_resume)
	mov	r0, #PSR_I_BIT | PSR_F_BIT | MODE_SVC	@ set SVC, irqs off
	msr	cpsr_c, r0

	ldr	r0, sleep_save_sp		@ stack phys addr
	ldr	r2, =resume_after_mmu		@ its absolute virtual address
	ldmfd	r0, {r3 - r9, sp}		@ CP regs + virt stack ptr

	mov	r1, #0
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
	mcr	p15, 0, r1, c7, c7, 0		@ invalidate I & D caches, BTB

#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bic	r9, r9, #0x0004			@ see cpu_xscale_proc_init
#endif
	mcr	p14, 0, r3, c6, c0, 0		@ clock configuration, turbo mode.
	mcr	p15, 0, r4, c15, c1, 0		@ CP access reg
	mcr	p15, 0, r5, c13, c0, 0		@ PID
	mcr	p15, 0, r6, c3, c0, 0		@ domain ID
	mcr	p15, 0, r7, c2, c0, 0		@ translation table base addr
	mcr	p15, 0, r8, c1, c1, 0		@ auxiliary control reg

	b	resume_turn_on_mmu		@ cache align execution

	.align	5
resume_turn_on_mmu:
	mcr	p15, 0, r9, c1, c0, 0		@ turn on MMU, caches, etc.

	@ Let us ensure we jump to resume_after_mmu only when the mcr above
	@ actually took effect.  They call it the "cpwait" operation.
	mrc	p15, 0, r1, c2, c0, 0		@ queue a dependency on CP15
	sub	pc, r2, r1, lsr #32		@ jump to virtual addr
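	@ "lsr #32" evaluates to zero, so the sub above really just jumps to
	@ r2; making pc depend on r1 (the result of the mrc) stalls execution
	@ until the CP15 write has taken effect - the usual XScale cpwait idiom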
	nop
	nop
	nop

sleep_save_sp:
	.word	0				@ preserve stack phys ptr here

	.text
resume_after_mmu:
#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bl	cpu_xscale_proc_init
#endif
	ldmfd	sp!, {r2, r3}
#ifndef CONFIG_IWMMXT
	mar	acc0, r2, r3
#endif
	ldmfd	sp!, {r4 - r12, pc}		@ return to caller