/*
 * headsmp.S — NVIDIA Tegra SMP secondary-CPU entry points and CPU reset
 * handler (the reset handler is copied to IRAM and must stay
 * position-independent).
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/cache.h>

#include "flowctrl.h"
#include "iomap.h"
#include "reset.h"
#include "sleep.h"

/* APB MISC register holding the chip ID (HIDREV), used to detect Tegra20. */
#define APB_MISC_GP_HIDREV	0x804
/* PMC scratch register used for the CPU0 "grab CPU1" reset vector / death log. */
#define PMC_SCRATCH41		0x140

/* Byte offset of slot 'x' within the __tegra_cpu_reset_handler_data word array. */
#define RESET_DATA(x)		((TEGRA_RESET_##x)*4)

	.section ".text.head", "ax"
	__CPUINIT
  13. /*
  14. * Tegra specific entry point for secondary CPUs.
  15. * The secondary kernel init calls v7_flush_dcache_all before it enables
  16. * the L1; however, the L1 comes out of reset in an undefined state, so
  17. * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
  18. * of cache lines with uninitialized data and uninitialized tags to get
  19. * written out to memory, which does really unpleasant things to the main
  20. * processor. We fix this by performing an invalidate, rather than a
  21. * clean + invalidate, before jumping into the kernel.
  22. */
  23. ENTRY(v7_invalidate_l1)
  24. mov r0, #0
  25. mcr p15, 2, r0, c0, c0, 0
  26. mrc p15, 1, r0, c0, c0, 0
  27. ldr r1, =0x7fff
  28. and r2, r1, r0, lsr #13
  29. ldr r1, =0x3ff
  30. and r3, r1, r0, lsr #3 @ NumWays - 1
  31. add r2, r2, #1 @ NumSets
  32. and r0, r0, #0x7
  33. add r0, r0, #4 @ SetShift
  34. clz r1, r3 @ WayShift
  35. add r4, r3, #1 @ NumWays
  36. 1: sub r2, r2, #1 @ NumSets--
  37. mov r3, r4 @ Temp = NumWays
  38. 2: subs r3, r3, #1 @ Temp--
  39. mov r5, r3, lsl r1
  40. mov r6, r2, lsl r0
  41. orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
  42. mcr p15, 0, r5, c7, c6, 2
  43. bgt 2b
  44. cmp r2, #0
  45. bgt 1b
  46. dsb
  47. isb
  48. mov pc, lr
  49. ENDPROC(v7_invalidate_l1)
/*
 * Secondary-CPU boot vector: scrub the undefined L1 (invalidate only, see
 * comment on v7_invalidate_l1 above), unlock coresight, then fall into the
 * generic ARM secondary_startup path.
 */
ENTRY(tegra_secondary_startup)
	bl v7_invalidate_l1
	/* Enable coresight */
	/* 0xC5ACCE55 is the architected CoreSight lock-access key.
	 * NOTE(review): p14, 0, c7, c12, 6 — presumably the debug lock
	 * access register (DBGLAR); confirm against the debug arch spec. */
	mov32 r0, 0xC5ACCE55
	mcr p14, 0, r0, c7, c12, 6
	b secondary_startup
ENDPROC(tegra_secondary_startup)
#ifdef CONFIG_PM_SLEEP
/*
 * tegra_resume
 *
 * CPU boot vector when restarting the a CPU following
 * an LP2 transition. Also branched to by LP0 and LP1 resume after
 * re-enabling sdram.
 *
 * Secondary CPUs take the short path (invalidate L1, unlock coresight,
 * cpu_resume); only CPU0 performs the flow-controller and SCU setup below.
 */
ENTRY(tegra_resume)
	bl v7_invalidate_l1
	/* Enable coresight */
	mov32 r0, 0xC5ACCE55		@ CoreSight lock-access key
	mcr p14, 0, r0, c7, c12, 6
	cpu_id r0
	cmp r0, #0			@ CPU0?
	bne cpu_resume			@ no
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	/* Are we on Tegra20? */
	/* Multi-SoC kernel: the flow-controller clear below is Tegra30-only,
	 * so skip it when the chip ID (HIDREV[15:8]) reads 0x20. */
	mov32 r6, TEGRA_APB_MISC_BASE
	ldr r0, [r6, #APB_MISC_GP_HIDREV]
	and r0, r0, #0xff00
	cmp r0, #(0x20 << 8)
	beq 1f				@ Yes
	/* Clear the flow controller flags for this CPU. */
	mov32 r2, TEGRA_FLOW_CTRL_BASE + FLOW_CTRL_CPU0_CSR	@ CPU0 CSR
	ldr r1, [r2]
	/* Clear event & intr flag */
	/* (these CSR flag bits are write-1-to-clear, hence the orr) */
	orr r1, r1, \
		#FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
	movw r0, #0x0FFD	@ enable, cluster_switch, immed, & bitmaps
	bic r1, r1, r0		@ drop control bits so CPU0 is not re-halted
	str r1, [r2]
1:
#endif
#ifdef CONFIG_HAVE_ARM_SCU
	/* enable SCU */
	/* SCU control register is at offset 0 of the SCU; bit 0 = enable. */
	mov32 r0, TEGRA_ARM_PERIF_BASE
	ldr r1, [r0]
	orr r1, r1, #1
	str r1, [r0]
#endif
	b cpu_resume
ENDPROC(tegra_resume)
#endif
  101. .align L1_CACHE_SHIFT
  102. ENTRY(__tegra_cpu_reset_handler_start)
  103. /*
  104. * __tegra_cpu_reset_handler:
  105. *
  106. * Common handler for all CPU reset events.
  107. *
  108. * Register usage within the reset handler:
  109. *
  110. * R7 = CPU present (to the OS) mask
  111. * R8 = CPU in LP1 state mask
  112. * R9 = CPU in LP2 state mask
  113. * R10 = CPU number
  114. * R11 = CPU mask
  115. * R12 = pointer to reset handler data
  116. *
  117. * NOTE: This code is copied to IRAM. All code and data accesses
  118. * must be position-independent.
  119. */
  120. .align L1_CACHE_SHIFT
  121. ENTRY(__tegra_cpu_reset_handler)
  122. cpsid aif, 0x13 @ SVC mode, interrupts disabled
  123. mrc p15, 0, r10, c0, c0, 5 @ MPIDR
  124. and r10, r10, #0x3 @ R10 = CPU number
  125. mov r11, #1
  126. mov r11, r11, lsl r10 @ R11 = CPU mask
  127. adr r12, __tegra_cpu_reset_handler_data
  128. #ifdef CONFIG_SMP
  129. /* Does the OS know about this CPU? */
  130. ldr r7, [r12, #RESET_DATA(MASK_PRESENT)]
  131. tst r7, r11 @ if !present
  132. bleq __die @ CPU not present (to OS)
  133. #endif
  134. #ifdef CONFIG_ARCH_TEGRA_2x_SOC
  135. /* Are we on Tegra20? */
  136. mov32 r6, TEGRA_APB_MISC_BASE
  137. ldr r0, [r6, #APB_MISC_GP_HIDREV]
  138. and r0, r0, #0xff00
  139. cmp r0, #(0x20 << 8)
  140. bne 1f
  141. /* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
  142. mov32 r6, TEGRA_PMC_BASE
  143. mov r0, #0
  144. cmp r10, #0
  145. strne r0, [r6, #PMC_SCRATCH41]
  146. 1:
  147. #endif
  148. /* Waking up from LP2? */
  149. ldr r9, [r12, #RESET_DATA(MASK_LP2)]
  150. tst r9, r11 @ if in_lp2
  151. beq __is_not_lp2
  152. ldr lr, [r12, #RESET_DATA(STARTUP_LP2)]
  153. cmp lr, #0
  154. bleq __die @ no LP2 startup handler
  155. bx lr
  156. __is_not_lp2:
  157. #ifdef CONFIG_SMP
  158. /*
  159. * Can only be secondary boot (initial or hotplug) but CPU 0
  160. * cannot be here.
  161. */
  162. cmp r10, #0
  163. bleq __die @ CPU0 cannot be here
  164. ldr lr, [r12, #RESET_DATA(STARTUP_SECONDARY)]
  165. cmp lr, #0
  166. bleq __die @ no secondary startup handler
  167. bx lr
  168. #endif
  169. /*
  170. * We don't know why the CPU reset. Just kill it.
  171. * The LR register will contain the address we died at + 4.
  172. */
  173. __die:
  174. sub lr, lr, #4
  175. mov32 r7, TEGRA_PMC_BASE
  176. str lr, [r7, #PMC_SCRATCH41]
  177. mov32 r7, TEGRA_CLK_RESET_BASE
  178. /* Are we on Tegra20? */
  179. mov32 r6, TEGRA_APB_MISC_BASE
  180. ldr r0, [r6, #APB_MISC_GP_HIDREV]
  181. and r0, r0, #0xff00
  182. cmp r0, #(0x20 << 8)
  183. bne 1f
  184. #ifdef CONFIG_ARCH_TEGRA_2x_SOC
  185. mov32 r0, 0x1111
  186. mov r1, r0, lsl r10
  187. str r1, [r7, #0x340] @ CLK_RST_CPU_CMPLX_SET
  188. #endif
  189. 1:
  190. #ifdef CONFIG_ARCH_TEGRA_3x_SOC
  191. mov32 r6, TEGRA_FLOW_CTRL_BASE
  192. cmp r10, #0
  193. moveq r1, #FLOW_CTRL_HALT_CPU0_EVENTS
  194. moveq r2, #FLOW_CTRL_CPU0_CSR
  195. movne r1, r10, lsl #3
  196. addne r2, r1, #(FLOW_CTRL_CPU1_CSR-8)
  197. addne r1, r1, #(FLOW_CTRL_HALT_CPU1_EVENTS-8)
  198. /* Clear CPU "event" and "interrupt" flags and power gate
  199. it when halting but not before it is in the "WFI" state. */
  200. ldr r0, [r6, +r2]
  201. orr r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
  202. orr r0, r0, #FLOW_CTRL_CSR_ENABLE
  203. str r0, [r6, +r2]
  204. /* Unconditionally halt this CPU */
  205. mov r0, #FLOW_CTRL_WAITEVENT
  206. str r0, [r6, +r1]
  207. ldr r0, [r6, +r1] @ memory barrier
  208. dsb
  209. isb
  210. wfi @ CPU should be power gated here
  211. /* If the CPU didn't power gate above just kill it's clock. */
  212. mov r0, r11, lsl #8
  213. str r0, [r7, #348] @ CLK_CPU_CMPLX_SET
  214. #endif
  215. /* If the CPU still isn't dead, just spin here. */
  216. b .
  217. ENDPROC(__tegra_cpu_reset_handler)
/*
 * Zero-initialized word array consulted by the reset handler above
 * (indexed via the RESET_DATA() macro); filled in by C code before the
 * handler is copied to IRAM. The end label bounds the region to copy.
 */
	.align L1_CACHE_SHIFT
	.type	__tegra_cpu_reset_handler_data, %object
	.globl	__tegra_cpu_reset_handler_data
__tegra_cpu_reset_handler_data:
	.rept	TEGRA_RESET_DATA_SIZE
	.long	0
	.endr
	.align L1_CACHE_SHIFT

ENTRY(__tegra_cpu_reset_handler_end)