head-v7.S

/*
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/cache-l2x0.h>

	.section ".text.head", "ax"

/*
 * The secondary kernel init calls v7_flush_dcache_all before it enables
 * the L1; however, the L1 comes out of reset in an undefined state, so
 * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
 * of cache lines with uninitialized data and uninitialized tags to get
 * written out to memory, which does really unpleasant things to the main
 * processor. We fix this by performing an invalidate, rather than a
 * clean + invalidate, before jumping into the kernel.
 *
 * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
 * to be called for both secondary core startup and primary core resume
 * procedures. Ideally, it should be moved into arch/arm/mm/cache-v7.S.
 */
ENTRY(v7_invalidate_l1)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0	@ invalidate I cache
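	@ Select the level 1 data cache in CSSELR, then read its geometry
	@ (line size, associativity, number of sets) back from CCSIDR.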
	mcr	p15, 2, r0, c0, c0, 0
	mrc	p15, 1, r0, c0, c0, 0

	ldr	r1, =0x7fff
	and	r2, r1, r0, lsr #13

	ldr	r1, =0x3ff

	and	r3, r1, r0, lsr #3	@ NumWays - 1
	add	r2, r2, #1		@ NumSets

	and	r0, r0, #0x7
	add	r0, r0, #4		@ SetShift

	clz	r1, r3			@ WayShift
	add	r4, r3, #1		@ NumWays
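	@ Walk every set/way combination and invalidate it with DCISW
	@ (c7, c6, 2); each write takes (Way << WayShift) | (Set << SetShift).
	@ Illustrative numbers, assuming a 32 KB, 4-way, 32-byte-line L1:
	@ NumSets = 256, SetShift = 5, WayShift = CLZ(3) = 30, so the loop
	@ issues 1024 writes of the form (way << 30) | (set << 5).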
1:	sub	r2, r2, #1		@ NumSets--
	mov	r3, r4			@ Temp = NumWays
2:	subs	r3, r3, #1		@ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6		@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	mcr	p15, 0, r5, c7, c6, 2
	bgt	2b
	cmp	r2, #0
	bgt	1b
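	@ Drain the invalidations and flush the pipeline before returning.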
	dsb
	isb
	mov	pc, lr
ENDPROC(v7_invalidate_l1)

#ifdef CONFIG_SMP
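/*
 * Secondary core entry point: invalidate the L1 as described above,
 * then fall through to the generic ARM secondary_startup.
 */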
ENTRY(v7_secondary_startup)
	bl	v7_invalidate_l1
	b	secondary_startup
ENDPROC(v7_secondary_startup)
#endif

#ifdef CONFIG_PM
/*
 * The following code is placed in the .data section. This allows
 * phys_l2x0_saved_regs to be accessed with a relative load, as we are
 * still running from physical addresses at this point.
 */
	.data
	.align

#ifdef CONFIG_CACHE_L2X0
	.macro	pl310_resume
	ldr	r2, phys_l2x0_saved_regs
	ldr	r0, [r2, #L2X0_R_PHY_BASE]	@ get physical base of l2x0
	ldr	r1, [r2, #L2X0_R_AUX_CTRL]	@ get aux_ctrl value
	str	r1, [r0, #L2X0_AUX_CTRL]	@ restore aux_ctrl
	mov	r1, #0x1
	str	r1, [r0, #L2X0_CTRL]		@ re-enable L2
	.endm
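
/*
 * Expected to hold the physical address of the saved L2X0 register
 * block, presumably set up by the platform PM code before suspend;
 * pl310_resume above dereferences it while the MMU is still off.
 */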
	.globl	phys_l2x0_saved_regs
phys_l2x0_saved_regs:
	.long	0
#else
	.macro	pl310_resume
	.endm
#endif
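
/*
 * Resume entry point for the primary core: invalidate the L1, restore
 * the PL310 L2 cache (when CONFIG_CACHE_L2X0 is enabled), then continue
 * into the generic cpu_resume.
 */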
ENTRY(v7_cpu_resume)
	bl	v7_invalidate_l1
	pl310_resume
	b	cpu_resume
ENDPROC(v7_cpu_resume)
#endif