pm-at32ap700x.S

/*
 * Low-level Power Management code.
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <mach/pm.h>

#include "pm.h"
#include "sdramc.h"

/* Same as 0xfff00000 but fits in a 21 bit signed immediate */
#define PM_BASE	-0x100000
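/*
 * (-0x100000 is -(1 << 20); as a 32-bit two's-complement value that is
 * 0xfff00000, and it is the most negative value a 21-bit signed
 * immediate field can hold, so it just fits.)
 */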

	.section .bss, "wa", @nobits
	.global	disable_idle_sleep
	.type	disable_idle_sleep, @object
disable_idle_sleep:
	.int	4
	.size	disable_idle_sleep, . - disable_idle_sleep

	/* Keep this close to the irq handlers */
	.section .irq.text, "ax", @progbits

	/*
	 * void cpu_enter_idle(void)
	 *
	 * Put the CPU into "idle" mode, in which it will consume
	 * significantly less power.
	 *
	 * If an interrupt comes along in the window between
	 * unmask_interrupts and the sleep instruction below, the
	 * interrupt code will adjust the return address so that we
	 * never execute the sleep instruction. This is required
	 * because the AP7000 doesn't unmask interrupts when entering
	 * sleep modes; later CPUs may not need this workaround.
	 */
	.global	cpu_enter_idle
	.type	cpu_enter_idle, @function
cpu_enter_idle:
	mask_interrupts
	get_thread_info r8
	ld.w	r9, r8[TI_flags]
	bld	r9, TIF_NEED_RESCHED
	brcs	.Lret_from_sleep	/* a reschedule is pending: don't sleep */
	sbr	r9, TIF_CPU_GOING_TO_SLEEP
	st.w	r8[TI_flags], r9
	unmask_interrupts
	sleep	CPU_SLEEP_IDLE
	.size	cpu_enter_idle, . - cpu_enter_idle

	/*
	 * Common return path for PM functions that don't run from
	 * SRAM.
	 */
	.global	cpu_idle_skip_sleep
	.type	cpu_idle_skip_sleep, @function
cpu_idle_skip_sleep:
	mask_interrupts
	ld.w	r9, r8[TI_flags]	/* r8 still holds thread_info from cpu_enter_idle */
	cbr	r9, TIF_CPU_GOING_TO_SLEEP
	st.w	r8[TI_flags], r9
.Lret_from_sleep:
	unmask_interrupts
	retal	r12
	.size	cpu_idle_skip_sleep, . - cpu_idle_skip_sleep
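
/*
 * The other half of the "skip sleep" trick lives in the low-level
 * interrupt entry code, not in this file: roughly, if the interrupted
 * context has TIF_CPU_GOING_TO_SLEEP set, the handler rewrites the
 * saved return address so that the interrupt returns to
 * cpu_idle_skip_sleep instead of to the sleep instruction in
 * cpu_enter_idle.
 */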

#ifdef CONFIG_PM
	.section .init.text, "ax", @progbits

	.global	pm_exception
	.type	pm_exception, @function
pm_exception:
	/*
	 * Exceptions are masked when we switch to this handler, so
	 * we'll only get "unrecoverable" exceptions (offset 0).
	 */
	sub	r12, pc, . - .Lpanic_msg	/* r12 (first argument) = address of the message */
	lddpc	pc, .Lpanic_addr		/* jump to panic(), which never returns */

	.align	2
.Lpanic_addr:
	.long	panic
.Lpanic_msg:
	.asciz	"Unrecoverable exception during suspend\n"
	.size	pm_exception, . - pm_exception
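
/*
 * INT0 handler used while pm_standby/pm_suspend_to_ram below are
 * sleeping (presumably reached through the suspend-time exception
 * vectors that EVBA points at). Those functions leave the address of
 * the instruction following their sleep instruction in r8, so all we
 * do here is make that the return address and set GM (global
 * interrupt mask) in the saved status register, i.e. we resume right
 * after the sleep with interrupts masked.
 */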
	.global	pm_irq0
	.type	pm_irq0, @function
pm_irq0:
	/* Disable interrupts and return after the sleep instruction */
	mfsr	r9, SYSREG_RSR_INT0
	mtsr	SYSREG_RAR_INT0, r8
	sbr	r9, SYSREG_GM_OFFSET
	mtsr	SYSREG_RSR_INT0, r9
	rete

	/*
	 * void cpu_enter_standby(unsigned long sdramc_base)
	 *
	 * Enter PM_SUSPEND_STANDBY mode. At this point, all drivers
	 * are suspended and interrupts are disabled. Interrupts
	 * marked as 'wakeup' event sources may still come along and
	 * get us out of here.
	 *
	 * The SDRAM will be put into self-refresh mode (which does
	 * not require a clock from the CPU), and the CPU will be put
	 * into "frozen" mode (HSB bus stopped). The SDRAM controller
	 * will automatically bring the SDRAM into normal mode on the
	 * first access, and the power manager will automatically
	 * start the HSB and CPU clocks upon a wakeup event.
	 *
	 * This code uses the same "skip sleep" technique as above.
	 * It is very important that the instruction right after the
	 * sleep instruction (the 1: label below) is also the address
	 * we hand to the interrupt handler in r8, since that's where
	 * we'll end up if the handler decides that we need to skip
	 * the sleep instruction.
	 */
	.global	pm_standby
	.type	pm_standby, @function
pm_standby:
	/*
	 * Interrupts are already masked at this point, and EVBA
	 * points to pm_exception above.
	 */
	ld.w	r10, r12[SDRAMC_LPR]
	sub	r8, pc, . - 1f		/* return address for irq handler */
	mov	r11, SDRAMC_LPR_LPCB_SELF_RFR
	bfins	r10, r11, 0, 2		/* LPCB <- self refresh */
	sync	0			/* flush write buffer */
	st.w	r12[SDRAMC_LPR], r10	/* put SDRAM in self-refresh mode */
	ld.w	r11, r12[SDRAMC_LPR]	/* read back (make sure the write has reached the controller) */
	unmask_interrupts
	sleep	CPU_SLEEP_FROZEN
1:	mask_interrupts
	retal	r12
	.size	pm_standby, . - pm_standby
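
/*
 * pm_suspend_to_ram below follows the same pattern as pm_standby, but
 * it also cleans the dcache first (so that SDRAM holds an up-to-date
 * copy of everything before it goes into self-refresh) and uses the
 * deeper "stop" sleep mode.
 */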
	.global	pm_suspend_to_ram
	.type	pm_suspend_to_ram, @function
pm_suspend_to_ram:
	/*
	 * Interrupts are already masked at this point, and EVBA
	 * points to pm_exception above.
	 */
	mov	r11, 0
	cache	r11[2], 8		/* clean all dcache lines */
	sync	0			/* flush write buffer */
	ld.w	r10, r12[SDRAMC_LPR]
	sub	r8, pc, . - 1f		/* return address for irq handler */
	mov	r11, SDRAMC_LPR_LPCB_SELF_RFR
	bfins	r10, r11, 0, 2		/* LPCB <- self refresh */
	st.w	r12[SDRAMC_LPR], r10	/* put SDRAM in self-refresh mode */
	ld.w	r11, r12[SDRAMC_LPR]	/* read back (make sure the write has reached the controller) */
	unmask_interrupts
	sleep	CPU_SLEEP_STOP
1:	mask_interrupts
	retal	r12
	.size	pm_suspend_to_ram, . - pm_suspend_to_ram
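
/*
 * End-of-code marker: the platform PM setup code presumably copies
 * everything from pm_exception up to pm_sram_end into SRAM so that it
 * can run while the SDRAM is in self-refresh.
 */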
	.global	pm_sram_end
	.type	pm_sram_end, @function
pm_sram_end:
	.size	pm_sram_end, 0

#endif /* CONFIG_PM */