swsusp_32.S
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 SPRGs */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
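/* r12 to r31 is 20 registers of 4 bytes each, hence the 80 bytes
 * past SL_R12; stmw/lmw below save and restore them in one shot. */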
	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE

	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)
	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b
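	/* The loop above guards against a carry from TBL into TBU
	 * between the two reads: if TBU changed while TBL was being
	 * sampled, both halves are re-read until they are consistent. */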
	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)
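	/* Each BAT is an upper/lower SPR pair saved as two consecutive
	 * words, which is why the SL_xBATn slots above are spaced
	 * 8 bytes apart. */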
#if 0
	/* Back up various CPU config stuff */
	bl	__save_cpu_setup
#endif
	/* Call the low-level suspend code (we should probably have
	 * set up a stack frame...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr
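	/* swsusp_save() snapshots memory and returns 0 on success (or
	 * a negative errno); its return value is still in r3 at the
	 * blr above, so it is passed straight back to our caller. */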
/* Resume code */
_GLOBAL(swsusp_arch_resume)

#ifdef CONFIG_ALTIVEC
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will be
	 * unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. The G5 will need a better approach, possibly
	 * using a small temporary hash table filled with large
	 * mappings; disabling the MMU completely isn't a good
	 * option for performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 here; we should investigate using moving BATs
	 * for those CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

	/* Load the pointer to the head of the page list
	 * (restore_pblist) into r10 */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)
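	/* With MSR:DR off we need the physical address of
	 * restore_pblist; subtracting KERNELBASE only affects the
	 * upper half of the address, so the @l part can be used
	 * unchanged. */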
	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient. */
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b

	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b
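	/* Each list entry describes one page: 256 iterations of the
	 * 16-byte inner loop copy exactly 4096 bytes from the swsusp
	 * copy (pbe_address) back to its original home
	 * (pbe_orig_address); pbe_next is then followed until NULL. */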
	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync
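	/* Both loops walk 0x20000 lines at a 32-byte stride, i.e. the
	 * first 4MB of RAM (we are running 1:1 with translation off).
	 * The load pass displaces whatever the copy left in the L1,
	 * and the dcbf pass pushes it out to memory; this assumes
	 * 32-byte cache lines and that 4MB comfortably exceeds the
	 * cache sizes of the CPUs this code targets. */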
	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU config stuff */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are executing out of those
	 * BATs, but first, our code is probably in the icache,
	 * and we are writing the same values back to them, so
	 * that should be fine, though a better solution will
	 * have to be found long-term
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4
#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif
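	/* On CPUs that have the extra high BATs (4..7), e.g.
	 * 745x-class parts, clear them instead of restoring them:
	 * the save area does not record them, so zero is the
	 * known-safe state. */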
BEGIN_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync
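	/* r4 counts down from 0x10000000 one 4KB page at a time;
	 * issuing tlbie for every page-aligned EA in that range
	 * should index every TLB congruence class on these CPUs,
	 * invalidating the whole TLB. */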
	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4
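	/* Zeroing TBL first guarantees that no carry into TBU can
	 * happen between the mttbu and the final mttbl, so the two
	 * restored halves stay consistent. */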
	/* Kick decrementer */
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr
/* FIXME: This construct is actually not useful since we don't
 * shut down the instruction MMU; we could just flip MSR:DR
 * back on.
 */
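/* The trick below: the caller's return address (LR) goes into SRR0
 * and the target MSR (r3) into SRR1, so the rfi both returns to the
 * caller and installs the new MSR atomically, turning translation
 * back on in a single step. */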
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi