swsusp.S
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>

/*
 * Layout of the save area used to store CPU registers.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
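/* r12..r31 is 20 registers of 4 bytes each, i.e. 80 bytes, which is
 * where the SL_R12 + 80 above comes from.
 */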
	.section .data
	.align	5
_GLOBAL(swsusp_save_area)
	.space	SL_SIZE

	.section .text
	.align	5
_GLOBAL(swsusp_arch_suspend)
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	/* Save LR, CR, the stack pointer, r2, and the whole
	 * r12..r31 range (stmw stores every register from r12
	 * through r31)
	 */
	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)
	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b
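/* The loop above re-reads TBU after reading TBL and retries if TBU
 * changed, so a carry from TBL into TBU between the two reads cannot
 * tear the 64-bit timebase value. As a rough C sketch (mftbu()/mftb()
 * being hypothetical wrappers around the same instructions):
 *
 *	do {
 *		hi = mftbu();
 *		lo = mftb();
 *	} while (mftbu() != hi);
 */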
	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)
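/* The four SPRGs are kernel scratch registers (used, among other
 * things, by the exception entry code), so they have to survive the
 * suspend/resume cycle like the rest of the context.
 */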
	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)
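/* Each SL_xBATn slot is 8 bytes: the upper BAT word at offset 0 and
 * the lower word at offset 4, matching the 8-byte stride of the
 * defines at the top of the file.
 */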
#if 0
	/* Back up various CPU configuration state */
	bl	__save_cpu_setup
#endif
	/* Call the low level suspend stuff (we should probably
	 * have made a stackframe...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr
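/* Note: swsusp_arch_resume() below ends by reloading SL_LR and
 * returning through it with r3 = 0, so after a resume, control pops
 * out at swsusp_arch_suspend's caller a second time, as if
 * swsusp_arch_suspend had just returned 0.
 */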
/* Resume code */
_GLOBAL(swsusp_arch_resume)
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will be
	 * unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 machines
	 * at this point. The G5 will need a better approach,
	 * possibly a small temporary hash table filled with large
	 * mappings; disabling the MMU completely isn't a good
	 * option for performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 here; we should investigate using moving BATs
	 * for those CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26	/* clear MSR_DR (bit 27) */
	mtmsr	r0
	sync
	isync
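/* From here until the MMU is re-enabled, data accesses use physical
 * addresses, which is why the code below runs every pointer through
 * tophys() before dereferencing it.
 */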
	/* Load a pointer to the list of pages to copy into r10.
	 * pagedir_nosave is a kernel virtual address; subtracting
	 * KERNELBASE (0xc0000000) from the high half gives its
	 * physical address, and the low half is unaffected.
	 */
	lis	r11,(pagedir_nosave - KERNELBASE)@h
	ori	r11,r11,pagedir_nosave@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache efficient. Each page
	 * is copied as 256 iterations of 16 bytes (4 words), i.e.
	 * one 4096-byte page per list entry.
	 */
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)		/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b
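/* Ignoring the tophys() virtual-to-physical translation, the list
 * walk above is roughly this C sketch (the struct field names follow
 * the pbe_* asm offsets used here):
 *
 *	for (struct pbe *p = pagedir_nosave; p; p = p->next)
 *		memcpy(p->orig_address, p->address, 4096);
 */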
	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync
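/* Both loops cover 0x20000 lines of 32 bytes each, i.e. the first 4MB
 * of RAM: the load loop pulls those lines through the L1 data cache
 * (displacing anything left dirty by the page copy), and the dcbf
 * loop then flushes them to memory so instruction fetches see the
 * restored kernel text.
 */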
	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get at the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU configuration state */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1, then we can turn on the MMU.
	 * This is a bit hairy as we are executing through those
	 * very BATs, but first, our code is probably in the icache,
	 * and second, we are writing the same value back into each
	 * BAT, so this should be fine, though a better solution
	 * will have to be found long-term
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4
#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif
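/* The BAT restore above is compiled out, presumably because the
 * booted kernel and the restored image are assumed identical (see
 * the comment earlier), so the BATs already hold the right values.
 */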
BEGIN_FTR_SECTION
	/* Clear the extra BATs (4..7) on CPUs that have them */
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
	/* Flush all TLBs, counting down from 0x10000000 to 0 in
	 * page-sized (0x1000) steps
	 */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync
	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore the timebase: clear TBL first, so a carry out of
	 * TBL can't corrupt TBU while we are reloading it
	 */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick the decrementer so it fires (almost) immediately */
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume
	li	r3,0
	blr
/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi
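/* rfi loads SRR0 into the program counter and SRR1 into the MSR, so
 * this returns to the caller (whose LR was copied into SRR0) with the
 * saved MSR value, and hence the MMU, back in effect.
 */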