/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
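/*
 * li can only materialize a sign-extended 16-bit immediate, so an MSR
 * value of 0x10000 or more has to be built with a lis/ori pair instead.
 */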
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)   lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)   li r,(x)
#endif
#ifdef CONFIG_BOOKE
        .globl  mcheck_transfer_to_handler
mcheck_transfer_to_handler:
        mfspr   r0,SPRN_DSRR0
        stw     r0,_DSRR0(r11)
        mfspr   r0,SPRN_DSRR1
        stw     r0,_DSRR1(r11)
        /* fall through */

        .globl  debug_transfer_to_handler
debug_transfer_to_handler:
        mfspr   r0,SPRN_CSRR0
        stw     r0,_CSRR0(r11)
        mfspr   r0,SPRN_CSRR1
        stw     r0,_CSRR1(r11)
        /* fall through */

        .globl  crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
        mfspr   r0,SPRN_MAS0
        stw     r0,MAS0(r11)
        mfspr   r0,SPRN_MAS1
        stw     r0,MAS1(r11)
        mfspr   r0,SPRN_MAS2
        stw     r0,MAS2(r11)
        mfspr   r0,SPRN_MAS3
        stw     r0,MAS3(r11)
        mfspr   r0,SPRN_MAS6
        stw     r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
        mfspr   r0,SPRN_MAS7
        stw     r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
        mfspr   r0,SPRN_MMUCR
        stw     r0,MMUCR(r11)
#endif
        mfspr   r0,SPRN_SRR0
        stw     r0,_SRR0(r11)
        mfspr   r0,SPRN_SRR1
        stw     r0,_SRR1(r11)

        /* set the stack limit to the current stack
         * and set the limit to protect the thread_info
         * struct
         */
        mfspr   r8,SPRN_SPRG_THREAD
        lwz     r0,KSP_LIMIT(r8)
        stw     r0,SAVED_KSP_LIMIT(r11)
        rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
        stw     r0,KSP_LIMIT(r8)
        /* fall through */
#endif

#ifdef CONFIG_40x
        .globl  crit_transfer_to_handler
crit_transfer_to_handler:
        lwz     r0,crit_r10@l(0)
        stw     r0,GPR10(r11)
        lwz     r0,crit_r11@l(0)
        stw     r0,GPR11(r11)
        mfspr   r0,SPRN_SRR0
        stw     r0,crit_srr0@l(0)
        mfspr   r0,SPRN_SRR1
        stw     r0,crit_srr1@l(0)

        /* set the stack limit to the current stack
         * and set the limit to protect the thread_info
         * struct
         */
        mfspr   r8,SPRN_SPRG_THREAD
        lwz     r0,KSP_LIMIT(r8)
        stw     r0,saved_ksp_limit@l(0)
        rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
        stw     r0,KSP_LIMIT(r8)
        /* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
        .globl  transfer_to_handler_full
transfer_to_handler_full:
        SAVE_NVGPRS(r11)
        /* fall through */

        .globl  transfer_to_handler
transfer_to_handler:
        stw     r2,GPR2(r11)
        stw     r12,_NIP(r11)
        stw     r9,_MSR(r11)
        andi.   r2,r9,MSR_PR
        mfctr   r12
        mfspr   r2,SPRN_XER
        stw     r12,_CTR(r11)
        stw     r2,_XER(r11)
        mfspr   r12,SPRN_SPRG_THREAD
        addi    r2,r12,-THREAD
        tovirt(r2,r2)                   /* set r2 to current */
        beq     2f                      /* if from user, fix up THREAD.regs */
        addi    r11,r1,STACK_FRAME_OVERHEAD
        stw     r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
        /* Check to see if the dbcr0 register is set up to debug.  Use the
           internal debug mode bit to do this. */
        lwz     r12,THREAD_DBCR0(r12)
        andis.  r12,r12,DBCR0_IDM@h
        beq+    3f
        /* From user and task is ptraced - load up global dbcr0 */
        li      r12,-1                  /* clear all pending debug events */
        mtspr   SPRN_DBSR,r12
        lis     r11,global_dbcr0@ha
        tophys(r11,r11)
        addi    r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r9,TI_CPU(r9)
        slwi    r9,r9,3
        add     r11,r11,r9
#endif
        lwz     r12,0(r11)
        mtspr   SPRN_DBCR0,r12
        lwz     r12,4(r11)
        addi    r12,r12,-1
        stw     r12,4(r11)
#endif
        b       3f

2:      /* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
        lwz     r9,KSP_LIMIT(r12)
        cmplw   r1,r9                   /* if r1 <= ksp_limit */
        ble-    stack_ovf               /* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
        CURRENT_THREAD_INFO(r9, r1)
        tophys(r9,r9)                   /* check local flags */
        lwz     r12,TI_LOCAL_FLAGS(r9)
        mtcrf   0x01,r12
        bt-     31-TLF_NAPPING,4f
        bt-     31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
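/*
 * The exception prologs (the EXC_XFER_* macros in head_32.S and
 * head_booke.h) follow their "bl transfer_to_handler" with two words:
 * the virtual address of the handler, and the address to return to
 * once the handler is done.  LR therefore points at that pair, which
 * is picked up below.
 */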
        .globl  transfer_to_handler_cont
transfer_to_handler_cont:
3:
        mflr    r9
        lwz     r11,0(r9)               /* virtual address of handler */
        lwz     r9,4(r9)                /* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
        lis     r12,reenable_mmu@h
        ori     r12,r12,reenable_mmu@l
        mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r10
        SYNC
        RFI
reenable_mmu:                           /* re-enable mmu so we can */
        mfmsr   r10
        lwz     r12,_MSR(r1)
        xor     r10,r10,r12
        andi.   r10,r10,MSR_EE          /* Did EE change? */
        beq     1f

        /*
         * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
         * If from user mode there is only one stack frame on the stack, and
         * accessing CALLER_ADDR1 will cause an oops, so we need to create a
         * dummy stack frame to make trace_hardirqs_off happy.
         *
         * This is handy because we also need to save a bunch of GPRs:
         * r3 can be different from GPR3(r1) at this point, r9 and r11
         * contain the old MSR and handler address respectively,
         * r4 & r5 can contain page fault arguments that need to be passed
         * along as well.  r12, CCR, CTR, XER etc... are left clobbered as
         * they aren't useful past this point (aren't syscall arguments),
         * the rest is restored from the exception frame.
         */
        stwu    r1,-32(r1)
        stw     r9,8(r1)
        stw     r11,12(r1)
        stw     r3,16(r1)
        stw     r4,20(r1)
        stw     r5,24(r1)
        bl      trace_hardirqs_off
        lwz     r5,24(r1)
        lwz     r4,20(r1)
        lwz     r3,16(r1)
        lwz     r11,12(r1)
        lwz     r9,8(r1)
        addi    r1,r1,32
        lwz     r0,GPR0(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
1:      mtctr   r11
        mtlr    r9
        bctr                            /* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r10
        mtlr    r9
        SYNC
        RFI                             /* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined(CONFIG_6xx) || defined(CONFIG_E500)
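/*
 * These labels are reached from the TLF_NAPPING and TLF_SLEEPING tests
 * in transfer_to_handler above, when the exception interrupted the
 * kernel's power-save idle sequence.
 */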
4:      rlwinm  r12,r12,0,~_TLF_NAPPING
        stw     r12,TI_LOCAL_FLAGS(r9)
        b       power_save_ppc32_restore

7:      rlwinm  r12,r12,0,~_TLF_SLEEPING
        stw     r12,TI_LOCAL_FLAGS(r9)
        lwz     r9,_MSR(r11)            /* if sleeping, clear MSR.EE */
        rlwinm  r9,r9,0,~MSR_EE
        lwz     r12,_LINK(r11)          /* and return to address in LR */
        b       fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
        /* sometimes we use a statically-allocated stack, which is OK. */
        lis     r12,_end@h
        ori     r12,r12,_end@l
        cmplw   r1,r12
        ble     5b                      /* r1 <= &_end is OK */
        SAVE_NVGPRS(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r1,init_thread_union@ha
        addi    r1,r1,init_thread_union@l
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        lis     r9,StackOverflow@ha
        addi    r9,r9,StackOverflow@l
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        FIX_SRR1(r10,r12)
        mtspr   SPRN_SRR0,r9
        mtspr   SPRN_SRR1,r10
        SYNC
        RFI

/*
 * Handle a system call.
 */
        .stabs  "arch/powerpc/kernel/",N_SO,0,0,0f
        .stabs  "entry_32.S",N_SO,0,0,0f
0:
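/*
 * On entry the exception frame is already set up: r0 holds the syscall
 * number and r3-r8 hold up to six syscall arguments, as per the PPC32
 * syscall ABI.
 */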
_GLOBAL(DoSyscall)
        stw     r3,ORIG_GPR3(r1)
        li      r12,0
        stw     r12,RESULT(r1)
        lwz     r11,_CCR(r1)            /* Clear SO bit in CR */
        rlwinm  r11,r11,0,4,2
        stw     r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
        bl      do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Return from syscalls can (and generally will) hard enable
         * interrupts.  You aren't supposed to call a syscall with
         * interrupts disabled in the first place.  However, to ensure
         * that we get it right vs. lockdep if it happens, we force
         * that hard enable here with appropriate tracing if we see
         * that we have been called with interrupts off
         */
        mfmsr   r11
        andi.   r12,r11,MSR_EE
        bne+    1f
        /* We came in with interrupts disabled, we enable them now */
        bl      trace_hardirqs_on
        mfmsr   r11
        lwz     r0,GPR0(r1)
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        ori     r11,r11,MSR_EE
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        mtmsr   r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
        CURRENT_THREAD_INFO(r10, r1)
        lwz     r11,TI_FLAGS(r10)
        andi.   r11,r11,_TIF_SYSCALL_T_OR_A
        bne-    syscall_dotrace
syscall_dotrace_cont:
        cmplwi  0,r0,NR_syscalls
        lis     r10,sys_call_table@h
        ori     r10,r10,sys_call_table@l
        slwi    r0,r0,2
        bge-    66f
        lwzx    r10,r10,r0              /* Fetch system call handler [ptr] */
        mtlr    r10
        addi    r9,r1,STACK_FRAME_OVERHEAD
        PPC440EP_ERR42
        blrl                            /* Call handler */

        .globl  ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
        bl      do_show_syscall_exit
#endif
        mr      r6,r3
        CURRENT_THREAD_INFO(r12, r1)
        /* disable interrupts so current_thread_info()->flags can't change */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
        /* Note: We don't bother telling lockdep about it */
        SYNC
        MTMSRD(r10)
        lwz     r9,TI_FLAGS(r12)
        li      r8,-_LAST_ERRNO
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    syscall_exit_work
        cmplw   0,r3,r8
        blt+    syscall_exit_cont
        lwz     r11,_CCR(r1)            /* Load CR */
        neg     r3,r3
        oris    r11,r11,0x1000          /* Set SO bit in CR */
        stw     r11,_CCR(r1)
syscall_exit_cont:
        lwz     r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
        /* If we are going to return from the syscall with interrupts
         * off, we trace that here.  It shouldn't happen, but we want
         * to catch the bugger if it does.
         */
        andi.   r10,r8,MSR_EE
        bne+    1f
        stw     r3,GPR3(r1)
        bl      trace_hardirqs_off
        lwz     r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        /* If the process has its own DBCR0 value, load it up.  The internal
           debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
        andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        bne-    2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
        lwarx   r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */
        lwz     r4,_LINK(r1)
        lwz     r5,_CCR(r1)
        mtlr    r4
        mtcr    r5
        lwz     r7,_NIP(r1)
        FIX_SRR1(r8, r0)
        lwz     r2,GPR2(r1)
        lwz     r1,GPR1(r1)
        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r8
        SYNC
        RFI
#ifdef CONFIG_44x
2:      li      r7,0
        iccci   r0,r0
        stw     r7,icache_44x_need_flush@l(r4)
        b       1b
#endif /* CONFIG_44x */
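/* The syscall number was out of range, so fail with -ENOSYS. */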
66:     li      r3,-ENOSYS
        b       ret_from_syscall

        .globl  ret_from_fork
ret_from_fork:
        REST_NVGPRS(r1)
        bl      schedule_tail
        li      r3,0
        b       ret_from_syscall

/* Traced system call support */
syscall_dotrace:
        SAVE_NVGPRS(r1)
        li      r0,0xc00
        stw     r0,_TRAP(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_enter
        /*
         * Restore argument registers possibly just changed.
         * We use the return value of do_syscall_trace_enter
         * for call number to look up in the table (r0).
         */
        mr      r0,r3
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        REST_NVGPRS(r1)
        b       syscall_dotrace_cont

syscall_exit_work:
        andi.   r0,r9,_TIF_RESTOREALL
        beq+    0f
        REST_NVGPRS(r1)
        b       2f
0:      cmplw   0,r3,r8
        blt+    1f
        andi.   r0,r9,_TIF_NOERROR
        bne-    1f
        lwz     r11,_CCR(r1)            /* Load CR */
        neg     r3,r3
        oris    r11,r11,0x1000          /* Set SO bit in CR */
        stw     r11,_CCR(r1)

1:      stw     r6,RESULT(r1)           /* Save result */
        stw     r3,GPR3(r1)             /* Update return value */
2:      andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
        beq     4f

        /* Clear per-syscall TIF flags if any are set. */
        li      r11,_TIF_PERSYSCALL_MASK
        addi    r12,r12,TI_FLAGS
3:      lwarx   r8,0,r12
        andc    r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
        dcbt    0,r12
#endif
        stwcx.  r8,0,r12
        bne-    3b
        subi    r12,r12,TI_FLAGS

4:      /* Anything which requires enabling interrupts? */
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
        beq     ret_from_except

        /* Re-enable interrupts.  There is no need to trace that with
         * lockdep as we are supposed to have IRQs on at this point
         */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)

        /* Save NVGPRS if they're not saved already */
        lwz     r4,_TRAP(r1)
        andi.   r4,r4,1
        beq     5f
        SAVE_NVGPRS(r1)
        li      r4,0xc00
        stw     r4,_TRAP(r1)
5:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_leave
        b       ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
        lis     r11,show_syscalls_task@ha
        lwz     r11,show_syscalls_task@l(r11)
        cmp     0,r2,r11
        bnelr
#endif
        stw     r31,GPR31(r1)
        mflr    r31
        lis     r3,7f@ha
        addi    r3,r3,7f@l
        lwz     r4,GPR0(r1)
        lwz     r5,GPR3(r1)
        lwz     r6,GPR4(r1)
        lwz     r7,GPR5(r1)
        lwz     r8,GPR6(r1)
        lwz     r9,GPR7(r1)
        bl      printk
        lis     r3,77f@ha
        addi    r3,r3,77f@l
        lwz     r4,GPR8(r1)
        mr      r5,r2
        bl      printk
        lwz     r0,GPR0(r1)
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        mtlr    r31
        lwz     r31,GPR31(r1)
        blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
        lis     r11,show_syscalls_task@ha
        lwz     r11,show_syscalls_task@l(r11)
        cmp     0,r2,r11
        bnelr
#endif
        stw     r31,GPR31(r1)
        mflr    r31
        stw     r3,RESULT(r1)           /* Save result */
        mr      r4,r3
        lis     r3,79f@ha
        addi    r3,r3,79f@l
        bl      printk
        lwz     r3,RESULT(r1)
        mtlr    r31
        lwz     r31,GPR31(r1)
        blr

7:      .string "syscall %d(%x, %x, %x, %x, %x, "
77:     .string "%x), current=%p\n"
79:     .string " -> %x\n"
        .align  2,0

#ifdef SHOW_SYSCALLS_TASK
        .data
        .globl  show_syscalls_task
show_syscalls_task:
        .long   -1
        .text
#endif
#endif /* SHOW_SYSCALLS */
/*
 * The fork/clone functions need to copy the full register set into
 * the child process.  Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
        .globl  ppc_fork
ppc_fork:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_fork

        .globl  ppc_vfork
ppc_vfork:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_vfork

        .globl  ppc_clone
ppc_clone:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_clone

        .globl  ppc_swapcontext
ppc_swapcontext:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
        .globl  handle_page_fault
handle_page_fault:
        stw     r4,_DAR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_page_fault
        cmpwi   r3,0
        beq+    ret_from_except
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        clrrwi  r0,r0,1
        stw     r0,_TRAP(r1)
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lwz     r4,_DAR(r1)
        bl      bad_page_fault
        b       ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
        /* r3-r12 are caller saved -- Cort */
        SAVE_NVGPRS(r1)
        stw     r0,_NIP(r1)             /* Return to switch caller */
        mfmsr   r11
        li      r0,MSR_FP               /* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_VEC@h         /* Disable altivec */
        mfspr   r12,SPRN_VRSAVE         /* save vrsave register value */
        stw     r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_SPE@h         /* Disable SPE */
        mfspr   r12,SPRN_SPEFSCR        /* save spefscr register value */
        stw     r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
        and.    r0,r0,r11               /* FP or altivec or SPE enabled? */
        beq+    1f
        andc    r11,r11,r0
        MTMSRD(r11)
        isync
1:      stw     r11,_MSR(r1)
        mfcr    r10
        stw     r10,_CCR(r1)
        stw     r1,KSP(r3)              /* Set old stack pointer */
#ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
         * stores it has performed on this one.
         */
        sync
#endif /* CONFIG_SMP */
        tophys(r0,r4)
        CLR_TOP32(r0)
        mtspr   SPRN_SPRG_THREAD,r0     /* Update current THREAD phys addr */
        lwz     r1,KSP(r4)              /* Load new stack pointer */
        /* save the old current 'last' for return value */
        mr      r3,r2
        addi    r2,r4,-THREAD           /* Update current */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_VRSAVE(r2)
        mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_SPEFSCR(r2)
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
        lwz     r0,_CCR(r1)
        mtcrf   0xFF,r0
        /* r3-r12 are destroyed -- Cort */
        REST_NVGPRS(r1)
        lwz     r4,_NIP(r1)             /* Return to _switch caller in new task */
        mtlr    r4
        addi    r1,r1,INT_FRAME_SIZE
        blr
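/*
 * fast_exception_return is entered with r11 pointing at the exception
 * frame, r9 holding the MSR value to restore and r12 holding the
 * address to return to.
 */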
        .globl  fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        andi.   r10,r9,MSR_RI           /* check for recoverable interrupt */
        beq     1f                      /* if not, we've got problems */
#endif

2:      REST_4GPRS(3, r11)
        lwz     r10,_CCR(r11)
        REST_GPR(1, r11)
        mtcr    r10
        lwz     r10,_LINK(r11)
        mtlr    r10
        REST_GPR(10, r11)
        mtspr   SPRN_SRR1,r9
        mtspr   SPRN_SRR0,r12
        REST_GPR(9, r11)
        REST_GPR(12, r11)
        lwz     r11,GPR11(r11)
        SYNC
        RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:      lis     r3,exc_exit_restart_end@ha
        addi    r3,r3,exc_exit_restart_end@l
        cmplw   r12,r3
        bge     3f
        lis     r4,exc_exit_restart@ha
        addi    r4,r4,exc_exit_restart@l
        cmplw   r12,r4
        blt     3f
        lis     r3,fee_restarts@ha
        tophys(r3,r3)
        lwz     r5,fee_restarts@l(r3)
        addi    r5,r5,1
        stw     r5,fee_restarts@l(r3)
        mr      r12,r4                  /* restart at exc_exit_restart */
        b       2b

        .section .bss
        .align  2
fee_restarts:
        .space  4
        .previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
        b       2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
        li      r10,-1
        stw     r10,_TRAP(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r10,MSR_KERNEL@h
        ori     r10,r10,MSR_KERNEL@l
        bl      transfer_to_handler_full
        .long   nonrecoverable_exception
        .long   ret_from_except
#endif

        .globl  ret_from_except_full
ret_from_except_full:
        REST_NVGPRS(r1)
        /* fall through */

        .globl  ret_from_except
ret_from_except:
        /* Hard-disable interrupts so that current_thread_info()->flags
         * can't change between when we test it and when we return
         * from the interrupt. */
        /* Note: We don't bother telling lockdep about it */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC                            /* Some chip revs have problems here... */
        MTMSRD(r10)                     /* disable interrupts */
        lwz     r3,_MSR(r1)             /* Returning to user mode? */
        andi.   r0,r3,MSR_PR
        beq     resume_kernel

user_exc_return:                        /* r10 contains MSR_KERNEL here */
        /* Check current_thread_info()->flags */
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,_TIF_USER_WORK_MASK
        bne     do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        /* Check whether this process has its own DBCR0 value.  The internal
           debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
        andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
        b       restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
        /* check current_thread_info->preempt_count */
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r0,TI_PREEMPT(r9)
        cmpwi   0,r0,0                  /* if non-zero, just restore regs and return */
        bne     restore
        lwz     r0,TI_FLAGS(r9)
        andi.   r0,r0,_TIF_NEED_RESCHED
        beq+    restore
        andi.   r0,r3,MSR_EE            /* interrupts off? */
        beq     restore                 /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Lockdep thinks irqs are enabled, we need to call
         * preempt_schedule_irq with IRQs off, so we inform lockdep
         * now that we -did- turn them off already
         */
        bl      trace_hardirqs_off
#endif
1:      bl      preempt_schedule_irq
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r3,TI_FLAGS(r9)
        andi.   r0,r3,_TIF_NEED_RESCHED
        bne-    1b
#ifdef CONFIG_TRACE_IRQFLAGS
        /* And now, to properly rebalance the above, we tell lockdep they
         * are being turned back on, which will happen when we return
         */
        bl      trace_hardirqs_on
#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

        /* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
        b       1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        beq+    1f
        li      r6,0
        iccci   r0,r0
        stw     r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */

        lwz     r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Lockdep doesn't know about the fact that IRQs are temporarily turned
         * off in this assembly code while peeking at TI_FLAGS() and such.  However
         * we need to inform it if the exception turned interrupts off, and we
         * are about to turn them back on.
         *
         * The problem here sadly is that we don't know whether the exception was
         * one that turned interrupts off or not.  So we always tell lockdep about
         * turning them on here when we go back to wherever we came from with EE
         * on, even if that may mean some redundant calls being tracked.  Maybe later
         * we could encode what the exception did somewhere or test the exception
         * type in the pt_regs but that sounds overkill
         */
        andi.   r10,r9,MSR_EE
        beq     1f
        /*
         * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
         * which is the stack frame here, we need to force a stack frame
         * in case we came from user space.
         */
        stwu    r1,-32(r1)
        mflr    r0
        stw     r0,4(r1)
        stwu    r1,-32(r1)
        bl      trace_hardirqs_on
        lwz     r1,0(r1)
        lwz     r1,0(r1)
        lwz     r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
        lwz     r0,GPR0(r1)
        lwz     r2,GPR2(r1)
        REST_4GPRS(3, r1)
        REST_2GPRS(7, r1)

        lwz     r10,_XER(r1)
        lwz     r11,_CTR(r1)
        mtspr   SPRN_XER,r10
        mtctr   r11

        PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
        lwarx   r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        andi.   r10,r9,MSR_RI           /* check if this exception occurred */
        beql    nonrecoverable          /* at a bad place (MSR:RI = 0) */

        lwz     r10,_CCR(r1)
        lwz     r11,_LINK(r1)
        mtcrf   0xFF,r10
        mtlr    r11

        /*
         * Once we put values in SRR0 and SRR1, we are in a state
         * where exceptions are not recoverable, since taking an
         * exception will trash SRR0 and SRR1.  Therefore we clear the
         * MSR:RI bit to indicate this.  If we do take an exception,
         * we can't return to the point of the exception but we
         * can restart the exception exit path at the label
         * exc_exit_restart below.  -- paulus
         */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
        SYNC
        MTMSRD(r10)                     /* clear the RI bit */
        .globl exc_exit_restart
exc_exit_restart:
        lwz     r12,_NIP(r1)
        FIX_SRR1(r9,r10)
        mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r9
        REST_4GPRS(9, r1)
        lwz     r1,GPR1(r1)
        .globl exc_exit_restart_end
exc_exit_restart_end:
        SYNC
        RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
        /*
         * This is a bit different on 4xx/Book-E because it doesn't have
         * the RI bit in the MSR.
         * The TLB miss handler checks if we have interrupted
         * the exception exit path and restarts it if so
         * (well maybe one day it will... :).
         */
        lwz     r11,_LINK(r1)
        mtlr    r11
        lwz     r10,_CCR(r1)
        mtcrf   0xff,r10
        REST_2GPRS(9, r1)
        .globl exc_exit_restart
exc_exit_restart:
        lwz     r11,_NIP(r1)
        lwz     r12,_MSR(r1)
exc_exit_start:
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
        REST_2GPRS(11, r1)
        lwz     r1,GPR1(r1)
        .globl exc_exit_restart_end
exc_exit_restart_end:
        PPC405_ERR77_SYNC
        rfi
        b       .                       /* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR \
        /* avoid any possible TLB misses here by turning off MSR.DR, we \
         * assume the instructions here are mapped by a pinned TLB entry */ \
        li      r10,MSR_IR; \
        mtmsr   r10; \
        isync; \
        tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif
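/*
 * RET_FROM_EXC_LEVEL returns from a critical/debug/machine check level
 * exception: exc_lvl_srr0/exc_lvl_srr1 are the SPRs holding the saved
 * PC and MSR for that level, and exc_lvl_rfi is the matching
 * return-from-interrupt instruction (rfci, rfdi or rfmci).
 */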
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
        REST_NVGPRS(r1); \
        lwz     r3,_MSR(r1); \
        andi.   r3,r3,MSR_PR; \
        LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
        bne     user_exc_return; \
        lwz     r0,GPR0(r1); \
        lwz     r2,GPR2(r1); \
        REST_4GPRS(3, r1); \
        REST_2GPRS(7, r1); \
        lwz     r10,_XER(r1); \
        lwz     r11,_CTR(r1); \
        mtspr   SPRN_XER,r10; \
        mtctr   r11; \
        PPC405_ERR77(0,r1); \
        stwcx.  r0,0,r1;                /* to clear the reservation */ \
        lwz     r11,_LINK(r1); \
        mtlr    r11; \
        lwz     r10,_CCR(r1); \
        mtcrf   0xff,r10; \
        PPC_40x_TURN_OFF_MSR_DR; \
        lwz     r9,_DEAR(r1); \
        lwz     r10,_ESR(r1); \
        mtspr   SPRN_DEAR,r9; \
        mtspr   SPRN_ESR,r10; \
        lwz     r11,_NIP(r1); \
        lwz     r12,_MSR(r1); \
        mtspr   exc_lvl_srr0,r11; \
        mtspr   exc_lvl_srr1,r12; \
        lwz     r9,GPR9(r1); \
        lwz     r12,GPR12(r1); \
        lwz     r10,GPR10(r1); \
        lwz     r11,GPR11(r1); \
        lwz     r1,GPR1(r1); \
        PPC405_ERR77_SYNC; \
        exc_lvl_rfi; \
        b       .;                      /* prevent prefetch past exc_lvl_rfi */
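/*
 * RESTORE_xSRR moves a saved SRR pair from the exception frame back
 * into the corresponding SPRs; e.g. RESTORE_xSRR(CSRR0,CSRR1) reloads
 * SPRN_CSRR0/SPRN_CSRR1 from the frame's _CSRR0/_CSRR1 slots.
 */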
#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \
        lwz     r9,_##exc_lvl_srr0(r1); \
        lwz     r10,_##exc_lvl_srr1(r1); \
        mtspr   SPRN_##exc_lvl_srr0,r9; \
        mtspr   SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7 \
        lwz     r11,MAS7(r1); \
        mtspr   SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS \
        lwz     r9,MAS0(r1); \
        lwz     r10,MAS1(r1); \
        lwz     r11,MAS2(r1); \
        mtspr   SPRN_MAS0,r9; \
        lwz     r9,MAS3(r1); \
        mtspr   SPRN_MAS1,r10; \
        lwz     r10,MAS6(r1); \
        mtspr   SPRN_MAS2,r11; \
        mtspr   SPRN_MAS3,r9; \
        mtspr   SPRN_MAS6,r10; \
        RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS \
        lwz     r9,MMUCR(r1); \
        mtspr   SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
        .globl  ret_from_crit_exc
ret_from_crit_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lis     r10,saved_ksp_limit@ha;
        lwz     r10,saved_ksp_limit@l(r10);
        tovirt(r9,r9);
        stw     r10,KSP_LIMIT(r9)
        lis     r9,crit_srr0@ha;
        lwz     r9,crit_srr0@l(r9);
        lis     r10,crit_srr1@ha;
        lwz     r10,crit_srr1@l(r10);
        mtspr   SPRN_SRR0,r9;
        mtspr   SPRN_SRR1,r10;
        RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
        .globl  ret_from_crit_exc
ret_from_crit_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

        .globl  ret_from_debug_exc
ret_from_debug_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        lwz     r9,THREAD_INFO-THREAD(r9)
        CURRENT_THREAD_INFO(r10, r1)
        lwz     r10,TI_PREEMPT(r10)
        stw     r10,TI_PREEMPT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_xSRR(CSRR0,CSRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

        .globl  ret_from_mcheck_exc
ret_from_mcheck_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_xSRR(CSRR0,CSRR1);
        RESTORE_xSRR(DSRR0,DSRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
        mfmsr   r10                     /* first disable debug exceptions */
        rlwinm  r10,r10,0,~MSR_DE
        mtmsr   r10
        isync
        mfspr   r10,SPRN_DBCR0
        lis     r11,global_dbcr0@ha
        addi    r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r9,TI_CPU(r9)
        slwi    r9,r9,3
        add     r11,r11,r9
#endif
        stw     r10,0(r11)
        mtspr   SPRN_DBCR0,r0
        lwz     r10,4(r11)
        addi    r10,r10,1
        stw     r10,4(r11)
        li      r11,-1
        mtspr   SPRN_DBSR,r11           /* clear all pending debug events */
        blr
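/*
 * Per-CPU scratch area used by load_dbcr0 and transfer_to_handler:
 * two words per CPU, holding the saved DBCR0 value and a use count.
 */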
        .section .bss
        .align  4
global_dbcr0:
        .space  8*NR_CPUS
        .previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:                                /* r10 contains MSR_KERNEL here */
        andi.   r0,r9,_TIF_NEED_RESCHED
        beq     do_user_signal

do_resched:                             /* r10 contains MSR_KERNEL here */
        /* Note: We don't need to inform lockdep that we are enabling
         * interrupts here.  As far as it knows, they are already enabled
         */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)                     /* hard-enable interrupts */
        bl      schedule
recheck:
        /* Note: And we don't tell it we are disabling them again
         * neither.  Those disable/enable cycles used to peek at
         * TI_FLAGS aren't advertised.
         */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC
        MTMSRD(r10)                     /* disable interrupts */
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
        andi.   r0,r9,_TIF_USER_WORK_MASK
        beq     restore_user

do_user_signal:                         /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)                     /* hard-enable interrupts */
        /* save r13-r31 in the exception frame, if not already done */
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     2f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
2:      addi    r3,r1,STACK_FRAME_OVERHEAD
        mr      r4,r9
        bl      do_notify_resume
        REST_NVGPRS(r1)
        b       recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
        lis     r10,exc_exit_restart_end@ha
        addi    r10,r10,exc_exit_restart_end@l
        cmplw   r12,r10
        bge     3f
        lis     r11,exc_exit_restart@ha
        addi    r11,r11,exc_exit_restart@l
        cmplw   r12,r11
        blt     3f
        lis     r10,ee_restarts@ha
        lwz     r12,ee_restarts@l(r10)
        addi    r12,r12,1
        stw     r12,ee_restarts@l(r10)
        mr      r12,r11                 /* restart at exc_exit_restart */
        blr
3:      /* OK, we can't recover, kill this process */
        /* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
        blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     4f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
4:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      nonrecoverable_exception
        /* shouldn't return */
        b       4b

        .section .bss
        .align  2
ee_restarts:
        .space  4
        .previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
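/*
 * enter_rtas expects r3 to hold the physical address of the RTAS
 * argument buffer (see rtas_call() in rtas.c); r3 is passed through
 * to RTAS untouched.
 */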
_GLOBAL(enter_rtas)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
        LOAD_REG_ADDR(r4, rtas)
        lis     r6,1f@ha                /* physical return address for rtas */
        addi    r6,r6,1f@l
        tophys(r6,r6)
        tophys(r7,r1)
        lwz     r8,RTASENTRY(r4)
        lwz     r4,RTASBASE(r4)
        mfmsr   r9
        stw     r9,8(r1)
        LOAD_MSR_KERNEL(r0,MSR_KERNEL)
        SYNC                            /* disable interrupts so SRR0/1 */
        MTMSRD(r0)                      /* don't get trashed */
        li      r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
        mtlr    r6
        mtspr   SPRN_SPRG_RTAS,r7
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI
1:      tophys(r9,r1)
        lwz     r8,INT_FRAME_SIZE+4(r9) /* get return address */
        lwz     r9,8(r9)                /* original msr value */
        FIX_SRR1(r9,r0)
        addi    r1,r1,INT_FRAME_SIZE
        li      r0,0
        mtspr   SPRN_SPRG_RTAS,r0
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI                             /* return to caller */

        .globl  machine_check_in_rtas
machine_check_in_rtas:
        twi     31,0,0
        /* XXX load up BATs and panic */
#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
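/*
 * With dynamic ftrace, the compiler-generated _mcount call sites are
 * patched at runtime, so the default _mcount below simply returns to
 * its caller; ftrace_caller is what gets patched in when tracing is
 * enabled.
 */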
_GLOBAL(mcount)
_GLOBAL(_mcount)
/*
 * It is required that _mcount on PPC32 must preserve the
 * link register.  But we have r0 to play with.  We use r0
 * to push the return address back to the caller of mcount
 * into the ctr register, restore the link register and
 * then jump back using the ctr register.
 */
        mflr    r0
        mtctr   r0
        lwz     r0, 4(r1)
        mtlr    r0
        bctr

_GLOBAL(ftrace_caller)
        MCOUNT_SAVE_FRAME
        /* r3 ends up with link register */
        subi    r3, r3, MCOUNT_INSN_SIZE
        .globl ftrace_call
ftrace_call:
        bl      ftrace_stub
        nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        .globl ftrace_graph_call
ftrace_graph_call:
        b       ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
        MCOUNT_RESTORE_FRAME
        /* old link register ends up in ctr reg */
        bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)
        MCOUNT_SAVE_FRAME
        subi    r3, r3, MCOUNT_INSN_SIZE
        LOAD_REG_ADDR(r5, ftrace_trace_function)
        lwz     r5,0(r5)
        mtctr   r5
        bctrl
        nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        b       ftrace_graph_caller
#endif
        MCOUNT_RESTORE_FRAME
        bctr
#endif

_GLOBAL(ftrace_stub)
        blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
        /* load r4 with local address */
        lwz     r4, 44(r1)
        subi    r4, r4, MCOUNT_INSN_SIZE
        /* get the parent address */
        addi    r3, r1, 52
        bl      prepare_ftrace_return
        nop
        MCOUNT_RESTORE_FRAME
        /* old link register ends up in ctr reg */
        bctr

_GLOBAL(return_to_handler)
        /* need to save return values */
        stwu    r1, -32(r1)
        stw     r3, 20(r1)
        stw     r4, 16(r1)
        stw     r31, 12(r1)
        mr      r31, r1
        bl      ftrace_return_to_handler
        nop
        /* return value has real return address */
        mtlr    r3
        lwz     r3, 20(r1)
        lwz     r4, 16(r1)
        lwz     r31,12(r1)
        lwz     r1, 0(r1)
        /* Jump back to real return address */
        blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */