/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
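/*
 * li can only materialize a 16-bit sign-extended immediate, so an MSR
 * value of 0x10000 or more has to be built with an lis/ori pair;
 * LOAD_MSR_KERNEL picks whichever form is needed at build time.
 */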
#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* save the old stack limit, then set a new limit that
	 * protects the thread_info struct on the current stack
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
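	/* The rlwimi above splices the high-order bits of r1 into the
	 * saved limit, in effect rebasing the old limit's offset onto
	 * the THREAD_SIZE-aligned region of the current stack. */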
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* save the old stack limit, then set a new limit that
	 * protects the thread_info struct on the current stack
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
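	/* THREAD.regs now points at this exception frame, which is how
	 * ptrace and signal delivery later find the user register state. */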
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl	transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If from user mode there is only one stack frame on the stack, and
	 * accessing CALLER_ADDR1 will cause an oops, so we need to create a
	 * dummy stack frame to make trace_hardirqs_off happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively, and
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
	 * they aren't useful past this point (aren't syscall arguments),
	 * the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined(CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)		/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
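	/* The rlwinm above uses a wrap-around mask (MB=4, ME=2) that
	 * clears only bit 3 of the saved CR image: the summary-overflow
	 * bit of CR0, which user space reads as the syscall error flag. */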
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off.
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
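	/* Bounds-check the syscall number against NR_syscalls and index
	 * sys_call_table; entries are 4-byte pointers, hence the shift
	 * left by 2 before the lwzx. */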
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */

	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
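	/* Error convention: a handler reports failure by returning a
	 * value in the [-_LAST_ERRNO, -1] range. The unsigned compare
	 * against r8 catches that range; the value is negated to a
	 * positive errno and CR0.SO is set so user space sees failure. */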
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we want
	 * to catch the bugger if it does, right?
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	do_exit		# no return

	.globl	__ret_from_kernel_execve
__ret_from_kernel_execve:
	addi	r1,r3,-STACK_FRAME_OVERHEAD
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
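	/* The lwarx/stwcx. loop above clears the per-syscall TIF bits
	 * atomically against concurrent updates of TI_FLAGS; the dcbt
	 * under CONFIG_IBM405_ERR77 works around a 405 erratum that
	 * affects stwcx. */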
4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
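	/* Per the PPC32 ABI, the LR save word lives at offset 4 of the
	 * caller's frame, just above the back-chain word at offset 0. */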
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r8,TI_FLAGS(r9)
	andis.	r8,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src: current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs, but that sounds overkill.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
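	/* the two loads above walk the back-chain to pop both frames */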
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl	exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl	exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl	exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl	exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						\
	/* avoid any possible TLB misses here by turning off MSR.DR, we	\
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						\
	mtmsr	r10;							\
	isync;								\
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif
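/*
 * RET_FROM_EXC_LEVEL restores the full register state saved by the
 * *_transfer_to_handler code above and returns through the SRR pair
 * and rfi variant belonging to the exception level (rfci for critical,
 * rfdi for debug, rfmci for machine check).
 */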
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;
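/* The ## pasting in RESTORE_xSRR builds both the exception-frame offset
 * (e.g. _CSRR0) and the SPR name (e.g. SPRN_CSRR0) from one argument. */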
#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  On entry, r0
 * holds the DBCR0 value to load.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
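/* global_dbcr0 holds eight bytes per CPU: a saved DBCR0 word followed
 * by a use count, matching the slwi r9,r9,3 indexing used above. */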
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * either. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */
#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * PPC32 requires that _mcount preserve the link register.
	 * We have r0 to play with: use it to move the return address
	 * of mcount's caller into the ctr register, restore the link
	 * register, and then jump back through ctr.
	 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr
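/*
 * With CONFIG_DYNAMIC_FTRACE the _mcount above is just a return thunk;
 * call sites are patched at runtime to call ftrace_caller below instead.
 */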
_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl	ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)
	MCOUNT_SAVE_FRAME
	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)
	mtctr	r5
	bctrl
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	addi	r3, r1, 52

	bl	prepare_ftrace_return
	nop

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */