/* arch/powerpc/kernel/entry_64.S */

/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
/*
 * System calls.
 */
        .section ".toc","aw"
.SYS_CALL_TABLE:
        .tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
        .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
        .section ".text"
        .align 7

#undef SHOW_SYSCALLS

        .globl system_call_common
system_call_common:
        andi. r10,r12,MSR_PR
        mr r10,r1
        addi r1,r1,-INT_FRAME_SIZE
        beq- 1f
        ld r1,PACAKSAVE(r13)
1:      std r10,0(r1)
        std r11,_NIP(r1)
        std r12,_MSR(r1)
        std r0,GPR0(r1)
        std r10,GPR1(r1)
        beq 2f                  /* if from kernel mode */
        ACCOUNT_CPU_USER_ENTRY(r10, r11)
2:      std r2,GPR2(r1)
        std r3,GPR3(r1)
        mfcr r2
        std r4,GPR4(r1)
        std r5,GPR5(r1)
        std r6,GPR6(r1)
        std r7,GPR7(r1)
        std r8,GPR8(r1)
        li r11,0
        std r11,GPR9(r1)
        std r11,GPR10(r1)
        std r11,GPR11(r1)
        std r11,GPR12(r1)
        std r11,_XER(r1)
        std r11,_CTR(r1)
        std r9,GPR13(r1)
        mflr r10
        /*
         * This clears CR0.SO (bit 28), which is the error indication on
         * return from this system call.
         */
        rldimi r2,r11,28,(63-28)
        li r11,0xc01
        std r10,_LINK(r1)
        std r11,_TRAP(r1)
        std r3,ORIG_GPR3(r1)
        std r2,_CCR(r1)
        ld r2,PACATOC(r13)
        addi r9,r1,STACK_FRAME_OVERHEAD
        ld r11,exception_marker@toc(r2)
        std r11,-16(r9)         /* "regshere" marker */
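        /*
         * At this point r1 holds a fresh INT_FRAME_SIZE exception frame
         * on the kernel stack, and r9 points STACK_FRAME_OVERHEAD above
         * r1, at the saved register area (struct pt_regs). The
         * STACK_FRAME_REGS_MARKER word stored just below it is what lets
         * stack unwinders recognise this as an exception frame.
         */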
#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
        beq 33f
        /* if from user, see if there are any DTL entries to process */
        ld r10,PACALPPACAPTR(r13)       /* get ptr to VPA */
        ld r11,PACA_DTL_RIDX(r13)       /* get log read index */
        ld r10,LPPACA_DTLIDX(r10)       /* get log write index */
        cmpd cr1,r11,r10
        beq+ cr1,33f
        bl .accumulate_stolen_time
        REST_GPR(0,r1)
        REST_4GPRS(3,r1)
        REST_2GPRS(7,r1)
        addi r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
        /*
         * A syscall should always be called with interrupts enabled, so
         * we just unconditionally hard-enable here. When some kind of irq
         * tracing is used, we additionally check that this condition
         * actually holds.
         */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
        lbz r10,PACASOFTIRQEN(r13)
        xori r10,r10,1
1:      tdnei r10,0
        EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
        wrteei 1
#else
        ld r11,PACAKMSR(r13)
        ori r11,r11,MSR_EE
        mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
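        /*
         * Note: mtmsrd with L=1 updates only MSR[EE] and MSR[RI], which
         * avoids a full context-synchronising MSR write; that is why the
         * enable/disable paths in this file use the ",1" form.
         */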
        /*
         * We do need to set SOFTE in the stack frame or the return
         * from interrupt will be painful.
         */
        li r10,1
        std r10,SOFTE(r1)

#ifdef SHOW_SYSCALLS
        bl .do_show_syscall
        REST_GPR(0,r1)
        REST_4GPRS(3,r1)
        REST_2GPRS(7,r1)
        addi r9,r1,STACK_FRAME_OVERHEAD
#endif

        CURRENT_THREAD_INFO(r11, r1)
        ld r10,TI_FLAGS(r11)
        andi. r11,r10,_TIF_SYSCALL_T_OR_A
        bne- syscall_dotrace
.Lsyscall_dotrace_cont:
        cmpldi 0,r0,NR_syscalls
        bge- syscall_enosys
system_call:                    /* label this so stack traces look sane */
        /*
         * Need to vector to the 32-bit or default sys_call_table here,
         * based on the caller's run-mode / personality.
         */
        ld r11,.SYS_CALL_TABLE@toc(2)
        andi. r10,r10,_TIF_32BIT
        beq 15f
        addi r11,r11,8          /* use 32-bit syscall entries */
        clrldi r3,r3,32
        clrldi r4,r4,32
        clrldi r5,r5,32
        clrldi r6,r6,32
        clrldi r7,r7,32
        clrldi r8,r8,32
15:
        slwi r0,r0,4
        ldx r10,r11,r0          /* Fetch system call handler [ptr] */
        mtctr r10
        bctrl                   /* Call handler */
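        /*
         * Table layout implied by the arithmetic above: each syscall
         * occupies 16 bytes (slwi r0,r0,4), with the native 64-bit entry
         * at offset 0 and the 32-bit compat entry at offset 8 (hence the
         * "addi r11,r11,8" for _TIF_32BIT tasks).
         */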
syscall_exit:
        std r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
        bl .do_show_syscall_exit
        ld r3,RESULT(r1)
#endif
        CURRENT_THREAD_INFO(r12, r1)

        ld r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
        /* No MSR:RI on BookE */
        andi. r10,r8,MSR_RI
        beq- unrecov_restore
#endif
        /*
         * Disable interrupts so current_thread_info()->flags can't change,
         * and so that we don't get interrupted after loading SRR0/1.
         */
#ifdef CONFIG_PPC_BOOK3E
        wrteei 0
#else
        ld r10,PACAKMSR(r13)
        /*
         * For performance reasons we clear RI the same time that we
         * clear EE. We only need to clear RI just before we restore r13
         * below, but batching it with EE saves us one expensive mtmsrd call.
         * We have to be careful to restore RI if we branch anywhere from
         * here (eg syscall_exit_work).
         */
        li r9,MSR_RI
        andc r11,r10,r9
        mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */

        ld r9,TI_FLAGS(r12)
        li r11,-_LAST_ERRNO
        andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne- syscall_exit_work
        cmpld r3,r11
        ld r5,_CCR(r1)
        bge- syscall_error
.Lsyscall_error_cont:
        ld r7,_NIP(r1)
BEGIN_FTR_SECTION
        stdcx. r0,0,r1          /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
        andi. r6,r8,MSR_PR
        ld r4,_LINK(r1)

        beq- 1f
        ACCOUNT_CPU_USER_EXIT(r11, r12)
        HMT_MEDIUM_LOW_HAS_PPR
        ld r13,GPR13(r1)        /* only restore r13 if returning to usermode */
1:      ld r2,GPR2(r1)
        ld r1,GPR1(r1)
        mtlr r4
        mtcr r5
        mtspr SPRN_SRR0,r7
        mtspr SPRN_SRR1,r8
        RFI
        b .                     /* prevent speculative execution */
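        /*
         * The return itself is split across SRR0 (next instruction
         * address, loaded from _NIP) and SRR1 (MSR to resume with); RFI
         * restores both atomically. The "b ." afterwards is never
         * reached architecturally; it only stops the CPU speculating
         * past the RFI.
         */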
syscall_error:
        oris r5,r5,0x1000       /* Set SO bit in CR */
        neg r3,r3
        std r5,_CCR(r1)
        b .Lsyscall_error_cont
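        /*
         * Error convention: handlers return a negative errno in r3; this
         * path negates it and sets CR0.SO so userspace can tell an error
         * apart from a large success value. Roughly (a sketch, not the C
         * library's exact code), the libc side does:
         *
         *      if (cr0.SO) {           // syscall failed
         *              errno = r3;     // positive errno value
         *              return -1;
         *      }
         *      return r3;              // success
         */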
/* Traced system call support */
syscall_dotrace:
        bl .save_nvgprs
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl .do_syscall_trace_enter
        /*
         * Restore argument registers possibly just changed.
         * We use the return value of do_syscall_trace_enter
         * for the call number to look up in the table (r0).
         */
        mr r0,r3
        ld r3,GPR3(r1)
        ld r4,GPR4(r1)
        ld r5,GPR5(r1)
        ld r6,GPR6(r1)
        ld r7,GPR7(r1)
        ld r8,GPR8(r1)
        addi r9,r1,STACK_FRAME_OVERHEAD
        CURRENT_THREAD_INFO(r10, r1)
        ld r10,TI_FLAGS(r10)
        b .Lsyscall_dotrace_cont

syscall_enosys:
        li r3,-ENOSYS
        b syscall_exit

syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
        mtmsrd r10,1            /* Restore RI */
#endif
        /*
         * If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
         * If TIF_NOERROR is set, just save r3 as it is.
         */
        andi. r0,r9,_TIF_RESTOREALL
        beq+ 0f
        REST_NVGPRS(r1)
        b 2f
0:      cmpld r3,r11            /* r11 is -_LAST_ERRNO */
        blt+ 1f
        andi. r0,r9,_TIF_NOERROR
        bne- 1f
        ld r5,_CCR(r1)
        neg r3,r3
        oris r5,r5,0x1000       /* Set SO bit in CR */
        std r5,_CCR(r1)
1:      std r3,GPR3(r1)
2:      andi. r0,r9,(_TIF_PERSYSCALL_MASK)
        beq 4f

        /* Clear per-syscall TIF flags if any are set. */
        li r11,_TIF_PERSYSCALL_MASK
        addi r12,r12,TI_FLAGS
3:      ldarx r10,0,r12
        andc r10,r10,r11
        stdcx. r10,0,r12
        bne- 3b
        subi r12,r12,TI_FLAGS
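        /*
         * The ldarx/stdcx. sequence above is the standard PowerPC atomic
         * read-modify-write: ldarx loads and sets a reservation, stdcx.
         * stores only if the reservation still holds and clears CR0.EQ
         * (taking the bne-) if another store intervened, in which case
         * we simply retry.
         */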
4:      /* Anything else left to do? */
        SET_DEFAULT_THREAD_PPR(r3, r9)  /* Set thread.ppr = 3 */
        andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
        beq .ret_from_except_lite

        /* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
        wrteei 1
#else
        ld r10,PACAKMSR(r13)
        ori r10,r10,MSR_EE
        mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */

        bl .save_nvgprs
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl .do_syscall_trace_leave
        b .ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
        ld r11,_TRAP(r1)
        andi. r0,r11,1
        beqlr-
        SAVE_NVGPRS(r1)
        clrrdi r0,r11,1
        std r0,_TRAP(r1)
        blr
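        /*
         * Convention used here: bit 0 of the _TRAP value records whether
         * the non-volatile GPRs still need saving. An odd trap value
         * means they are not yet in the frame; save_nvgprs stores them
         * and then clears the bit (clrrdi) so a second call returns
         * immediately.
         */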
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
        bl .save_nvgprs
        bl .sys_fork
        b syscall_exit

_GLOBAL(ppc_vfork)
        bl .save_nvgprs
        bl .sys_vfork
        b syscall_exit

_GLOBAL(ppc_clone)
        bl .save_nvgprs
        bl .sys_clone
        b syscall_exit

_GLOBAL(ppc32_swapcontext)
        bl .save_nvgprs
        bl .compat_sys_swapcontext
        b syscall_exit

_GLOBAL(ppc64_swapcontext)
        bl .save_nvgprs
        bl .sys_swapcontext
        b syscall_exit

_GLOBAL(ret_from_fork)
        bl .schedule_tail
        REST_NVGPRS(r1)
        li r3,0
        b syscall_exit

_GLOBAL(ret_from_kernel_thread)
        bl .schedule_tail
        REST_NVGPRS(r1)
        li r3,0
        std r3,0(r1)
        ld r14,0(r14)
        mtlr r14
        mr r3,r15
        blrl
        li r3,0
        b syscall_exit

        .section ".toc","aw"
DSCR_DEFAULT:
        .tc dscr_default[TC],dscr_default

        .section ".text"

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
        .align 7
_GLOBAL(_switch)
        mflr r0
        std r0,16(r1)
        stdu r1,-SWITCH_FRAME_SIZE(r1)
        /* r3-r13 are caller saved -- Cort */
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
        mflr r20                /* Return to switch caller */
        mfmsr r22
        li r0,MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        oris r0,r0,MSR_VSX@h    /* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris r0,r0,MSR_VEC@h    /* Disable altivec */
        mfspr r24,SPRN_VRSAVE   /* save vrsave register value */
        std r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
        mfspr r25,SPRN_DSCR
        std r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
        and. r0,r0,r22
        beq+ 1f
        andc r22,r22,r0
        MTMSRD(r22)
        isync
1:      std r20,_NIP(r1)
        mfcr r23
        std r23,_CCR(r1)
        std r1,KSP(r3)          /* Set old stack pointer */

#ifdef CONFIG_SMP
        /*
         * We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
         * stores it has performed on this one.
         */
        sync
#endif /* CONFIG_SMP */

        /*
         * If we optimise away the clear of the reservation in system
         * calls because we know the CPU tracks the address of the
         * reservation, then we need to clear it here to cover the
         * case that the kernel context switch path has no larx
         * instructions.
         */
BEGIN_FTR_SECTION
        ldarx r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

        addi r6,r4,-THREAD      /* Convert THREAD to 'current' */
        std r6,PACACURRENT(r13) /* Set new 'current' */

        ld r8,KSP(r4)           /* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
BEGIN_FTR_SECTION_NESTED(95)
        clrrdi r6,r8,28         /* get its ESID */
        clrrdi r9,r1,28         /* get current sp ESID */
FTR_SECTION_ELSE_NESTED(95)
        clrrdi r6,r8,40         /* get its 1T ESID */
        clrrdi r9,r1,40         /* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
        b 2f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
        clrldi. r0,r6,2         /* is new ESID c00000000? */
        cmpd cr1,r6,r9          /* or is new ESID the same as current ESID? */
        cror eq,4*cr1+eq,eq
        beq 2f                  /* if yes, don't slbie it */

        /* Bolt in the new stack SLB entry */
        ld r7,KSP_VSID(r4)      /* Get new stack's VSID */
        oris r0,r6,(SLB_ESID_V)@h
        ori r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
        li r9,MMU_SEGSIZE_1T    /* insert B field */
        oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
        rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

        /*
         * Update the last bolted SLB. No write barriers are needed
         * here, provided we only update the current CPU's SLB shadow
         * buffer.
         */
        ld r9,PACA_SLBSHADOWPTR(r13)
        li r12,0
        std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
        std r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
        std r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

        /*
         * No need to check for MMU_FTR_NO_SLBIE_B here, since when
         * we have 1TB segments, the only CPUs known to have the errata
         * only support less than 1TB of system memory and we'll never
         * actually hit this code path.
         */

        slbie r6
        slbie r6                /* Workaround POWER5 < DD2.1 issue */
        slbmte r7,r0
        isync
2:
#endif /* CONFIG_PPC_BOOK3S */
        CURRENT_THREAD_INFO(r7, r8)     /* base of new stack */
        /*
         * Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
         * because we don't need to leave the 288-byte ABI gap at the
         * top of the kernel stack.
         */
        addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

        mr r1,r8                /* start using new stack pointer */
        std r7,PACAKSAVE(r13)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        ld r0,THREAD_VRSAVE(r4)
        mtspr SPRN_VRSAVE,r0    /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
        lwz r6,THREAD_DSCR_INHERIT(r4)
        ld r7,DSCR_DEFAULT@toc(2)
        ld r0,THREAD_DSCR(r4)
        cmpwi r6,0
        bne 1f
        ld r0,0(r7)
1:      cmpd r0,r25
        beq 2f
        mtspr SPRN_DSCR,r0
2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

        ld r6,_CCR(r1)
        mtcrf 0xFF,r6

        /* r3-r13 are destroyed -- Cort */
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)

        /* convert old thread to its task_struct for return value */
        addi r3,r3,-THREAD
        ld r7,_NIP(r1)          /* Return to _switch caller in new task */
        mtlr r7
        addi r1,r1,SWITCH_FRAME_SIZE
        blr
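        /*
         * Rough C-level view of the contract (a sketch; the real caller
         * is __switch_to() in arch/powerpc/kernel/process.c):
         *
         *      struct task_struct *_switch(struct thread_struct *prev,
         *                                  struct thread_struct *next);
         *
         * The call returns on the *new* task's stack, with r3 holding
         * the old task (THREAD converted back to task_struct above).
         */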
        .align 7
_GLOBAL(ret_from_except)
        ld r11,_TRAP(r1)
        andi. r0,r11,1
        bne .ret_from_except_lite
        REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
        /*
         * Disable interrupts so that current_thread_info()->flags
         * can't change between when we test it and when we return
         * from the interrupt.
         */
#ifdef CONFIG_PPC_BOOK3E
        wrteei 0
#else
        ld r10,PACAKMSR(r13)    /* Get kernel MSR without EE */
        mtmsrd r10,1            /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

        CURRENT_THREAD_INFO(r9, r1)
        ld r3,_MSR(r1)
        ld r4,TI_FLAGS(r9)
        andi. r3,r3,MSR_PR
        beq resume_kernel

        /* Check current_thread_info()->flags */
        andi. r0,r4,_TIF_USER_WORK_MASK
        beq restore
        andi. r0,r4,_TIF_NEED_RESCHED
        beq 1f
        bl .restore_interrupts
        bl .schedule
        b .ret_from_except_lite

1:      bl .save_nvgprs
        bl .restore_interrupts
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl .do_notify_resume
        b .ret_from_except

resume_kernel:
        /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
        CURRENT_THREAD_INFO(r9, r1)
        ld r8,TI_FLAGS(r9)
        andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
        beq+ 1f

        addi r8,r1,INT_FRAME_SIZE       /* Get the kprobed function entry */

        lwz r3,GPR1(r1)
        subi r3,r3,INT_FRAME_SIZE       /* dst: Allocate a trampoline exception frame */
        mr r4,r1                        /* src: current exception frame */
        mr r1,r3                        /* Reroute the trampoline frame to r1 */

        /* Copy from the original to the trampoline. */
        li r5,INT_FRAME_SIZE/8          /* size: INT_FRAME_SIZE */
        li r6,0                         /* start offset: 0 */
        mtctr r5
2:      ldx r0,r6,r4
        stdx r0,r6,r3
        addi r6,r6,8
        bdnz 2b

        /* Do real store operation to complete stwu */
        lwz r5,GPR1(r1)
        std r8,0(r5)

        /* Clear _TIF_EMULATE_STACK_STORE flag */
        lis r11,_TIF_EMULATE_STACK_STORE@h
        addi r5,r9,TI_FLAGS
0:      ldarx r4,0,r5
        andc r4,r4,r11
        stdcx. r4,0,r5
        bne- 0b
1:
#ifdef CONFIG_PREEMPT
        /* Check if we need to preempt */
        andi. r0,r4,_TIF_NEED_RESCHED
        beq+ restore
        /* Check that preempt_count() == 0 and interrupts are enabled */
        lwz r8,TI_PREEMPT(r9)
        cmpwi cr1,r8,0
        ld r0,SOFTE(r1)
        cmpdi r0,0
        crandc eq,cr1*4+eq,eq
        bne restore

        /*
         * Here we are preempting the current task. We want to make
         * sure we are soft-disabled first
         */
        SOFT_DISABLE_INTS(r3,r4)
1:      bl .preempt_schedule_irq

        /* Re-test flags and eventually loop */
        CURRENT_THREAD_INFO(r9, r1)
        ld r4,TI_FLAGS(r9)
        andi. r0,r4,_TIF_NEED_RESCHED
        bne 1b
#endif /* CONFIG_PREEMPT */

        .globl fast_exc_return_irq
fast_exc_return_irq:
restore:
        /*
         * This is the main kernel exit path. First we check if we
         * are about to re-enable interrupts.
         */
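        /*
         * Background on the lazy interrupt scheme used below: the kernel
         * keeps a soft-enable flag in paca->soft_enabled
         * (PACASOFTIRQEN) and leaves interrupts hard-enabled where
         * possible. Interrupts that arrive while soft-disabled are
         * recorded in paca->irq_happened (PACAIRQHAPPENED) and replayed
         * when we soft-enable again.
         */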
        ld r5,SOFTE(r1)
        lbz r6,PACASOFTIRQEN(r13)
        cmpwi cr0,r5,0
        beq restore_irq_off

        /* We are enabling, were we already enabled? Yes, just return */
        cmpwi cr0,r6,1
        beq cr0,do_restore

        /*
         * We are about to soft-enable interrupts (we are hard disabled
         * at this point). We check if there's anything that needs to
         * be replayed first.
         */
        lbz r0,PACAIRQHAPPENED(r13)
        cmpwi cr0,r0,0
        bne- restore_check_irq_replay
        /*
         * Get here when nothing happened while soft-disabled, just
         * soft-enable and move on. We will hard-enable as a side
         * effect of rfi.
         */
restore_no_replay:
        TRACE_ENABLE_INTS
        li r0,1
        stb r0,PACASOFTIRQEN(r13);
        /*
         * Final return path. BookE is handled in a different file.
         */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
        b .exception_return_book3e
#else
        /*
         * Clear the reservation. If we know the CPU tracks the address of
         * the reservation then we can potentially save some cycles and use
         * a larx. On POWER6 and POWER7 this is significantly faster.
         */
BEGIN_FTR_SECTION
        stdcx. r0,0,r1          /* to clear the reservation */
FTR_SECTION_ELSE
        ldarx r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

        /*
         * Some code path such as load_up_fpu or altivec return directly
         * here. They run entirely hard disabled and do not alter the
         * interrupt state. They also don't use lwarx/stwcx. and thus
         * are known not to leave dangling reservations.
         */
        .globl fast_exception_return
fast_exception_return:
        ld r3,_MSR(r1)
        ld r4,_CTR(r1)
        ld r0,_LINK(r1)
        mtctr r4
        mtlr r0
        ld r4,_XER(r1)
        mtspr SPRN_XER,r4

        REST_8GPRS(5, r1)

        andi. r0,r3,MSR_RI
        beq- unrecov_restore

        /*
         * Clear RI before restoring r13. If we are returning to
         * userspace and we take an exception after restoring r13,
         * we end up corrupting the userspace r13 value.
         */
        ld r4,PACAKMSR(r13)     /* Get kernel MSR without EE */
        andc r4,r4,r0           /* r0 contains MSR_RI here */
        mtmsrd r4,1
        /*
         * r13 is our per cpu area; only restore it if we are returning
         * to userspace, since the value stored in the stack frame may
         * belong to another CPU.
         */
        andi. r0,r3,MSR_PR
        beq 1f
        ACCOUNT_CPU_USER_EXIT(r2, r4)
        RESTORE_PPR(r2, r4)
        REST_GPR(13, r1)
1:
        mtspr SPRN_SRR1,r3

        ld r2,_CCR(r1)
        mtcrf 0xFF,r2
        ld r2,_NIP(r1)
        mtspr SPRN_SRR0,r2

        ld r0,GPR0(r1)
        ld r2,GPR2(r1)
        ld r3,GPR3(r1)
        ld r4,GPR4(r1)
        ld r1,GPR1(r1)

        rfid
        b .                     /* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */
        /*
         * We are returning to a context with interrupts soft disabled.
         *
         * However, we may also be about to hard-enable, so we need to
         * make sure that in this case we also clear PACA_IRQ_HARD_DIS,
         * or that bit can get out of sync and bad things will happen.
         */
restore_irq_off:
        ld r3,_MSR(r1)
        lbz r7,PACAIRQHAPPENED(r13)
        andi. r0,r3,MSR_EE
        beq 1f
        rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
        stb r7,PACAIRQHAPPENED(r13)
1:      li r0,0
        stb r0,PACASOFTIRQEN(r13);
        TRACE_DISABLE_INTS
        b do_restore
        /*
         * Something did happen, check if a re-emit is needed
         * (this also clears paca->irq_happened)
         */
restore_check_irq_replay:
        /*
         * XXX: We could implement a fast path here where we check
         * for irq_happened being just 0x01, in which case we can
         * clear it and return. That means that we would potentially
         * miss a decrementer having wrapped all the way around.
         *
         * Still, this might be useful for things like hash_page.
         */
        bl .__check_irq_replay
        cmpwi cr0,r3,0
        beq restore_no_replay

        /*
         * We need to re-emit an interrupt. We do so by re-using our
         * existing exception frame. We first change the trap value,
         * but we need to ensure we preserve the low nibble of it.
         */
        ld r4,_TRAP(r1)
        clrldi r4,r4,60
        or r4,r4,r3
        std r4,_TRAP(r1)

        /*
         * Then find the right handler and call it. Interrupts are
         * still soft-disabled and we keep them that way.
         */
        cmpwi cr0,r3,0x500
        bne 1f
        addi r3,r1,STACK_FRAME_OVERHEAD;
        bl .do_IRQ
        b .ret_from_except
1:      cmpwi cr0,r3,0x900
        bne 1f
        addi r3,r1,STACK_FRAME_OVERHEAD;
        bl .timer_interrupt
        b .ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
        cmpwi cr0,r3,0x280
#else
BEGIN_FTR_SECTION
        cmpwi cr0,r3,0xe80
FTR_SECTION_ELSE
        cmpwi cr0,r3,0xa00
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
        bne 1f
        addi r3,r1,STACK_FRAME_OVERHEAD;
        bl .doorbell_exception
        b .ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1:      b .ret_from_except      /* What else to do here? */

unrecov_restore:
        addi r3,r1,STACK_FRAME_OVERHEAD
        bl .unrecoverable_exception
        b unrecov_restore
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
        mflr r0
        std r0,16(r1)
        stdu r1,-RTAS_FRAME_SIZE(r1)    /* Save SP and create stack space. */

        /*
         * Because RTAS is running in 32b mode, it clobbers the high order
         * half of all registers that it saves. We therefore save those
         * registers RTAS might touch to the stack. (r0, r3-r13 are caller
         * saved)
         */
        SAVE_GPR(2, r1)                 /* Save the TOC */
        SAVE_GPR(13, r1)                /* Save paca */
        SAVE_8GPRS(14, r1)              /* Save the non-volatiles */
        SAVE_10GPRS(22, r1)             /* ditto */

        mfcr r4
        std r4,_CCR(r1)
        mfctr r5
        std r5,_CTR(r1)
        mfspr r6,SPRN_XER
        std r6,_XER(r1)
        mfdar r7
        std r7,_DAR(r1)
        mfdsisr r8
        std r8,_DSISR(r1)

        /*
         * Temporary workaround to clear CR until RTAS can be modified to
         * ignore all bits.
         */
        li r0,0
        mtcr r0
#ifdef CONFIG_BUG
        /*
         * It is never acceptable to get here with interrupts enabled;
         * check it with the asm equivalent of WARN_ON.
         */
        lbz r0,PACASOFTIRQEN(r13)
1:      tdnei r0,0
        EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
        /* Hard-disable interrupts */
        mfmsr r6
        rldicl r7,r6,48,1
        rotldi r7,r7,16
        mtmsrd r7,1

        /*
         * Unfortunately, the stack pointer and the MSR are also
         * clobbered, so they are saved in the PACA, which allows us to
         * restore our original state after RTAS returns.
         */
        std r1,PACAR1(r13)
        std r6,PACASAVEDMSR(r13)

        /* Setup our real return addr */
        LOAD_REG_ADDR(r4,.rtas_return_loc)
        clrldi r4,r4,2          /* convert to realmode address */
        mtlr r4

        li r0,0
        ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
        andc r0,r6,r0

        li r9,1
        rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
        ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
        andc r6,r0,r9
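        /*
         * At this point r0 is the current MSR with EE/SE/BE/RI cleared
         * (used immediately below to hard-disable), and r6 additionally
         * has SF, IR, DR, FP, FE0/FE1 and RI cleared: the 32-bit,
         * real-mode MSR that RTAS will run with once rfid loads it from
         * SRR1.
         */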
        sync                    /* disable interrupts so SRR0/1 */
        mtmsrd r0               /* don't get trashed */

        LOAD_REG_ADDR(r4, rtas)
        ld r5,RTASENTRY(r4)     /* get the rtas->entry value */
        ld r4,RTASBASE(r4)      /* get the rtas->base value */

        mtspr SPRN_SRR0,r5
        mtspr SPRN_SRR1,r6
        rfid
        b .                     /* prevent speculative execution */

_STATIC(rtas_return_loc)
        /* relocation is off at this point */
        GET_PACA(r4)
        clrldi r4,r4,2          /* convert to realmode address */

        bcl 20,31,$+4
0:      mflr r3
        ld r3,(1f-0b)(r3)       /* get &.rtas_restore_regs */

        mfmsr r6
        li r0,MSR_RI
        andc r6,r6,r0
        sync
        mtmsrd r6

        ld r1,PACAR1(r4)        /* Restore our SP */
        ld r4,PACASAVEDMSR(r4)  /* Restore our MSR */

        mtspr SPRN_SRR0,r3
        mtspr SPRN_SRR1,r4
        rfid
        b .                     /* prevent speculative execution */

        .align 3
1:      .llong .rtas_restore_regs

_STATIC(rtas_restore_regs)
        /* relocation is on at this point */
        REST_GPR(2, r1)         /* Restore the TOC */
        REST_GPR(13, r1)        /* Restore paca */
        REST_8GPRS(14, r1)      /* Restore the non-volatiles */
        REST_10GPRS(22, r1)     /* ditto */

        GET_PACA(r13)

        ld r4,_CCR(r1)
        mtcr r4
        ld r5,_CTR(r1)
        mtctr r5
        ld r6,_XER(r1)
        mtspr SPRN_XER,r6
        ld r7,_DAR(r1)
        mtdar r7
        ld r8,_DSISR(r1)
        mtdsisr r8

        addi r1,r1,RTAS_FRAME_SIZE      /* Unstack our frame */
        ld r0,16(r1)                    /* get return address */

        mtlr r0
        blr                             /* return to caller */

#endif /* CONFIG_PPC_RTAS */
_GLOBAL(enter_prom)
        mflr r0
        std r0,16(r1)
        stdu r1,-PROM_FRAME_SIZE(r1)    /* Save SP and create stack space */

        /*
         * Because PROM is running in 32b mode, it clobbers the high order
         * half of all registers that it saves. We therefore save those
         * registers PROM might touch to the stack. (r0, r3-r13 are caller
         * saved)
         */
        SAVE_GPR(2, r1)
        SAVE_GPR(13, r1)
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
        mfcr r10
        mfmsr r11
        std r10,_CCR(r1)
        std r11,_MSR(r1)

        /* Get the PROM entrypoint */
        mtlr r4

        /* Switch MSR to 32 bits mode */
#ifdef CONFIG_PPC_BOOK3E
        rlwinm r11,r11,0,1,31
        mtmsr r11
#else /* CONFIG_PPC_BOOK3E */
        mfmsr r11
        li r12,1
        rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
        andc r11,r11,r12
        li r12,1
        rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
        andc r11,r11,r12
        mtmsrd r11
#endif /* CONFIG_PPC_BOOK3E */
        isync

        /* Enter PROM here... */
        blrl
        /*
         * Just make sure that r1's top 32 bits didn't get
         * corrupted by OF.
         */
        rldicl r1,r1,0,32

        /* Restore the MSR (back to 64 bits) */
        ld r0,_MSR(r1)
        MTMSRD(r0)
        isync

        /* Restore other registers */
        REST_GPR(2, r1)
        REST_GPR(13, r1)
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)
        ld r4,_CCR(r1)
        mtcr r4

        addi r1,r1,PROM_FRAME_SIZE
        ld r0,16(r1)
        mtlr r0
        blr
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
        blr

_GLOBAL(ftrace_caller)
        /* Taken from output of objdump from lib64/glibc */
        mflr r3
        ld r11, 0(r1)
        stdu r1, -112(r1)
        std r3, 128(r1)
        ld r4, 16(r11)
        subi r3, r3, MCOUNT_INSN_SIZE
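        /*
         * Calling convention here: the compiler-inserted mcount call
         * leaves LR pointing just past the call site, so r3 minus
         * MCOUNT_INSN_SIZE is the traced function's ip; r4, loaded from
         * the LR save slot (16(r11)) of the caller's frame, is the
         * parent ip. The 112-byte frame is the minimal ABI stack frame.
         */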
.globl ftrace_call
ftrace_call:
        bl ftrace_stub
        nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
        b ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
        ld r0, 128(r1)
        mtlr r0
        addi r1, r1, 112
_GLOBAL(ftrace_stub)
        blr
#else
_GLOBAL(mcount)
        blr

_GLOBAL(_mcount)
        /* Taken from output of objdump from lib64/glibc */
        mflr r3
        ld r11, 0(r1)
        stdu r1, -112(r1)
        std r3, 128(r1)
        ld r4, 16(r11)
        subi r3, r3, MCOUNT_INSN_SIZE
        LOAD_REG_ADDR(r5,ftrace_trace_function)
        ld r5,0(r5)
        ld r5,0(r5)
        mtctr r5
        bctrl
        nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        b ftrace_graph_caller
#endif
        ld r0, 128(r1)
        mtlr r0
        addi r1, r1, 112
_GLOBAL(ftrace_stub)
        blr
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
        /* load r4 with local address */
        ld r4, 128(r1)
        subi r4, r4, MCOUNT_INSN_SIZE

        /* get the parent address */
        ld r11, 112(r1)
        addi r3, r11, 16

        bl .prepare_ftrace_return
        nop

        ld r0, 128(r1)
        mtlr r0
        addi r1, r1, 112
        blr

_GLOBAL(return_to_handler)
        /* need to save return values */
        std r4, -24(r1)
        std r3, -16(r1)
        std r31, -8(r1)
        mr r31, r1
        stdu r1, -112(r1)

        bl .ftrace_return_to_handler
        nop

        /* return value has real return address */
        mtlr r3

        ld r1, 0(r1)
        ld r4, -24(r1)
        ld r3, -16(r1)
        ld r31, -8(r1)

        /* Jump back to real return address */
        blr
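        /*
         * How this trampoline is reached: prepare_ftrace_return()
         * (called from ftrace_graph_caller above) replaced the traced
         * function's saved return address with return_to_handler. When
         * the function returns we land here; ftrace_return_to_handler()
         * pops the real return address off the per-task return stack
         * and hands it back in r3, so the final blr returns to the
         * original caller.
         */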
_GLOBAL(mod_return_to_handler)
        /* need to save return values */
        std r4, -32(r1)
        std r3, -24(r1)
        /* save TOC */
        std r2, -16(r1)
        std r31, -8(r1)
        mr r31, r1
        stdu r1, -112(r1)

        /*
         * We are in a module using the module's TOC.
         * Switch to our TOC to run inside the core kernel.
         */
        ld r2, PACATOC(r13)

        bl .ftrace_return_to_handler
        nop

        /* return value has real return address */
        mtlr r3

        ld r1, 0(r1)
        ld r4, -32(r1)
        ld r3, -24(r1)
        ld r2, -16(r1)
        ld r31, -8(r1)

        /* Jump back to real return address */
        blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */