  1. /*
  2. * arch/xtensa/kernel/entry.S
  3. *
  4. * Low-level exception handling
  5. *
  6. * This file is subject to the terms and conditions of the GNU General Public
  7. * License. See the file "COPYING" in the main directory of this archive
  8. * for more details.
  9. *
  10. * Copyright (C) 2004 - 2008 by Tensilica Inc.
  11. *
  12. * Chris Zankel <chris@zankel.net>
  13. *
  14. */
  15. #include <linux/linkage.h>
  16. #include <asm/asm-offsets.h>
  17. #include <asm/processor.h>
  18. #include <asm/coprocessor.h>
  19. #include <asm/thread_info.h>
  20. #include <asm/uaccess.h>
  21. #include <asm/unistd.h>
  22. #include <asm/ptrace.h>
  23. #include <asm/current.h>
  24. #include <asm/pgtable.h>
  25. #include <asm/page.h>
  26. #include <asm/signal.h>
  27. #include <asm/tlbflush.h>
  28. #include <variant/tie-asm.h>
  29. /* Unimplemented features. */
  30. #undef KERNEL_STACK_OVERFLOW_CHECK
  31. #undef PREEMPTIBLE_KERNEL
  32. #undef ALLOCA_EXCEPTION_IN_IRAM
  33. /* Not well tested.
  34. *
  35. * - fast_coprocessor
  36. */
  37. /*
  38. * Macro to find first bit set in WINDOWBASE from the left + 1
  39. *
  40. * 100....0 -> 1
  41. * 010....0 -> 2
  42. * 000....1 -> WSBITS
  43. */
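/* Illustration (not in the original source), assuming WSBITS == 8: in the
 * NSAU-based variant of the macro below, \mask == 0x80 (leftmost bit of the
 * 8-bit field) gives NSAU == 24 and 24 + WSBITS - 32 + 1 == 1, while
 * \mask == 0x01 gives NSAU == 31 and 31 + WSBITS - 32 + 1 == 8 == WSBITS,
 * matching the table above.
 */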
  44. .macro ffs_ws bit mask
  45. #if XCHAL_HAVE_NSA
  46. nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0)
  47. addi \bit, \bit, WSBITS - 32 + 1 # uppermost bit set -> return 1
  48. #else
  49. movi \bit, WSBITS
  50. #if WSBITS > 16
  51. _bltui \mask, 0x10000, 99f
  52. addi \bit, \bit, -16
  53. extui \mask, \mask, 16, 16
  54. #endif
  55. #if WSBITS > 8
  56. 99: _bltui \mask, 0x100, 99f
  57. addi \bit, \bit, -8
  58. srli \mask, \mask, 8
  59. #endif
  60. 99: _bltui \mask, 0x10, 99f
  61. addi \bit, \bit, -4
  62. srli \mask, \mask, 4
  63. 99: _bltui \mask, 0x4, 99f
  64. addi \bit, \bit, -2
  65. srli \mask, \mask, 2
  66. 99: _bltui \mask, 0x2, 99f
  67. addi \bit, \bit, -1
  68. 99:
  69. #endif
  70. .endm
  71. /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
  72. /*
  73. * First-level exception handler for user exceptions.
  74. * Save some special registers, extra states and all registers in the AR
  75. * register file that were in use in the user task, and jump to the common
  76. * exception code.
  77. * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
  78. * save them for kernel exceptions).
  79. *
  80. * Entry condition for user_exception:
  81. *
  82. * a0: trashed, original value saved on stack (PT_AREG0)
  83. * a1: a1
  84. * a2: new stack pointer, original value in depc
  85. * a3: dispatch table
  86. * depc: a2, original value saved on stack (PT_DEPC)
  87. * excsave1: a3
  88. *
  89. * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  90. * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
  91. *
  92. * Entry condition for _user_exception:
  93. *
  94. * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
  95. * excsave has been restored, and
  96. * stack pointer (a1) has been set.
  97. *
  98. * Note: _user_exception might be at an odd address. Don't use call0..call12
  99. */
  100. ENTRY(user_exception)
  101. /* Save a2, a3, and depc, restore excsave_1 and set SP. */
  102. xsr a3, excsave1
  103. rsr a0, depc
  104. s32i a1, a2, PT_AREG1
  105. s32i a0, a2, PT_AREG2
  106. s32i a3, a2, PT_AREG3
  107. mov a1, a2
  108. .globl _user_exception
  109. _user_exception:
  110. /* Save SAR and turn off single stepping */
  111. movi a2, 0
  112. rsr a3, sar
  113. xsr a2, icountlevel
  114. s32i a3, a1, PT_SAR
  115. s32i a2, a1, PT_ICOUNTLEVEL
  116. #if XCHAL_HAVE_THREADPTR
  117. rur a2, threadptr
  118. s32i a2, a1, PT_THREADPTR
  119. #endif
  120. /* Rotate ws so that the current windowbase is at bit0. */
  121. /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
  122. rsr a2, windowbase
  123. rsr a3, windowstart
  124. ssr a2
  125. s32i a2, a1, PT_WINDOWBASE
  126. s32i a3, a1, PT_WINDOWSTART
  127. slli a2, a3, 32-WSBITS
  128. src a2, a3, a2
  129. srli a2, a2, 32-WSBITS
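/* Worked example (not in the original source), assuming WSBITS == 8: with
 * WINDOWBASE == 3 and WINDOWSTART == 0b00101001, the slli/src/srli sequence
 * above rotates windowstart right by 3, leaving a2 == 0b00100101 with the
 * current frame's bit in bit 0.
 */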
  130. s32i a2, a1, PT_WMASK # needed for restoring registers
  131. /* Save only live registers. */
  132. _bbsi.l a2, 1, 1f
  133. s32i a4, a1, PT_AREG4
  134. s32i a5, a1, PT_AREG5
  135. s32i a6, a1, PT_AREG6
  136. s32i a7, a1, PT_AREG7
  137. _bbsi.l a2, 2, 1f
  138. s32i a8, a1, PT_AREG8
  139. s32i a9, a1, PT_AREG9
  140. s32i a10, a1, PT_AREG10
  141. s32i a11, a1, PT_AREG11
  142. _bbsi.l a2, 3, 1f
  143. s32i a12, a1, PT_AREG12
  144. s32i a13, a1, PT_AREG13
  145. s32i a14, a1, PT_AREG14
  146. s32i a15, a1, PT_AREG15
  147. _bnei a2, 1, 1f # only one valid frame?
  148. /* Only one valid frame, skip saving regs. */
  149. j 2f
  150. /* Save the remaining registers.
  151. * We have to save all registers up to the first '1' from
  152. * the right, except the current frame (bit 0).
  153. * Assume a2 is: 001001000110001
  154. * All register frames starting from the top field to the marked '1'
  155. * must be saved.
  156. */
  157. 1: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
  158. neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
  159. and a3, a3, a2 # max. only one bit is set
  160. /* Find number of frames to save */
  161. ffs_ws a0, a3 # number of frames to the '1' from left
  162. /* Store information into WMASK:
  163. * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
  164. * bits 4...: number of valid 4-register frames
  165. */
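/* Worked example (not in the original source): for a rotated windowstart of
 * a2 == 0b00100101 (WSBITS == 8), the lowest set bit above bit 0 is bit 2,
 * so ffs_ws returns a0 == 8 - 2 == 6, the save loop below runs six times,
 * and WMASK becomes (6 << 4) | 0b0101 == 0x65.
 */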
  166. slli a3, a0, 4 # number of frames to save in bits 8..4
  167. extui a2, a2, 0, 4 # mask for the first 16 registers
  168. or a2, a3, a2
  169. s32i a2, a1, PT_WMASK # needed when we restore the reg-file
  170. /* Save 4 registers at a time */
  171. 1: rotw -1
  172. s32i a0, a5, PT_AREG_END - 16
  173. s32i a1, a5, PT_AREG_END - 12
  174. s32i a2, a5, PT_AREG_END - 8
  175. s32i a3, a5, PT_AREG_END - 4
  176. addi a0, a4, -1
  177. addi a1, a5, -16
  178. _bnez a0, 1b
  179. /* WINDOWBASE still in SAR! */
  180. rsr a2, sar # original WINDOWBASE
  181. movi a3, 1
  182. ssl a2
  183. sll a3, a3
  184. wsr a3, windowstart # set corresponding WINDOWSTART bit
  185. wsr a2, windowbase # and WINDOWBASE
  186. rsync
  187. /* We are back to the original stack pointer (a1) */
  188. 2: /* Now, jump to the common exception handler. */
  189. j common_exception
  190. ENDPROC(user_exception)
  191. /*
  192. * First-level exception handler for kernel exceptions
  193. * Save special registers and the live window frame.
  194. * Note: Even though we change the stack pointer, we don't have to do a
  195. * MOVSP here, as we do that when we return from the exception.
  196. * (See comment in the kernel exception exit code)
  197. *
  198. * Entry condition for kernel_exception:
  199. *
  200. * a0: trashed, original value saved on stack (PT_AREG0)
  201. * a1: a1
  202. * a2: new stack pointer, original in DEPC
  203. * a3: dispatch table
  204. * depc: a2, original value saved on stack (PT_DEPC)
  205. * excsave_1: a3
  206. *
  207. * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  208. * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
  209. *
  210. * Entry condition for _kernel_exception:
  211. *
  212. * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
  213. * excsave has been restored, and
  214. * stack pointer (a1) has been set.
  215. *
  216. * Note: _kernel_exception might be at an odd address. Don't use call0..call12
  217. */
  218. ENTRY(kernel_exception)
  219. /* Save a0, a2, a3, DEPC and set SP. */
  220. xsr a3, excsave1 # restore a3, excsave_1
  221. rsr a0, depc # get a2
  222. s32i a1, a2, PT_AREG1
  223. s32i a0, a2, PT_AREG2
  224. s32i a3, a2, PT_AREG3
  225. mov a1, a2
  226. .globl _kernel_exception
  227. _kernel_exception:
  228. /* Save SAR and turn off single stepping */
  229. movi a2, 0
  230. rsr a3, sar
  231. xsr a2, icountlevel
  232. s32i a3, a1, PT_SAR
  233. s32i a2, a1, PT_ICOUNTLEVEL
  234. /* Rotate ws so that the current windowbase is at bit0. */
  235. /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
  236. rsr a2, windowbase # don't need to save these, we only
  237. rsr a3, windowstart # need shifted windowstart: windowmask
  238. ssr a2
  239. slli a2, a3, 32-WSBITS
  240. src a2, a3, a2
  241. srli a2, a2, 32-WSBITS
  242. s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
  243. /* Save only the live window-frame */
  244. _bbsi.l a2, 1, 1f
  245. s32i a4, a1, PT_AREG4
  246. s32i a5, a1, PT_AREG5
  247. s32i a6, a1, PT_AREG6
  248. s32i a7, a1, PT_AREG7
  249. _bbsi.l a2, 2, 1f
  250. s32i a8, a1, PT_AREG8
  251. s32i a9, a1, PT_AREG9
  252. s32i a10, a1, PT_AREG10
  253. s32i a11, a1, PT_AREG11
  254. _bbsi.l a2, 3, 1f
  255. s32i a12, a1, PT_AREG12
  256. s32i a13, a1, PT_AREG13
  257. s32i a14, a1, PT_AREG14
  258. s32i a15, a1, PT_AREG15
  259. 1:
  260. #ifdef KERNEL_STACK_OVERFLOW_CHECK
  261. /* Stack overflow check, for debugging */
  262. extui a2, a1, TASK_SIZE_BITS,XX
  263. movi a3, SIZE??
  264. _bge a2, a3, out_of_stack_panic
  265. #endif
  266. /*
  267. * This is the common exception handler.
  268. * We get here from the user exception handler or simply by falling through
  269. * from the kernel exception handler.
  270. * Save the remaining special registers, switch to kernel mode, and jump
  271. * to the second-level exception handler.
  272. *
  273. */
  274. common_exception:
  275. /* Save some registers, disable loops and clear the syscall flag. */
  276. rsr a2, debugcause
  277. rsr a3, epc1
  278. s32i a2, a1, PT_DEBUGCAUSE
  279. s32i a3, a1, PT_PC
  280. movi a2, -1
  281. rsr a3, excvaddr
  282. s32i a2, a1, PT_SYSCALL
  283. movi a2, 0
  284. s32i a3, a1, PT_EXCVADDR
  285. xsr a2, lcount
  286. s32i a2, a1, PT_LCOUNT
  287. /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
  288. rsr a0, exccause
  289. movi a3, 0
  290. rsr a2, excsave1
  291. s32i a0, a1, PT_EXCCAUSE
  292. s32i a3, a2, EXC_TABLE_FIXUP
  293. /* All unrecoverable states are saved on the stack now, and a1 is valid,
  294. * so we can allow exceptions and interrupts (*) again.
  295. * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
  296. *
  297. * (*) We only allow interrupts of higher priority than current IRQ
  298. */
  299. rsr a3, ps
  300. addi a0, a0, -4
  301. movi a2, 1
  302. extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
  303. # a3 = PS.INTLEVEL
  304. movnez a2, a3, a3 # a2 = 1: level-1, > 1: high priority
  305. moveqz a3, a2, a0 # a3 = IRQ level iff interrupt
  306. movi a2, 1 << PS_WOE_BIT
  307. or a3, a3, a2
  308. rsr a0, exccause
  309. xsr a3, ps
  310. s32i a3, a1, PT_PS # save ps
  311. /* Save lbeg, lend */
  312. rsr a2, lbeg
  313. rsr a3, lend
  314. s32i a2, a1, PT_LBEG
  315. s32i a3, a1, PT_LEND
  316. /* Save SCOMPARE1 */
  317. #if XCHAL_HAVE_S32C1I
  318. rsr a2, scompare1
  319. s32i a2, a1, PT_SCOMPARE1
  320. #endif
  321. /* Save optional registers. */
  322. save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
  323. /* Go to second-level dispatcher. Set up parameters to pass to the
  324. * exception handler and call the exception handler.
  325. */
  326. movi a4, exc_table
  327. mov a6, a1 # pass stack frame
  328. mov a7, a0 # pass EXCCAUSE
  329. addx4 a4, a0, a4
  330. l32i a4, a4, EXC_TABLE_DEFAULT # load handler
  331. /* Call the second-level handler */
  332. callx4 a4
  333. /* Jump here for exception exit */
  334. .global common_exception_return
  335. common_exception_return:
  336. /* Jump if we are returning from kernel exceptions. */
  337. 1: l32i a3, a1, PT_PS
  338. _bbci.l a3, PS_UM_BIT, 4f
  339. /* Specific to a user exception exit:
  340. * We need to check some flags for signal handling and rescheduling,
  341. * and have to restore WB and WS, extra states, and all registers
  342. * in the register file that were in use in the user task.
  343. * Note that we don't disable interrupts here.
  344. */
  345. GET_THREAD_INFO(a2,a1)
  346. l32i a4, a2, TI_FLAGS
  347. _bbsi.l a4, TIF_NEED_RESCHED, 3f
  348. _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
  349. _bbci.l a4, TIF_SIGPENDING, 4f
  350. 2: l32i a4, a1, PT_DEPC
  351. bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
  352. /* Call do_notify_resume() */
  353. movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
  354. mov a6, a1
  355. callx4 a4
  356. j 1b
  357. 3: /* Reschedule */
  358. movi a4, schedule # void schedule (void)
  359. callx4 a4
  360. j 1b
  361. 4: /* Restore optional registers. */
  362. load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
  363. /* Restore SCOMPARE1 */
  364. #if XCHAL_HAVE_S32C1I
  365. l32i a2, a1, PT_SCOMPARE1
  366. wsr a2, scompare1
  367. #endif
  368. wsr a3, ps /* disable interrupts */
  369. _bbci.l a3, PS_UM_BIT, kernel_exception_exit
  370. user_exception_exit:
  371. /* Restore the state of the task and return from the exception. */
  372. /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
  373. l32i a2, a1, PT_WINDOWBASE
  374. l32i a3, a1, PT_WINDOWSTART
  375. wsr a1, depc # use DEPC as temp storage
  376. wsr a3, windowstart # restore WINDOWSTART
  377. ssr a2 # preserve user's WB in the SAR
  378. wsr a2, windowbase # switch to user's saved WB
  379. rsync
  380. rsr a1, depc # restore stack pointer
  381. l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
  382. rotw -1 # we restore a4..a7
  383. _bltui a6, 16, 1f # only have to restore current window?
  384. /* The working registers are a0 and a3. We are restoring to
  385. * a4..a7. Be careful not to destroy what we have just restored.
  386. * Note: wmask has the format YYYYM:
  387. * Y: number of registers saved in groups of 4
  388. * M: 4 bit mask of first 16 registers
  389. */
  390. mov a2, a6
  391. mov a3, a5
  392. 2: rotw -1 # a0..a3 become a4..a7
  393. addi a3, a7, -4*4 # next iteration
  394. addi a2, a6, -16 # decrementing Y in WMASK
  395. l32i a4, a3, PT_AREG_END + 0
  396. l32i a5, a3, PT_AREG_END + 4
  397. l32i a6, a3, PT_AREG_END + 8
  398. l32i a7, a3, PT_AREG_END + 12
  399. _bgeui a2, 16, 2b
  400. /* Clear unrestored registers (don't leak anything to user-land) */
  401. 1: rsr a0, windowbase
  402. rsr a3, sar
  403. sub a3, a0, a3
  404. beqz a3, 2f
  405. extui a3, a3, 0, WBBITS
  406. 1: rotw -1
  407. addi a3, a7, -1
  408. movi a4, 0
  409. movi a5, 0
  410. movi a6, 0
  411. movi a7, 0
  412. bgei a3, 1, 1b
  413. /* We are back where we were when we started.
  414. * Note: a2 still contains WMASK (if we've returned to the original
  415. * frame where we had loaded a2), or at least the lower 4 bits
  416. * (if we have restored WSBITS-1 frames).
  417. */
  418. #if XCHAL_HAVE_THREADPTR
  419. l32i a3, a1, PT_THREADPTR
  420. wur a3, threadptr
  421. #endif
  422. 2: j common_exception_exit
  423. /* This is the kernel exception exit.
  424. * We avoided doing a MOVSP when we entered the exception, but we
  425. * have to do it here.
  426. */
  427. kernel_exception_exit:
  428. #ifdef PREEMPTIBLE_KERNEL
  429. #ifdef CONFIG_PREEMPT
  430. /*
  431. * Note: We've just returned from a call4, so we have
  432. * at least 4 addt'l regs.
  433. */
  434. /* Check current_thread_info->preempt_count */
  435. GET_THREAD_INFO(a2)
  436. l32i a3, a2, TI_PREEMPT
  437. bnez a3, 1f
  438. l32i a2, a2, TI_FLAGS
  439. 1:
  440. #endif
  441. #endif
  442. /* Check if we have to do a movsp.
  443. *
  444. * We only have to do a movsp if the previous window-frame has
  445. * been spilled to the *temporary* exception stack instead of the
  446. * task's stack. This is the case if the corresponding bit in
  447. * WINDOWSTART for the previous window-frame was set before
  448. * (not spilled) but is zero now (spilled).
  449. * If this bit is zero, all other bits except the one for the
  450. * current window frame are also zero. So, we can use a simple test:
  451. * 'and' WINDOWSTART and WINDOWSTART-1:
  452. *
  453. * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
  454. *
  455. * The result is zero only if one bit was set.
  456. *
  457. * (Note: We might have gone through several task switches before
  458. * we come back to the current task, so WINDOWBASE might be
  459. * different from the time the exception occurred.)
  460. */
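/* Example (not in the original source): WINDOWSTART == 0b0011001 gives
 * 0b0011001 & 0b0011000 != 0 (more than one frame live, no movsp needed),
 * whereas WINDOWSTART == 0b0010000 gives 0b0010000 & 0b0001111 == 0
 * (only the current frame is live, so the previous frame was spilled).
 */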
  461. /* Test WINDOWSTART before and after the exception.
  462. * We actually have WMASK, so we only have to test if it is 1 or not.
  463. */
  464. l32i a2, a1, PT_WMASK
  465. _beqi a2, 1, common_exception_exit # Spilled before exception, jump
  466. /* Test WINDOWSTART now. If spilled, do the movsp */
  467. rsr a3, windowstart
  468. addi a0, a3, -1
  469. and a3, a3, a0
  470. _bnez a3, common_exception_exit
  471. /* Do a movsp (we returned from a call4, so we have at least a0..a7) */
  472. addi a0, a1, -16
  473. l32i a3, a0, 0
  474. l32i a4, a0, 4
  475. s32i a3, a1, PT_SIZE+0
  476. s32i a4, a1, PT_SIZE+4
  477. l32i a3, a0, 8
  478. l32i a4, a0, 12
  479. s32i a3, a1, PT_SIZE+8
  480. s32i a4, a1, PT_SIZE+12
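/* Descriptive note (not in the original source): the loads and stores above
 * copy the previous frame's four-word register save area from just below the
 * current stack pointer (a1 - 16) to just above the pt_regs frame
 * (a1 + PT_SIZE), doing by hand what a MOVSP instruction would have done.
 */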
  481. /* Common exception exit.
  482. * We restore the special register and the current window frame, and
  483. * return from the exception.
  484. *
  485. * Note: We expect a2 to hold PT_WMASK
  486. */
  487. common_exception_exit:
  488. /* Restore address registers. */
  489. _bbsi.l a2, 1, 1f
  490. l32i a4, a1, PT_AREG4
  491. l32i a5, a1, PT_AREG5
  492. l32i a6, a1, PT_AREG6
  493. l32i a7, a1, PT_AREG7
  494. _bbsi.l a2, 2, 1f
  495. l32i a8, a1, PT_AREG8
  496. l32i a9, a1, PT_AREG9
  497. l32i a10, a1, PT_AREG10
  498. l32i a11, a1, PT_AREG11
  499. _bbsi.l a2, 3, 1f
  500. l32i a12, a1, PT_AREG12
  501. l32i a13, a1, PT_AREG13
  502. l32i a14, a1, PT_AREG14
  503. l32i a15, a1, PT_AREG15
  504. /* Restore PC, SAR */
  505. 1: l32i a2, a1, PT_PC
  506. l32i a3, a1, PT_SAR
  507. wsr a2, epc1
  508. wsr a3, sar
  509. /* Restore LBEG, LEND, LCOUNT */
  510. l32i a2, a1, PT_LBEG
  511. l32i a3, a1, PT_LEND
  512. wsr a2, lbeg
  513. l32i a2, a1, PT_LCOUNT
  514. wsr a3, lend
  515. wsr a2, lcount
  516. /* We control single stepping through the ICOUNTLEVEL register. */
  517. l32i a2, a1, PT_ICOUNTLEVEL
  518. movi a3, -2
  519. wsr a2, icountlevel
  520. wsr a3, icount
  521. /* Check if it was a double exception. */
  522. l32i a0, a1, PT_DEPC
  523. l32i a3, a1, PT_AREG3
  524. _bltui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
  525. wsr a0, depc
  526. l32i a2, a1, PT_AREG2
  527. l32i a0, a1, PT_AREG0
  528. l32i a1, a1, PT_AREG1
  529. rfde
  530. 1:
  531. /* Restore a0...a3 and return */
  532. rsr a0, ps
  533. extui a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
  534. movi a0, 2f
  535. slli a2, a2, 4
  536. add a0, a2, a0
  537. l32i a2, a1, PT_AREG2
  538. jx a0
  539. .macro irq_exit_level level
  540. .align 16
  541. .if XCHAL_EXCM_LEVEL >= \level
  542. l32i a0, a1, PT_PC
  543. wsr a0, epc\level
  544. l32i a0, a1, PT_AREG0
  545. l32i a1, a1, PT_AREG1
  546. rfi \level
  547. .endif
  548. .endm
  549. .align 16
  550. 2:
  551. l32i a0, a1, PT_AREG0
  552. l32i a1, a1, PT_AREG1
  553. rfe
  554. .align 16
  555. /* no rfi for level-1 irq, handled by rfe above */
  556. nop
  557. irq_exit_level 2
  558. irq_exit_level 3
  559. irq_exit_level 4
  560. irq_exit_level 5
  561. irq_exit_level 6
  562. ENDPROC(kernel_exception)
  563. /*
  564. * Debug exception handler.
  565. *
  566. * Currently, we don't support KGDB, so only user applications can be debugged.
  567. *
  568. * When we get here, a0 is trashed and saved to excsave[debuglevel]
  569. */
  570. ENTRY(debug_exception)
  571. rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
  572. bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
  573. /* Set EPC1 and EXCCAUSE */
  574. wsr a2, depc # save a2 temporarily
  575. rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL
  576. wsr a2, epc1
  577. movi a2, EXCCAUSE_MAPPED_DEBUG
  578. wsr a2, exccause
  579. /* Restore PS to the value before the debug exc but with PS.EXCM set.*/
  580. movi a2, 1 << PS_EXCM_BIT
  581. or a2, a0, a2
  582. movi a0, debug_exception # restore a3, debug jump vector
  583. wsr a2, ps
  584. xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
  585. /* Switch to kernel/user stack, restore jump vector, and save a0 */
  586. bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
  587. addi a2, a1, -16-PT_SIZE # assume kernel stack
  588. s32i a0, a2, PT_AREG0
  589. movi a0, 0
  590. s32i a1, a2, PT_AREG1
  591. s32i a0, a2, PT_DEPC # mark it as a regular exception
  592. xsr a0, depc
  593. s32i a3, a2, PT_AREG3
  594. s32i a0, a2, PT_AREG2
  595. mov a1, a2
  596. j _kernel_exception
  597. 2: rsr a2, excsave1
  598. l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer
  599. s32i a0, a2, PT_AREG0
  600. movi a0, 0
  601. s32i a1, a2, PT_AREG1
  602. s32i a0, a2, PT_DEPC
  603. xsr a0, depc
  604. s32i a3, a2, PT_AREG3
  605. s32i a0, a2, PT_AREG2
  606. mov a1, a2
  607. j _user_exception
  608. /* Debug exception while in exception mode. */
  609. 1: j 1b // FIXME!!
  610. ENDPROC(debug_exception)
  611. /*
  612. * We get here in case of an unrecoverable exception.
  613. * The only thing we can do is to be nice and print a panic message.
  614. * We only produce a single stack frame for panic, so ???
  615. *
  616. *
  617. * Entry conditions:
  618. *
  619. * - a0 contains the caller address; original value saved in excsave1.
  620. * - the original a0 contains a valid return address (backtrace) or 0.
  621. * - a2 contains a valid stack pointer
  622. *
  623. * Notes:
  624. *
  625. * - If the stack pointer could be invalid, the caller has to set up a
  626. * dummy stack pointer (e.g. the stack of the init_task)
  627. *
  628. * - If the return address could be invalid, the caller has to set it
  629. * to 0, so the backtrace would stop.
  630. *
  631. */
  632. .align 4
  633. unrecoverable_text:
  634. .ascii "Unrecoverable error in exception handler\0"
  635. ENTRY(unrecoverable_exception)
  636. movi a0, 1
  637. movi a1, 0
  638. wsr a0, windowstart
  639. wsr a1, windowbase
  640. rsync
  641. movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL
  642. wsr a1, ps
  643. rsync
  644. movi a1, init_task
  645. movi a0, 0
  646. addi a1, a1, PT_REGS_OFFSET
  647. movi a4, panic
  648. movi a6, unrecoverable_text
  649. callx4 a4
  650. 1: j 1b
  651. ENDPROC(unrecoverable_exception)
  652. /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
  653. /*
  654. * Fast-handler for alloca exceptions
  655. *
  656. * The ALLOCA handler is entered when user code executes the MOVSP
  657. * instruction and the caller's frame is not in the register file.
  658. * In this case, the caller frame's a0..a3 are on the stack just
  659. * below sp (a1), and this handler moves them.
  660. *
  661. * For "MOVSP <ar>,<as>" without destination register a1, this routine
  662. * simply moves the value from <as> to <ar> without moving the save area.
  663. *
  664. * Entry condition:
  665. *
  666. * a0: trashed, original value saved on stack (PT_AREG0)
  667. * a1: a1
  668. * a2: new stack pointer, original in DEPC
  669. * a3: dispatch table
  670. * depc: a2, original value saved on stack (PT_DEPC)
  671. * excsave_1: a3
  672. *
  673. * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  674. * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
  675. */
  676. #if XCHAL_HAVE_BE
  677. #define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 4, 4
  678. #define _EXTUI_MOVSP_DST(ar) extui ar, ar, 0, 4
  679. #else
  680. #define _EXTUI_MOVSP_SRC(ar) extui ar, ar, 0, 4
  681. #define _EXTUI_MOVSP_DST(ar) extui ar, ar, 4, 4
  682. #endif
  683. ENTRY(fast_alloca)
  684. /* We shouldn't be in a double exception. */
  685. l32i a0, a2, PT_DEPC
  686. _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
  687. rsr a0, depc # get a2
  688. s32i a4, a2, PT_AREG4 # save a4 and
  689. s32i a0, a2, PT_AREG2 # a2 to stack
  690. /* Exit critical section. */
  691. movi a0, 0
  692. s32i a0, a3, EXC_TABLE_FIXUP
  693. /* Restore a3, excsave_1 */
  694. xsr a3, excsave1 # make sure excsave_1 is valid for dbl.
  695. rsr a4, epc1 # get exception address
  696. s32i a3, a2, PT_AREG3 # save a3 to stack
  697. #ifdef ALLOCA_EXCEPTION_IN_IRAM
  698. #error iram not supported
  699. #else
  700. /* Note: l8ui not allowed in IRAM/IROM!! */
  701. l8ui a0, a4, 1 # read as(src) from MOVSP instruction
  702. #endif
  703. movi a3, .Lmovsp_src
  704. _EXTUI_MOVSP_SRC(a0) # extract source register number
  705. addx8 a3, a0, a3
  706. jx a3
  707. .Lunhandled_double:
  708. wsr a0, excsave1
  709. movi a0, unrecoverable_exception
  710. callx0 a0
  711. .align 8
  712. .Lmovsp_src:
  713. l32i a3, a2, PT_AREG0; _j 1f; .align 8
  714. mov a3, a1; _j 1f; .align 8
  715. l32i a3, a2, PT_AREG2; _j 1f; .align 8
  716. l32i a3, a2, PT_AREG3; _j 1f; .align 8
  717. l32i a3, a2, PT_AREG4; _j 1f; .align 8
  718. mov a3, a5; _j 1f; .align 8
  719. mov a3, a6; _j 1f; .align 8
  720. mov a3, a7; _j 1f; .align 8
  721. mov a3, a8; _j 1f; .align 8
  722. mov a3, a9; _j 1f; .align 8
  723. mov a3, a10; _j 1f; .align 8
  724. mov a3, a11; _j 1f; .align 8
  725. mov a3, a12; _j 1f; .align 8
  726. mov a3, a13; _j 1f; .align 8
  727. mov a3, a14; _j 1f; .align 8
  728. mov a3, a15; _j 1f; .align 8
  729. 1:
  730. #ifdef ALLOCA_EXCEPTION_IN_IRAM
  731. #error iram not supported
  732. #else
  733. l8ui a0, a4, 0 # read ar(dst) from MOVSP instruction
  734. #endif
  735. addi a4, a4, 3 # step over movsp
  736. _EXTUI_MOVSP_DST(a0) # extract destination register
  737. wsr a4, epc1 # save new epc_1
  738. _bnei a0, 1, 1f # no 'movsp a1, ax': jump
  739. /* Move the save area. This implies the use of the L32E
  740. * and S32E instructions, because this move must be done with
  741. * the user's PS.RING privilege levels, not with ring 0
  742. * (kernel's) privileges currently active with PS.EXCM
  743. * set. Note that we have still registered a fixup routine with the
  744. * double exception vector in case a double exception occurs.
  745. */
  746. /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
  747. l32e a0, a1, -16
  748. l32e a4, a1, -12
  749. s32e a0, a3, -16
  750. s32e a4, a3, -12
  751. l32e a0, a1, -8
  752. l32e a4, a1, -4
  753. s32e a0, a3, -8
  754. s32e a4, a3, -4
  755. /* Restore stack-pointer and all the other saved registers. */
  756. mov a1, a3
  757. l32i a4, a2, PT_AREG4
  758. l32i a3, a2, PT_AREG3
  759. l32i a0, a2, PT_AREG0
  760. l32i a2, a2, PT_AREG2
  761. rfe
  762. /* MOVSP <at>,<as> was invoked with <at> != a1.
  763. * Because the stack pointer is not being modified,
  764. * we should be able to just modify the pointer
  765. * without moving any save area.
  766. * The processor only traps these occurrences if the
  767. * caller window isn't live, so unfortunately we can't
  768. * use this as an alternate trap mechanism.
  769. * So we just do the move. This requires that we
  770. * resolve the destination register, not just the source,
  771. * so there's some extra work.
  772. * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
  773. */
  774. /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
  775. 1: movi a4, .Lmovsp_dst
  776. addx8 a4, a0, a4
  777. jx a4
  778. .align 8
  779. .Lmovsp_dst:
  780. s32i a3, a2, PT_AREG0; _j 1f; .align 8
  781. mov a1, a3; _j 1f; .align 8
  782. s32i a3, a2, PT_AREG2; _j 1f; .align 8
  783. s32i a3, a2, PT_AREG3; _j 1f; .align 8
  784. s32i a3, a2, PT_AREG4; _j 1f; .align 8
  785. mov a5, a3; _j 1f; .align 8
  786. mov a6, a3; _j 1f; .align 8
  787. mov a7, a3; _j 1f; .align 8
  788. mov a8, a3; _j 1f; .align 8
  789. mov a9, a3; _j 1f; .align 8
  790. mov a10, a3; _j 1f; .align 8
  791. mov a11, a3; _j 1f; .align 8
  792. mov a12, a3; _j 1f; .align 8
  793. mov a13, a3; _j 1f; .align 8
  794. mov a14, a3; _j 1f; .align 8
  795. mov a15, a3; _j 1f; .align 8
  796. 1: l32i a4, a2, PT_AREG4
  797. l32i a3, a2, PT_AREG3
  798. l32i a0, a2, PT_AREG0
  799. l32i a2, a2, PT_AREG2
  800. rfe
  801. ENDPROC(fast_alloca)
  802. /*
  803. * fast system calls.
  804. *
  805. * WARNING: The kernel doesn't save the entire user context before
  806. * handling a fast system call. These functions are small and short,
  807. * usually offering some functionality not available to user tasks.
  808. *
  809. * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
  810. *
  811. * Entry condition:
  812. *
  813. * a0: trashed, original value saved on stack (PT_AREG0)
  814. * a1: a1
  815. * a2: new stack pointer, original in DEPC
  816. * a3: dispatch table
  817. * depc: a2, original value saved on stack (PT_DEPC)
  818. * excsave_1: a3
  819. */
  820. ENTRY(fast_syscall_kernel)
  821. /* Skip syscall. */
  822. rsr a0, epc1
  823. addi a0, a0, 3
  824. wsr a0, epc1
  825. l32i a0, a2, PT_DEPC
  826. bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
  827. rsr a0, depc # get syscall-nr
  828. _beqz a0, fast_syscall_spill_registers
  829. _beqi a0, __NR_xtensa, fast_syscall_xtensa
  830. j kernel_exception
  831. ENDPROC(fast_syscall_kernel)
  832. ENTRY(fast_syscall_user)
  833. /* Skip syscall. */
  834. rsr a0, epc1
  835. addi a0, a0, 3
  836. wsr a0, epc1
  837. l32i a0, a2, PT_DEPC
  838. bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
  839. rsr a0, depc # get syscall-nr
  840. _beqz a0, fast_syscall_spill_registers
  841. _beqi a0, __NR_xtensa, fast_syscall_xtensa
  842. j user_exception
  843. ENDPROC(fast_syscall_user)
  844. ENTRY(fast_syscall_unrecoverable)
  845. /* Restore all states. */
  846. l32i a0, a2, PT_AREG0 # restore a0
  847. xsr a2, depc # restore a2, depc
  848. rsr a3, excsave1
  849. wsr a0, excsave1
  850. movi a0, unrecoverable_exception
  851. callx0 a0
  852. ENDPROC(fast_syscall_unrecoverable)
  853. /*
  854. * sysxtensa syscall handler
  855. *
  856. * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused);
  857. * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused);
  858. * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
  859. * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
  860. * a2 a6 a3 a4 a5
  861. *
  862. * Entry condition:
  863. *
  864. * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0)
  865. * a1: a1
  866. * a2: new stack pointer, original in a0 and DEPC
  867. * a3: dispatch table, original in excsave_1
  868. * a4..a15: unchanged
  869. * depc: a2, original value saved on stack (PT_DEPC)
  870. * excsave_1: a3
  871. *
  872. * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  873. * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
  874. *
  875. * Note: we don't have to save a2; a2 holds the return value
  876. *
  877. * We use the two macros TRY and CATCH:
  878. *
  879. * TRY adds an entry to the __ex_table fixup table for the immediately
  880. * following instruction.
  881. *
  882. * CATCH catches any exception that occurred at one of the preceding TRY
  883. * statements and continues from there
  884. *
  885. * Usage TRY l32i a0, a1, 0
  886. * <other code>
  887. * done: rfe
  888. * CATCH <set return code>
  889. * j done
  890. */
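/* Illustrative user-space sketch (not part of this file): the compare-and-swap
 * variant could be invoked roughly as
 *
 *	ret = syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_CMP_SWP,
 *		      &word, oldval, newval);
 *
 * The handler below returns 1 if the swap was performed, 0 if the comparison
 * failed, and -EFAULT or -EINVAL for a bad pointer or unknown operation.
 */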
  891. #define TRY \
  892. .section __ex_table, "a"; \
  893. .word 66f, 67f; \
  894. .text; \
  895. 66:
  896. #define CATCH \
  897. 67:
  898. ENTRY(fast_syscall_xtensa)
  899. xsr a3, excsave1 # restore a3, excsave1
  900. s32i a7, a2, PT_AREG7 # we need an additional register
  901. movi a7, 4 # sizeof(unsigned int)
  902. access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
  903. addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1
  904. _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill
  905. _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
  906. /* Fall through for ATOMIC_CMP_SWP. */
  907. .Lswp: /* Atomic compare and swap */
  908. TRY l32i a0, a3, 0 # read old value
  909. bne a0, a4, 1f # same as old value? jump
  910. TRY s32i a5, a3, 0 # different, modify value
  911. l32i a7, a2, PT_AREG7 # restore a7
  912. l32i a0, a2, PT_AREG0 # restore a0
  913. movi a2, 1 # and return 1
  914. addi a6, a6, 1 # restore a6 (really necessary?)
  915. rfe
  916. 1: l32i a7, a2, PT_AREG7 # restore a7
  917. l32i a0, a2, PT_AREG0 # restore a0
  918. movi a2, 0 # return 0 (note that we cannot set
  919. addi a6, a6, 1 # restore a6 (really necessary?)
  920. rfe
  921. .Lnswp: /* Atomic set, add, and exg_add. */
  922. TRY l32i a7, a3, 0 # orig
  923. add a0, a4, a7 # + arg
  924. moveqz a0, a4, a6 # set
  925. TRY s32i a0, a3, 0 # write new value
  926. mov a0, a2
  927. mov a2, a7
  928. l32i a7, a0, PT_AREG7 # restore a7
  929. l32i a0, a0, PT_AREG0 # restore a0
  930. addi a6, a6, 1 # restore a6 (really necessary?)
  931. rfe
  932. CATCH
  933. .Leac: l32i a7, a2, PT_AREG7 # restore a7
  934. l32i a0, a2, PT_AREG0 # restore a0
  935. movi a2, -EFAULT
  936. rfe
  937. .Lill: l32i a7, a2, PT_AREG7 # restore a7
  938. l32i a0, a2, PT_AREG0 # restore a0
  939. movi a2, -EINVAL
  940. rfe
  941. ENDPROC(fast_syscall_xtensa)
  942. /* fast_syscall_spill_registers.
  943. *
  944. * Entry condition:
  945. *
  946. * a0: trashed, original value saved on stack (PT_AREG0)
  947. * a1: a1
  948. * a2: new stack pointer, original in DEPC
  949. * a3: dispatch table
  950. * depc: a2, original value saved on stack (PT_DEPC)
  951. * excsave_1: a3
  952. *
  953. * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
  954. */
  955. ENTRY(fast_syscall_spill_registers)
  956. /* Register a FIXUP handler (pass current wb as a parameter) */
  957. movi a0, fast_syscall_spill_registers_fixup
  958. s32i a0, a3, EXC_TABLE_FIXUP
  959. rsr a0, windowbase
  960. s32i a0, a3, EXC_TABLE_PARAM
  961. /* Save a3 and SAR on stack. */
  962. rsr a0, sar
  963. xsr a3, excsave1 # restore a3 and excsave_1
  964. s32i a3, a2, PT_AREG3
  965. s32i a4, a2, PT_AREG4
  966. s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
  967. /* The spill routine might clobber a7, a11, and a15. */
  968. s32i a7, a2, PT_AREG7
  969. s32i a11, a2, PT_AREG11
  970. s32i a15, a2, PT_AREG15
  971. call0 _spill_registers # destroys a3, a4, and SAR
  972. /* Advance PC, restore registers and SAR, and return from exception. */
  973. l32i a3, a2, PT_AREG5
  974. l32i a4, a2, PT_AREG4
  975. l32i a0, a2, PT_AREG0
  976. wsr a3, sar
  977. l32i a3, a2, PT_AREG3
  978. /* Restore clobbered registers. */
  979. l32i a7, a2, PT_AREG7
  980. l32i a11, a2, PT_AREG11
  981. l32i a15, a2, PT_AREG15
  982. movi a2, 0
  983. rfe
  984. ENDPROC(fast_syscall_spill_registers)
  985. /* Fixup handler.
  986. *
  987. * We get here if the spill routine causes an exception, e.g. tlb miss.
  988. * We basically restore WINDOWBASE and WINDOWSTART to the condition when
  989. * we entered the spill routine and jump to the user exception handler.
  990. *
  991. * a0: value of depc, original value in depc
  992. * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
  993. * a3: exctable, original value in excsave1
  994. */
  995. fast_syscall_spill_registers_fixup:
  996. rsr a2, windowbase # get current windowbase (a2 is saved)
  997. xsr a0, depc # restore depc and a0
  998. ssl a2 # set shift (32 - WB)
  999. /* We need to make sure the current registers (a0-a3) are preserved.
  1000. * To do this, we simply set the bit for the current window frame
  1001. * in WS, so that the exception handlers save them to the task stack.
  1002. */
  1003. rsr a3, excsave1 # get spill-mask
  1004. slli a2, a3, 1 # shift left by one
  1005. slli a3, a2, 32-WSBITS
  1006. src a2, a2, a3 # a2 = xxwww1yyxxxwww1yy......
  1007. wsr a2, windowstart # set corrected windowstart
  1008. movi a3, exc_table
  1009. l32i a2, a3, EXC_TABLE_DOUBLE_SAVE # restore a2
  1010. l32i a3, a3, EXC_TABLE_PARAM # original WB (in user task)
  1011. /* Return to the original (user task) WINDOWBASE.
  1012. * We leave the following frame behind:
  1013. * a0, a1, a2 same
  1014. * a3: trashed (saved in excsave_1)
  1015. * depc: depc (we have to return to that address)
  1016. * excsave_1: a3
  1017. */
  1018. wsr a3, windowbase
  1019. rsync
  1020. /* We are now in the original frame when we entered _spill_registers:
  1021. * a0: return address
  1022. * a1: used, stack pointer
  1023. * a2: kernel stack pointer
  1024. * a3: available, saved in EXCSAVE_1
  1025. * depc: exception address
  1026. * excsave: a3
  1027. * Note: This frame might be the same as above.
  1028. */
  1029. /* Setup stack pointer. */
  1030. addi a2, a2, -PT_USER_SIZE
  1031. s32i a0, a2, PT_AREG0
  1032. /* Make sure we return to this fixup handler. */
  1033. movi a3, fast_syscall_spill_registers_fixup_return
  1034. s32i a3, a2, PT_DEPC # setup depc
  1035. /* Jump to the exception handler. */
  1036. movi a3, exc_table
  1037. rsr a0, exccause
  1038. addx4 a0, a0, a3 # find entry in table
  1039. l32i a0, a0, EXC_TABLE_FAST_USER # load handler
  1040. jx a0
  1041. fast_syscall_spill_registers_fixup_return:
  1042. /* When we return here, all registers have been restored (a2: DEPC) */
  1043. wsr a2, depc # exception address
  1044. /* Restore fixup handler. */
  1045. xsr a3, excsave1
  1046. movi a2, fast_syscall_spill_registers_fixup
  1047. s32i a2, a3, EXC_TABLE_FIXUP
  1048. rsr a2, windowbase
  1049. s32i a2, a3, EXC_TABLE_PARAM
  1050. l32i a2, a3, EXC_TABLE_KSTK
  1051. /* Load WB at the time the exception occurred. */
  1052. rsr a3, sar # WB is still in SAR
  1053. neg a3, a3
  1054. wsr a3, windowbase
  1055. rsync
  1056. /* Restore a3 and return. */
  1057. movi a3, exc_table
  1058. xsr a3, excsave1
  1059. rfde
  1060. /*
  1061. * spill all registers.
  1062. *
  1063. * This is not a real function. The following conditions must be met:
  1064. *
  1065. * - must be called with call0.
  1066. * - uses a3, a4 and SAR.
  1067. * - the last 'valid' register of each frame is clobbered.
  1068. * - the caller must have registered a fixup handler
  1069. * (or be inside a critical section)
  1070. * - PS_EXCM must be set (PS_WOE cleared?)
  1071. */
  1072. ENTRY(_spill_registers)
  1073. /*
  1074. * Rotate ws so that the current windowbase is at bit 0.
  1075. * Assume ws = xxxwww1yy (www1 current window frame).
  1076. * Rotate ws right so that a4 = yyxxxwww1.
  1077. */
  1078. rsr a4, windowbase
  1079. rsr a3, windowstart # a3 = xxxwww1yy
  1080. ssr a4 # holds WB
  1081. slli a4, a3, WSBITS
  1082. or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy
  1083. srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
  1084. /* We are done if there is nothing more than the current register frame. */
  1085. extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
  1086. movi a4, (1 << (WSBITS-1))
  1087. _beqz a3, .Lnospill # only one active frame? jump
  1088. /* We want 1 at the top, so that we return to the current windowbase */
  1089. or a3, a3, a4 # 1yyxxxwww
  1090. /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
  1091. wsr a3, windowstart # save shifted windowstart
  1092. neg a4, a3
  1093. and a3, a4, a3 # first bit set from right: 000010000
  1094. ffs_ws a4, a3 # a4: shifts to skip empty frames
  1095. movi a3, WSBITS
  1096. sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right
  1097. ssr a4 # save in SAR for later.
  1098. rsr a3, windowbase
  1099. add a3, a3, a4
  1100. wsr a3, windowbase
  1101. rsync
  1102. rsr a3, windowstart
  1103. srl a3, a3 # shift windowstart
  1104. /* WB is now just one frame below the oldest frame in the register
  1105. window. WS is shifted so the oldest frame is in bit 0, thus, WB
  1106. and WS differ by one 4-register frame. */
  1107. /* Save frames. Depending what call was used (call4, call8, call12),
  1108. * we have to save 4, 8, or 12 registers.
  1109. */
  1110. _bbsi.l a3, 1, .Lc4
  1111. _bbsi.l a3, 2, .Lc8
  1112. /* Special case: we have a call12-frame starting at a4. */
  1113. _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first)
  1114. s32e a4, a1, -16 # a1 is valid with an empty spill area
  1115. l32e a4, a5, -12
  1116. s32e a8, a4, -48
  1117. mov a8, a4
  1118. l32e a4, a1, -16
  1119. j .Lc12c
  1120. .Lnospill:
  1121. ret
  1122. .Lloop: _bbsi.l a3, 1, .Lc4
  1123. _bbci.l a3, 2, .Lc12
  1124. .Lc8: s32e a4, a13, -16
  1125. l32e a4, a5, -12
  1126. s32e a8, a4, -32
  1127. s32e a5, a13, -12
  1128. s32e a6, a13, -8
  1129. s32e a7, a13, -4
  1130. s32e a9, a4, -28
  1131. s32e a10, a4, -24
  1132. s32e a11, a4, -20
  1133. srli a11, a3, 2 # shift windowbase by 2
  1134. rotw 2
  1135. _bnei a3, 1, .Lloop
  1136. .Lexit: /* Done. Do the final rotation, set WS, and return. */
  1137. rotw 1
  1138. rsr a3, windowbase
  1139. ssl a3
  1140. movi a3, 1
  1141. sll a3, a3
  1142. wsr a3, windowstart
  1143. ret
  1144. .Lc4: s32e a4, a9, -16
  1145. s32e a5, a9, -12
  1146. s32e a6, a9, -8
  1147. s32e a7, a9, -4
  1148. srli a7, a3, 1
  1149. rotw 1
  1150. _bnei a3, 1, .Lloop
  1151. j .Lexit
  1152. .Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 3 shouldn't be zero!
  1153. /* 12-register frame (call12) */
  1154. l32e a2, a5, -12
  1155. s32e a8, a2, -48
  1156. mov a8, a2
  1157. .Lc12c: s32e a9, a8, -44
  1158. s32e a10, a8, -40
  1159. s32e a11, a8, -36
  1160. s32e a12, a8, -32
  1161. s32e a13, a8, -28
  1162. s32e a14, a8, -24
  1163. s32e a15, a8, -20
  1164. srli a15, a3, 3
  1165. /* The stack pointer for a4..a7 is out of reach, so we rotate the
  1166. * window, grab the stack pointer, and rotate back.
  1167. * Alternatively, we could also use the following approach, but that
  1168. * makes the fixup routine much more complicated:
  1169. * rotw 1
  1170. * s32e a0, a13, -16
  1171. * ...
  1172. * rotw 2
  1173. */
  1174. rotw 1
  1175. mov a5, a13
  1176. rotw -1
  1177. s32e a4, a9, -16
  1178. s32e a5, a9, -12
  1179. s32e a6, a9, -8
  1180. s32e a7, a9, -4
  1181. rotw 3
  1182. _beqi a3, 1, .Lexit
  1183. j .Lloop
  1184. .Linvalid_mask:
  1185. /* We get here because of an unrecoverable error in the window
  1186. * registers. If we are in user space, we kill the application;
  1187. * in kernel space, however, this condition is unrecoverable.
  1188. */
  1189. rsr a0, ps
  1190. _bbci.l a0, PS_UM_BIT, 1f
  1191. /* User space: Setup a dummy frame and kill application.
  1192. * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
  1193. */
  1194. movi a0, 1
  1195. movi a1, 0
  1196. wsr a0, windowstart
  1197. wsr a1, windowbase
  1198. rsync
  1199. movi a0, 0
  1200. movi a3, exc_table
  1201. l32i a1, a3, EXC_TABLE_KSTK
  1202. wsr a3, excsave1
  1203. movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL
  1204. wsr a4, ps
  1205. rsync
  1206. movi a6, SIGSEGV
  1207. movi a4, do_exit
  1208. callx4 a4
  1209. 1: /* Kernel space: PANIC! */
  1210. wsr a0, excsave1
  1211. movi a0, unrecoverable_exception
  1212. callx0 a0 # should not return
  1213. 1: j 1b
  1214. ENDPROC(_spill_registers)
  1215. #ifdef CONFIG_MMU
  1216. /*
  1217. * We should never get here. Bail out!
  1218. */
  1219. ENTRY(fast_second_level_miss_double_kernel)
  1220. 1: movi a0, unrecoverable_exception
  1221. callx0 a0 # should not return
  1222. 1: j 1b
  1223. ENDPROC(fast_second_level_miss_double_kernel)
  1224. /* First-level entry handler for user, kernel, and double 2nd-level
  1225. * TLB miss exceptions. Note that for now, user and kernel miss
  1226. * exceptions share the same entry point and are handled identically.
  1227. *
  1228. * An old, less-efficient C version of this function used to exist.
  1229. * We include it below, interleaved as comments, for reference.
  1230. *
  1231. * Entry condition:
  1232. *
  1233. * a0: trashed, original value saved on stack (PT_AREG0)
  1234. * a1: a1
  1235. * a2: new stack pointer, original in DEPC
  1236. * a3: dispatch table
  1237. * depc: a2, original value saved on stack (PT_DEPC)
  1238. * excsave_1: a3
  1239. *
  1240. * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  1241. * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
  1242. */
  1243. ENTRY(fast_second_level_miss)
  1244. /* Save a1. Note: we don't expect a double exception. */
  1245. s32i a1, a2, PT_AREG1
  1246. /* We need to map the page of PTEs for the user task. Find
  1247. * the pointer to that page. Also, it's possible for tsk->mm
  1248. * to be NULL while tsk->active_mm is nonzero if we faulted on
  1249. * a vmalloc address. In that rare case, we must use
  1250. * active_mm instead to avoid a fault in this handler. See
  1251. *
  1252. * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
  1253. * (or search Internet on "mm vs. active_mm")
  1254. *
  1255. * if (!mm)
  1256. * mm = tsk->active_mm;
  1257. * pgd = pgd_offset (mm, regs->excvaddr);
  1258. * pmd = pmd_offset (pgd, regs->excvaddr);
  1259. * pmdval = *pmd;
  1260. */
  1261. GET_CURRENT(a1,a2)
  1262. l32i a0, a1, TASK_MM # tsk->mm
  1263. beqz a0, 9f
  1264. /* We deliberately destroy a3 that holds the exception table. */
  1265. 8: rsr a3, excvaddr # fault address
  1266. _PGD_OFFSET(a0, a3, a1)
  1267. l32i a0, a0, 0 # read pmdval
  1268. beqz a0, 2f
  1269. /* Read ptevaddr and convert to top of page-table page.
  1270. *
  1271. * vpnval = read_ptevaddr_register() & PAGE_MASK;
  1272. * vpnval += DTLB_WAY_PGTABLE;
  1273. * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
  1274. * write_dtlb_entry (pteval, vpnval);
  1275. *
  1276. * The messy computation for 'pteval' above really simplifies
  1277. * into the following:
  1278. *
  1279. * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
  1280. */
  1281. movi a1, (-PAGE_OFFSET) & 0xffffffff
  1282. add a0, a0, a1 # pmdval - PAGE_OFFSET
  1283. extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
  1284. xor a0, a0, a1
  1285. movi a1, _PAGE_DIRECTORY
  1286. or a0, a0, a1 # ... | PAGE_DIRECTORY
  1287. /*
  1288. * We utilize all three wired-ways (7-9) to hold pmd translations.
  1289. * Memory regions are mapped to the DTLBs according to bits 28 and 29.
  1290. * This allows us to map the three most common regions to three different
  1291. * DTLBs:
  1292. * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000)
  1293. * 2 -> way 8 shared libraries (2000.0000)
  1294. * 3 -> way 9 stack (3000.0000)
  1295. */
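/* Example (not in the original source): a fault at a stack address
 * 0x3XXXXXXX has bits 28..29 == 3; addx2 below turns that into 9 and
 * extui(., 2, 2) into 2, so the entry is written to way DTLB_WAY_PGD + 2.
 */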
  1296. extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
  1297. rsr a1, ptevaddr
  1298. addx2 a3, a3, a3 # -> 0,3,6,9
  1299. srli a1, a1, PAGE_SHIFT
  1300. extui a3, a3, 2, 2 # -> 0,0,1,2
  1301. slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
  1302. addi a3, a3, DTLB_WAY_PGD
  1303. add a1, a1, a3 # ... + way_number
  1304. 3: wdtlb a0, a1
  1305. dsync
  1306. /* Exit critical section. */
  1307. 4: movi a3, exc_table # restore a3
  1308. movi a0, 0
  1309. s32i a0, a3, EXC_TABLE_FIXUP
  1310. /* Restore the working registers, and return. */
  1311. l32i a0, a2, PT_AREG0
  1312. l32i a1, a2, PT_AREG1
  1313. l32i a2, a2, PT_DEPC
  1314. xsr a3, excsave1
  1315. bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
  1316. /* Restore excsave1 and return. */
  1317. rsr a2, depc
  1318. rfe
  1319. /* Return from double exception. */
  1320. 1: xsr a2, depc
  1321. esync
  1322. rfde
  1323. 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
  1324. j 8b
  1325. #if (DCACHE_WAY_SIZE > PAGE_SIZE)
  1326. 2: /* Special case for cache aliasing.
  1327. * We (should) only get here if a clear_user_page, copy_user_page
  1328. * or the aliased cache flush functions got preemptively interrupted
  1329. * by another task. Re-establish temporary mapping to the
  1330. * TLBTEMP_BASE areas.
  1331. */
  1332. /* We shouldn't be in a double exception */
  1333. l32i a0, a2, PT_DEPC
  1334. bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
  1335. /* Make sure the exception originated in the special functions */
  1336. movi a0, __tlbtemp_mapping_start
  1337. rsr a3, epc1
  1338. bltu a3, a0, 2f
  1339. movi a0, __tlbtemp_mapping_end
  1340. bgeu a3, a0, 2f
  1341. /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
  1342. movi a3, TLBTEMP_BASE_1
  1343. rsr a0, excvaddr
  1344. bltu a0, a3, 2f
  1345. addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
  1346. bgeu a1, a3, 2f
  1347. /* Check if we have to restore an ITLB mapping. */
  1348. movi a1, __tlbtemp_mapping_itlb
  1349. rsr a3, epc1
  1350. sub a3, a3, a1
  1351. /* Calculate VPN */
  1352. movi a1, PAGE_MASK
  1353. and a1, a1, a0
  1354. /* Jump for ITLB entry */
  1355. bgez a3, 1f
  1356. /* We can use up to two TLBTEMP areas, one for src and one for dst. */
  1357. extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
  1358. add a1, a3, a1
  1359. /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
  1360. mov a0, a6
  1361. movnez a0, a7, a3
  1362. j 3b
  1363. /* ITLB entry. We only use dst in a6. */
  1364. 1: witlb a6, a1
  1365. isync
  1366. j 4b
  1367. #endif // DCACHE_WAY_SIZE > PAGE_SIZE
  1368. 2: /* Invalid PGD, default exception handling */
  1369. movi a3, exc_table
  1370. rsr a1, depc
  1371. xsr a3, excsave1
  1372. s32i a1, a2, PT_AREG2
  1373. s32i a3, a2, PT_AREG3
  1374. mov a1, a2
  1375. rsr a2, ps
  1376. bbsi.l a2, PS_UM_BIT, 1f
  1377. j _kernel_exception
  1378. 1: j _user_exception
  1379. ENDPROC(fast_second_level_miss)
  1380. /*
  1381. * StoreProhibitedException
  1382. *
  1383. * Update the pte and invalidate the itlb mapping for this pte.
  1384. *
  1385. * Entry condition:
  1386. *
  1387. * a0: trashed, original value saved on stack (PT_AREG0)
  1388. * a1: a1
  1389. * a2: new stack pointer, original in DEPC
  1390. * a3: dispatch table
  1391. * depc: a2, original value saved on stack (PT_DEPC)
  1392. * excsave_1: a3
  1393. *
  1394. * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  1395. * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
  1396. */
  1397. ENTRY(fast_store_prohibited)
  1398. /* Save a1 and a4. */
  1399. s32i a1, a2, PT_AREG1
  1400. s32i a4, a2, PT_AREG4
  1401. GET_CURRENT(a1,a2)
  1402. l32i a0, a1, TASK_MM # tsk->mm
  1403. beqz a0, 9f
  1404. 8: rsr a1, excvaddr # fault address
  1405. _PGD_OFFSET(a0, a1, a4)
  1406. l32i a0, a0, 0
  1407. beqz a0, 2f
  1408. /* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/
  1409. _PTE_OFFSET(a0, a1, a4)
  1410. l32i a4, a0, 0 # read pteval
  1411. bbci.l a4, _PAGE_WRITABLE_BIT, 2f
  1412. movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
  1413. or a4, a4, a1
  1414. rsr a1, excvaddr
  1415. s32i a4, a0, 0
  1416. /* We need to flush the cache if we have page coloring. */
  1417. #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
  1418. dhwb a0, 0
  1419. #endif
  1420. pdtlb a0, a1
  1421. wdtlb a4, a0
  1422. /* Exit critical section. */
  1423. movi a0, 0
  1424. s32i a0, a3, EXC_TABLE_FIXUP
  1425. /* Restore the working registers, and return. */
  1426. l32i a4, a2, PT_AREG4
  1427. l32i a1, a2, PT_AREG1
  1428. l32i a0, a2, PT_AREG0
  1429. l32i a2, a2, PT_DEPC
  1430. /* Restore excsave1 and a3. */
  1431. xsr a3, excsave1
  1432. bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
  1433. rsr a2, depc
  1434. rfe
  1435. /* Double exception. Restore FIXUP handler and return. */
  1436. 1: xsr a2, depc
  1437. esync
  1438. rfde
  1439. 9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
  1440. j 8b
  1441. 2: /* If there was a problem, handle fault in C */
  1442. rsr a4, depc # still holds a2
  1443. xsr a3, excsave1
  1444. s32i a4, a2, PT_AREG2
  1445. s32i a3, a2, PT_AREG3
  1446. l32i a4, a2, PT_AREG4
  1447. mov a1, a2
  1448. rsr a2, ps
  1449. bbsi.l a2, PS_UM_BIT, 1f
  1450. j _kernel_exception
  1451. 1: j _user_exception
  1452. ENDPROC(fast_store_prohibited)
  1453. #endif /* CONFIG_MMU */
  1454. /*
  1455. * System Calls.
  1456. *
  1457. * void system_call (struct pt_regs* regs, int exccause)
  1458. * a2 a3
  1459. */
  1460. ENTRY(system_call)
  1461. entry a1, 32
  1462. /* regs->syscall = regs->areg[2] */
  1463. l32i a3, a2, PT_AREG2
  1464. mov a6, a2
  1465. movi a4, do_syscall_trace_enter
  1466. s32i a3, a2, PT_SYSCALL
  1467. callx4 a4
  1468. /* syscall = sys_call_table[syscall_nr] */
  1469. movi a4, sys_call_table;
  1470. movi a5, __NR_syscall_count
  1471. movi a6, -ENOSYS
  1472. bgeu a3, a5, 1f
  1473. addx4 a4, a3, a4
  1474. l32i a4, a4, 0
  1475. movi a5, sys_ni_syscall;
  1476. beq a4, a5, 1f
  1477. /* Load args: arg0 - arg5 are passed via regs. */
  1478. l32i a6, a2, PT_AREG6
  1479. l32i a7, a2, PT_AREG3
  1480. l32i a8, a2, PT_AREG4
  1481. l32i a9, a2, PT_AREG5
  1482. l32i a10, a2, PT_AREG8
  1483. l32i a11, a2, PT_AREG9
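/* Descriptive note (not in the original source): that is, user space passes
 * the syscall number in a2 and up to six arguments in a6, a3, a4, a5, a8
 * and a9, which is where the loads above find them in pt_regs.
 */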
  1484. /* Pass one additional argument to the syscall: pt_regs (on stack) */
  1485. s32i a2, a1, 0
  1486. callx4 a4
  1487. 1: /* regs->areg[2] = return_value */
  1488. s32i a6, a2, PT_AREG2
  1489. movi a4, do_syscall_trace_leave
  1490. mov a6, a2
  1491. callx4 a4
  1492. retw
  1493. ENDPROC(system_call)
  1494. /*
  1495. * Task switch.
  1496. *
  1497. * struct task* _switch_to (struct task* prev, struct task* next)
  1498. * a2 a2 a3
  1499. */
  1500. ENTRY(_switch_to)
  1501. entry a1, 16
  1502. mov a12, a2 # preserve 'prev' (a2)
  1503. mov a13, a3 # and 'next' (a3)
  1504. l32i a4, a2, TASK_THREAD_INFO
  1505. l32i a5, a3, TASK_THREAD_INFO
  1506. save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
  1507. s32i a0, a12, THREAD_RA # save return address
  1508. s32i a1, a12, THREAD_SP # save stack pointer
  1509. /* Disable ints while we manipulate the stack pointer. */
  1510. movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
  1511. xsr a14, ps
  1512. rsr a3, excsave1
  1513. rsync
  1514. s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
  1515. /* Switch CPENABLE */
  1516. #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
  1517. l32i a3, a5, THREAD_CPENABLE
  1518. xsr a3, cpenable
  1519. s32i a3, a4, THREAD_CPENABLE
  1520. #endif
  1521. /* Flush register file. */
  1522. call0 _spill_registers # destroys a3, a4, and SAR
  1523. /* Set kernel stack (and leave critical section)
  1524. * Note: It's safe to set it here. The stack will not be overwritten
  1525. * because the kernel stack will only be loaded again after
  1526. * we return from kernel space.
  1527. */
  1528. rsr a3, excsave1 # exc_table
  1529. movi a6, 0
  1530. addi a7, a5, PT_REGS_OFFSET
  1531. s32i a6, a3, EXC_TABLE_FIXUP
  1532. s32i a7, a3, EXC_TABLE_KSTK
  1533. /* restore context of the task 'next' */
  1534. l32i a0, a13, THREAD_RA # restore return address
  1535. l32i a1, a13, THREAD_SP # restore stack pointer
  1536. load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
  1537. wsr a14, ps
  1538. mov a2, a12 # return 'prev'
  1539. rsync
  1540. retw
  1541. ENDPROC(_switch_to)
  1542. ENTRY(ret_from_fork)
  1543. /* void schedule_tail (struct task_struct *prev)
  1544. * Note: prev is still in a6 (return value from fake call4 frame)
  1545. */
  1546. movi a4, schedule_tail
  1547. callx4 a4
  1548. movi a4, do_syscall_trace_leave
  1549. mov a6, a1
  1550. callx4 a4
  1551. j common_exception_return
  1552. ENDPROC(ret_from_fork)
  1553. /*
  1554. * Kernel thread creation helper
  1555. * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
  1556. * left from _switch_to: a6 = prev
  1557. */
  1558. ENTRY(ret_from_kernel_thread)
  1559. call4 schedule_tail
  1560. mov a6, a3
  1561. callx4 a2
  1562. j common_exception_return
  1563. ENDPROC(ret_from_kernel_thread)