intvec_32.S

/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Linux interrupt vectors.
*/
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/atomic.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/abi.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>
#ifdef CONFIG_PREEMPT
# error "No support for kernel preemption currently"
#endif
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
#if !CHIP_HAS_WH64()
/* By making this an empty macro, we can use wh64 in the code. */
.macro wh64 reg
.endm
#endif
.macro push_reg reg, ptr=sp, delta=-4
{
sw \ptr, \reg
addli \ptr, \ptr, \delta
}
.endm
.macro pop_reg reg, ptr=sp, delta=4
{
lw \reg, \ptr
addli \ptr, \ptr, \delta
}
.endm
.macro pop_reg_zero reg, zreg, ptr=sp, delta=4
{
move \zreg, zero
lw \reg, \ptr
addi \ptr, \ptr, \delta
}
.endm
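/*
* Illustrative C-style sketch of the three macros above (not part of
* the original source): each one pairs a store/load with a pointer
* update in a single bundle, assuming 4-byte registers.
*
*   push_reg reg, ptr, delta:   *ptr = reg;  ptr += delta;   // delta defaults to -4
*   pop_reg  reg, ptr, delta:   reg  = *ptr; ptr += delta;   // delta defaults to +4
*   pop_reg_zero reg, zreg:     zreg = 0; reg = *ptr; ptr += delta;
*/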
.macro push_extra_callee_saves reg
PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
push_reg r51, \reg
push_reg r50, \reg
push_reg r49, \reg
push_reg r48, \reg
push_reg r47, \reg
push_reg r46, \reg
push_reg r45, \reg
push_reg r44, \reg
push_reg r43, \reg
push_reg r42, \reg
push_reg r41, \reg
push_reg r40, \reg
push_reg r39, \reg
push_reg r38, \reg
push_reg r37, \reg
push_reg r36, \reg
push_reg r35, \reg
push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
.endm
.macro panic str
.pushsection .rodata, "a"
1:
.asciz "\str"
.popsection
{
moveli r0, lo16(1b)
}
{
auli r0, r0, ha16(1b)
jal panic
}
.endm
#ifdef __COLLECT_LINKER_FEEDBACK__
.pushsection .text.intvec_feedback,"ax"
intvec_feedback:
.popsection
#endif
/*
* Default interrupt handler.
*
* vecnum is where we'll put this code.
* c_routine is the C routine we'll call.
*
* The C routine is passed two arguments:
* - A pointer to the pt_regs state.
* - The interrupt vector number.
*
* The "processing" argument specifies the code for processing
* the interrupt. Defaults to "handle_interrupt".
*/
.macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
.org (\vecnum << 8)
intvec_\vecname:
.ifc \vecnum, INT_SWINT_1
blz TREG_SYSCALL_NR_NAME, sys_cmpxchg
.endif
/* Temporarily save a register so we have somewhere to work. */
mtspr SPR_SYSTEM_SAVE_K_1, r0
mfspr r0, SPR_EX_CONTEXT_K_1
/* The cmpxchg code clears sp to force us to reset it here on fault. */
{
bz sp, 2f
andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
}
.ifc \vecnum, INT_DOUBLE_FAULT
/*
* For double-faults from user-space, fall through to the normal
* register save and stack setup path. Otherwise, it's the
* hypervisor giving us one last chance to dump diagnostics, and we
* branch to the kernel_double_fault routine to do so.
*/
bz r0, 1f
j _kernel_double_fault
1:
.else
/*
* If we're coming from user-space, then set sp to the top of
* the kernel stack. Otherwise, assume sp is already valid.
*/
{
bnz r0, 0f
move r0, sp
}
.endif
.ifc \c_routine, do_page_fault
/*
* The page_fault handler may be downcalled directly by the
* hypervisor even when Linux is running and has ICS set.
*
* In this case the contents of EX_CONTEXT_K_1 reflect the
* previous fault and can't be relied on to choose whether or
* not to reinitialize the stack pointer. So we add a test
* to see whether SYSTEM_SAVE_K_2 has the high bit set,
* and if so we don't reinitialize sp, since we must be coming
* from Linux. (In fact the precise case is !(val & ~1),
* but any Linux PC has to have the high bit set.)
*
* Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
* any path that turns into a downcall to one of our TLB handlers.
*/
mfspr r0, SPR_SYSTEM_SAVE_K_2
{
blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */
move r0, sp
}
.endif
2:
/*
* SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
* the current stack top in the higher bits. So we recover
* our stack top by just masking off the low bits, then
* point sp at the top aligned address on the actual stack page.
*/
mfspr r0, SPR_SYSTEM_SAVE_K_0
mm r0, r0, zero, LOG2_THREAD_SIZE, 31
0:
/*
* Align the stack mod 64 so we can properly predict what
* cache lines we need to write-hint to reduce memory fetch
* latency as we enter the kernel. The layout of memory is
* as follows, with cache line 0 at the lowest VA, and cache
* line 4 just below the r0 value this "andi" computes.
* Note that we never write to cache line 4, and we skip
* cache line 1 for syscalls.
*
* cache line 4: ptregs padding (two words)
* cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad
* cache line 2: r30...r45
* cache line 1: r14...r29
* cache line 0: 2 x frame, r0..r13
*/
andi r0, r0, -64
/*
* Push the first four registers on the stack, so that we can set
* them to vector-unique values before we jump to the common code.
*
* Registers are pushed on the stack as a struct pt_regs,
* with the sp initially just above the struct, and when we're
* done, sp points to the base of the struct, minus
* C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
*
* This routine saves just the first four registers, plus the
* stack context so we can do proper backtracing right away,
* and defers to handle_interrupt to save the rest.
* The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
*/
addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
wh64 r0 /* cache line 3 */
{
sw r0, lr
addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
}
{
sw r0, sp
addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
}
{
sw sp, r52
addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
}
wh64 sp /* cache line 0 */
{
sw sp, r1
addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
}
{
sw sp, r2
addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
}
{
sw sp, r3
addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
}
mfspr r0, SPR_EX_CONTEXT_K_0
.ifc \processing,handle_syscall
/*
* Bump the saved PC by one bundle so that when we return, we won't
* execute the same swint instruction again. We need to do this while
* we're in the critical section.
*/
addi r0, r0, 8
.endif
{
sw sp, r0
addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
}
mfspr r0, SPR_EX_CONTEXT_K_1
{
sw sp, r0
addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
/*
* Use r0 for syscalls so it's a temporary; use r1 for interrupts
* so that it gets passed through unchanged to the handler routine.
* Note that the .if conditional confusingly spans bundles.
*/
.ifc \processing,handle_syscall
movei r0, \vecnum
}
{
sw sp, r0
.else
movei r1, \vecnum
}
{
sw sp, r1
.endif
addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
}
mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
{
sw sp, r0
addi sp, sp, -PTREGS_OFFSET_REG(0) - 4
}
{
sw sp, zero /* write zero into "Next SP" frame pointer */
addi sp, sp, -4 /* leave SP pointing at bottom of frame */
}
.ifc \processing,handle_syscall
j handle_syscall
.else
/*
* Capture per-interrupt SPR context to registers.
* We overload the meaning of r3 on this path such that if its bit 31
* is set, we have to mask all interrupts including NMIs before
* clearing the interrupt critical section bit.
* See discussion below at "finish_interrupt_save".
*/
.ifc \c_routine, do_page_fault
mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
.else
.ifc \vecnum, INT_DOUBLE_FAULT
{
mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
movei r3, 0
}
.else
.ifc \c_routine, do_trap
{
mfspr r2, GPV_REASON
movei r3, 0
}
.else
.ifc \c_routine, op_handle_perf_interrupt
{
mfspr r2, PERF_COUNT_STS
movei r3, -1 /* not used, but set for consistency */
}
.else
#if CHIP_HAS_AUX_PERF_COUNTERS()
.ifc \c_routine, op_handle_aux_perf_interrupt
{
mfspr r2, AUX_PERF_COUNT_STS
movei r3, -1 /* not used, but set for consistency */
}
.else
#endif
movei r3, 0
#if CHIP_HAS_AUX_PERF_COUNTERS()
.endif
#endif
.endif
.endif
.endif
.endif
/* Put function pointer in r0 */
moveli r0, lo16(\c_routine)
{
auli r0, r0, ha16(\c_routine)
j \processing
}
.endif
ENDPROC(intvec_\vecname)
#ifdef __COLLECT_LINKER_FEEDBACK__
.pushsection .text.intvec_feedback,"ax"
.org (\vecnum << 5)
FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
jrp lr
.popsection
#endif
.endm
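/*
* Illustrative int_hand invocations (hypothetical values; the real
* vector table appears further down in this file):
*
*   int_hand INT_ITLB_MISS, ITLB_MISS, do_page_fault
*   int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
*
* Each invocation emits a stub at (vecnum << 8) that saves minimal
* state, loads the C routine address into r0, and jumps to the common
* "processing" code (handle_interrupt by default).
*/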
/*
* Save the rest of the registers that we didn't save in the actual
* vector itself. We can't use r0-r10 inclusive here.
*/
.macro finish_interrupt_save, function
/* If it's a syscall, save a proper orig_r0, otherwise just zero. */
PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
{
.ifc \function,handle_syscall
sw r52, r0
.else
sw r52, zero
.endif
PTREGS_PTR(r52, PTREGS_OFFSET_TP)
}
/*
* For ordinary syscalls, we save neither caller- nor callee-
* save registers, since the syscall invoker doesn't expect the
* caller-saves to be saved, and the called kernel functions will
* take care of saving the callee-saves for us.
*
* For interrupts we save just the caller-save registers. Saving
* them is required (since the "caller" can't save them). Again,
* the called kernel functions will restore the callee-save
* registers for us appropriately.
*
* On return, we normally restore nothing special for syscalls,
* and just the caller-save registers for interrupts.
*
* However, there are some important caveats to all this:
*
* - We always save a few callee-save registers to give us
* some scratchpad registers to carry across function calls.
*
* - fork/vfork/etc require us to save all the callee-save
* registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
*
* - We always save r0..r5 and r10 for syscalls, since we need
* to reload them a bit later for the actual kernel call, and
* since we might need them for -ERESTARTNOINTR, etc.
*
* - Before invoking a signal handler, we save the unsaved
* callee-save registers so they are visible to the
* signal handler or any ptracer.
*
* - If the unsaved callee-save registers are modified, we set
* a bit in pt_regs so we know to reload them from pt_regs
* and not just rely on the kernel function unwinding.
* (Done for ptrace register writes and SA_SIGINFO handler.)
*/
{
sw r52, tp
PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
}
wh64 r52 /* cache line 2 */
push_reg r33, r52
push_reg r32, r52
push_reg r31, r52
.ifc \function,handle_syscall
push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
push_reg TREG_SYSCALL_NR_NAME, r52, \
PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
.else
push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
wh64 r52 /* cache line 1 */
push_reg r29, r52
push_reg r28, r52
push_reg r27, r52
push_reg r26, r52
push_reg r25, r52
push_reg r24, r52
push_reg r23, r52
push_reg r22, r52
push_reg r21, r52
push_reg r20, r52
push_reg r19, r52
push_reg r18, r52
push_reg r17, r52
push_reg r16, r52
push_reg r15, r52
push_reg r14, r52
push_reg r13, r52
push_reg r12, r52
push_reg r11, r52
push_reg r10, r52
push_reg r9, r52
push_reg r8, r52
push_reg r7, r52
push_reg r6, r52
.endif
push_reg r5, r52
sw r52, r4
/* Load tp with our per-cpu offset. */
#ifdef CONFIG_SMP
{
mfspr r20, SPR_SYSTEM_SAVE_K_0
moveli r21, lo16(__per_cpu_offset)
}
{
auli r21, r21, ha16(__per_cpu_offset)
mm r20, r20, zero, 0, LOG2_THREAD_SIZE-1
}
s2a r20, r20, r21
lw tp, r20
#else
move tp, zero
#endif
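/*
* What the CONFIG_SMP block above computes, as a C-style sketch
* (illustrative only):
*
*   unsigned int cpu = SYSTEM_SAVE_K_0 & ((1 << LOG2_THREAD_SIZE) - 1);  /* low bits hold cpu# */
*   tp = ((unsigned long *)__per_cpu_offset)[cpu];  /* s2a: base + cpu * 4 */
*
* On !SMP builds, tp is simply zeroed.
*/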
/*
* If we will be returning to the kernel, we will need to
* reset the interrupt masks to the state they had before.
* Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
* We load flags in r32 here so we can jump to .Lrestore_regs
* directly after do_page_fault_ics() if necessary.
*/
mfspr r32, SPR_EX_CONTEXT_K_1
{
andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
}
bzt r32, 1f /* zero if from user space */
IRQS_DISABLED(r32) /* zero if irqs enabled */
#if PT_FLAGS_DISABLE_IRQ != 1
# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
#endif
1:
.ifnc \function,handle_syscall
/* Record the fact that we saved the caller-save registers above. */
ori r32, r32, PT_FLAGS_CALLER_SAVES
.endif
sw r21, r32
#ifdef __COLLECT_LINKER_FEEDBACK__
/*
* Notify the feedback routines that we were in the
* appropriate fixed interrupt vector area. Note that we
* still have ICS set at this point, so we can't invoke any
* atomic operations or we will panic. The feedback
* routines internally preserve r0..r10 and r30 up.
*/
.ifnc \function,handle_syscall
shli r20, r1, 5
.else
moveli r20, INT_SWINT_1 << 5
.endif
addli r20, r20, lo16(intvec_feedback)
auli r20, r20, ha16(intvec_feedback)
jalr r20
/* And now notify the feedback routines that we are here. */
FEEDBACK_ENTER(\function)
#endif
/*
* we've captured enough state to the stack (including in
* particular our EX_CONTEXT state) that we can now release
* the interrupt critical section and replace it with our
* standard "interrupts disabled" mask value. This allows
* synchronous interrupts (and profile interrupts) to punch
* through from this point onwards.
*
* If bit 31 of r3 is set during a non-NMI interrupt, we know we
* are on the path where the hypervisor has punched through our
* ICS with a page fault, so we call out to do_page_fault_ics()
* to figure out what to do with it. If the fault was in
* an atomic op, we unlock the atomic lock, adjust the
* saved register state a little, and return "zero" in r4,
* falling through into the normal page-fault interrupt code.
* If the fault was in a kernel-space atomic operation, then
* do_page_fault_ics() resolves it itself, returns "one" in r4,
* and as a result goes directly to restoring registers and iret,
* without trying to adjust the interrupt masks at all.
* The do_page_fault_ics() API involves passing and returning
* a five-word struct (in registers) to avoid writing the
* save and restore code here.
*/
.ifc \function,handle_nmi
IRQ_DISABLE_ALL(r20)
.else
.ifnc \function,handle_syscall
bgezt r3, 1f
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
jal do_page_fault_ics
}
FEEDBACK_REENTER(\function)
bzt r4, 1f
j .Lrestore_regs
1:
.endif
IRQ_DISABLE(r20, r21)
.endif
mtspr INTERRUPT_CRITICAL_SECTION, zero
#if CHIP_HAS_WH64()
/*
* Prepare the first 256 stack bytes to be rapidly accessible
* without having to fetch the background data. We don't really
* know how far to write-hint, but kernel stacks generally
* aren't that big, and write-hinting here does take some time.
*/
addi r52, sp, -64
{
wh64 r52
addi r52, r52, -64
}
{
wh64 r52
addi r52, r52, -64
}
{
wh64 r52
addi r52, r52, -64
}
wh64 r52
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
.ifnc \function,handle_nmi
/*
* We finally have enough state set up to notify the irq
* tracing code that irqs were disabled on entry to the handler.
* The TRACE_IRQS_OFF call clobbers registers r0-r29.
* For syscalls, we already have the register state saved away
* on the stack, so we don't bother to do any register saves here,
* and later we pop the registers back off the kernel stack.
* For interrupt handlers, save r0-r3 in callee-saved registers.
*/
.ifnc \function,handle_syscall
{ move r30, r0; move r31, r1 }
{ move r32, r2; move r33, r3 }
.endif
TRACE_IRQS_OFF
.ifnc \function,handle_syscall
{ move r0, r30; move r1, r31 }
{ move r2, r32; move r3, r33 }
.endif
.endif
#endif
.endm
.macro check_single_stepping, kind, not_single_stepping
/*
* Check for single stepping in user-level priv
* kind can be "normal", "ill", or "syscall"
* At end, if fall-thru
* r29: thread_info->step_state
* r28: &pt_regs->pc
* r27: pt_regs->pc
* r26: thread_info->step_state->buffer
*/
/* Check for single stepping */
GET_THREAD_INFO(r29)
{
/* Get pointer to field holding step state */
addi r29, r29, THREAD_INFO_STEP_STATE_OFFSET
/* Get pointer to EX1 in register state */
PTREGS_PTR(r27, PTREGS_OFFSET_EX1)
}
{
/* Get pointer to field holding PC */
PTREGS_PTR(r28, PTREGS_OFFSET_PC)
/* Load the pointer to the step state */
lw r29, r29
}
/* Load EX1 */
lw r27, r27
{
/* Points to flags */
addi r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET
/* No single stepping if there is no step state structure */
bzt r29, \not_single_stepping
}
{
/* mask off ICS and any other high bits */
andi r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK
/* Load pointer to single step instruction buffer */
lw r26, r29
}
/* Check priv state */
bnz r27, \not_single_stepping
/* Get flags */
lw r22, r23
{
/* Branch if single-step mode not enabled */
bbnst r22, \not_single_stepping
/* Clear enabled flag */
andi r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED
}
.ifc \kind,normal
{
/* Load PC */
lw r27, r28
/* Point to the entry containing the original PC */
addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
}
{
/* Disable single stepping flag */
sw r23, r22
}
{
/* Get the original pc */
lw r24, r24
/* See if the PC is at the start of the single step buffer */
seq r25, r26, r27
}
/*
* NOTE: it is really expected that the PC be in the single step buffer
* at this point
*/
bzt r25, \not_single_stepping
/* Restore the original PC */
sw r28, r24
.else
.ifc \kind,syscall
{
/* Load PC */
lw r27, r28
/* Point to the entry containing the next PC */
addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
}
{
/* Increment the stopped PC by the bundle size */
addi r26, r26, 8
/* Disable single stepping flag */
sw r23, r22
}
{
/* Get the next pc */
lw r24, r24
/*
* See if the PC is one bundle past the start of the
* single step buffer
*/
seq r25, r26, r27
}
{
/*
* NOTE: it is really expected that the PC be in the
* single step buffer at this point
*/
bzt r25, \not_single_stepping
}
/* Set to the next PC */
sw r28, r24
.else
{
/* Point to 3rd bundle in buffer */
addi r25, r26, 16
/* Load PC */
lw r27, r28
}
{
/* Disable single stepping flag */
sw r23, r22
/* See if the PC is in the single step buffer */
slte_u r24, r26, r27
}
{
slte_u r25, r27, r25
/*
* NOTE: it is really expected that the PC be in the
* single step buffer at this point
*/
bzt r24, \not_single_stepping
}
bzt r25, \not_single_stepping
.endif
.endif
.endm
/*
* Redispatch a downcall.
*/
.macro dc_dispatch vecnum, vecname
.org (\vecnum << 8)
intvec_\vecname:
j hv_downcall_dispatch
ENDPROC(intvec_\vecname)
.endm
/*
* Common code for most interrupts. The C function we're eventually
* going to is in r0, and the faultnum is in r1; the original
* values for those registers are on the stack.
*/
.pushsection .text.handle_interrupt,"ax"
handle_interrupt:
finish_interrupt_save handle_interrupt
/*
* Check whether we are single-stepping at user level. If so,
* we need to restore the PC.
*/
check_single_stepping normal, .Ldispatch_interrupt
.Ldispatch_interrupt:
/* Jump to the C routine; it should enable irqs as soon as possible. */
{
jalr r0
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
}
FEEDBACK_REENTER(handle_interrupt)
{
movei r30, 0 /* not an NMI */
j interrupt_return
}
STD_ENDPROC(handle_interrupt)
/*
* This routine takes a boolean in r30 indicating if this is an NMI.
* If so, we also expect a boolean in r31 indicating whether to
* re-enable the oprofile interrupts.
*/
STD_ENTRY(interrupt_return)
/* If we're resuming to kernel space, don't check thread flags. */
{
bnz r30, .Lrestore_all /* NMIs don't special-case user-space */
PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
}
lw r29, r29
andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
{
bzt r29, .Lresume_userspace
PTREGS_PTR(r29, PTREGS_OFFSET_PC)
}
/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
{
lw r28, r29
moveli r27, lo16(_cpu_idle_nap)
}
{
auli r27, r27, ha16(_cpu_idle_nap)
}
{
seq r27, r27, r28
}
{
bbns r27, .Lrestore_all
addi r28, r28, 8
}
sw r29, r28
j .Lrestore_all
.Lresume_userspace:
FEEDBACK_REENTER(interrupt_return)
/*
* Disable interrupts so as to make sure we don't
* miss an interrupt that sets any of the thread flags (like
* need_resched or sigpending) between sampling and the iret.
* Routines like schedule() or do_signal() may re-enable
* interrupts before returning.
*/
IRQ_DISABLE(r20, r21)
TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
/* Get base of stack in r32; note r30/31 are used as arguments here. */
GET_THREAD_INFO(r32)
/* Check to see if there is any work to do before returning to user. */
{
addi r29, r32, THREAD_INFO_FLAGS_OFFSET
moveli r28, lo16(_TIF_ALLWORK_MASK)
}
{
lw r29, r29
auli r28, r28, ha16(_TIF_ALLWORK_MASK)
}
and r28, r29, r28
bnz r28, .Lwork_pending
/*
* In the NMI case we
* omit the call to single_process_check_nohz, which normally checks
* to see if we should start or stop the scheduler tick, because
* we can't call arbitrary Linux code from an NMI context.
* We always call the homecache TLB deferral code to re-trigger
* the deferral mechanism.
*
* The other chunk of responsibility this code has is to reset the
* interrupt masks appropriately to reset irqs and NMIs. We have
* to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
* lockdep-type stuff, but we can't set ICS until afterwards, since
* ICS can only be used in very tight chunks of code to avoid
* tripping over various assertions that it is off.
*
* (There is what looks like a window of vulnerability here since
* we might take a profile interrupt between the two SPR writes
* that set the mask, but since we write the low SPR word first,
* and our interrupt entry code checks the low SPR word, any
* profile interrupt will actually disable interrupts in both SPRs
* before returning, which is OK.)
*/
.Lrestore_all:
PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
{
lw r0, r0
PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
}
{
andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
lw r32, r32
}
bnz r0, 1f
j 2f
#if PT_FLAGS_DISABLE_IRQ != 1
# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below
#endif
1: bbnst r32, 2f
IRQ_DISABLE(r20,r21)
TRACE_IRQS_OFF
movei r0, 1
mtspr INTERRUPT_CRITICAL_SECTION, r0
bzt r30, .Lrestore_regs
j 3f
2: TRACE_IRQS_ON
movei r0, 1
mtspr INTERRUPT_CRITICAL_SECTION, r0
IRQ_ENABLE(r20, r21)
bzt r30, .Lrestore_regs
3:
/*
* We now commit to returning from this interrupt, since we will be
* doing things like setting EX_CONTEXT SPRs and unwinding the stack
* frame. No calls should be made to any other code after this point.
* This code should only be entered with ICS set.
* r32 must still be set to ptregs.flags.
* We launch loads to each cache line separately first, so we can
* get some parallelism out of the memory subsystem.
* We start zeroing caller-saved registers throughout, since
* that will save some cycles if this turns out to be a syscall.
*/
.Lrestore_regs:
FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
/*
* Rotate so we have one high bit and one low bit to test.
* - low bit says whether to restore all the callee-saved registers,
* or just r30-r33, and r52 up.
* - high bit (i.e. sign bit) says whether to restore all the
* caller-saved registers, or just r0.
*/
#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
# error Rotate trick does not work :-)
#endif
{
rli r20, r32, 30
PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
}
/*
* Load cache lines 0, 2, and 3 in that order, then use
* the last loaded value, which makes it likely that the other
* cache lines have also loaded, at which point we should be
* able to safely read all the remaining words on those cache
* lines without waiting for the memory subsystem.
*/
pop_reg_zero r0, r28, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
{
mtspr SPR_EX_CONTEXT_K_0, r21
move r5, zero
}
{
mtspr SPR_EX_CONTEXT_K_1, lr
andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
}
/* Restore callee-saveds that we actually use. */
pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52)
pop_reg_zero r31, r7
pop_reg_zero r32, r8
pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
/*
* If we modified other callee-saveds, restore them now.
* This is rare, but could be via ptrace or signal handler.
*/
{
move r10, zero
bbs r20, .Lrestore_callees
}
.Lcontinue_restore_regs:
/* Check if we're returning from a syscall. */
{
move r11, zero
blzt r20, 1f /* no, so go restore callee-save registers */
}
/*
* Check if we're returning to userspace.
* Note that if we're not, we don't worry about zeroing everything.
*/
{
addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
bnz lr, .Lkernel_return
}
/*
* On return from syscall, we've restored r0 from pt_regs, but we
* clear the remainder of the caller-saved registers. We could
* restore the syscall arguments, but there's not much point,
* and it ensures user programs aren't trying to use the
* caller-saves if we clear them, as well as avoiding leaking
* kernel pointers into userspace.
*/
pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
{
lw sp, sp
move r14, zero
move r15, zero
}
{ move r16, zero; move r17, zero }
{ move r18, zero; move r19, zero }
{ move r20, zero; move r21, zero }
{ move r22, zero; move r23, zero }
{ move r24, zero; move r25, zero }
{ move r26, zero; move r27, zero }
/* Set r1 to errno if we are returning an error, otherwise zero. */
{
moveli r29, 4096
sub r1, zero, r0
}
slt_u r29, r1, r29
{
mnz r1, r29, r1
move r29, zero
}
iret
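/*
* The r1/errno computation above, as a C-style sketch (illustrative
* only):
*
*   long r1 = -r0;                             /* sub r1, zero, r0 */
*   r1 = ((unsigned long)r1 < 4096) ? r1 : 0;  /* slt_u + mnz */
*
* i.e. syscall return values in (-4096, 0) are reported to userspace
* as a positive errno in r1; anything else yields r1 == 0.
*/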
/*
* Not a syscall, so restore caller-saved registers.
* First kick off a load for cache line 1, which we're touching
* for the first time here.
*/
.align 64
1: pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29)
pop_reg r1
pop_reg r2
pop_reg r3
pop_reg r4
pop_reg r5
pop_reg r6
pop_reg r7
pop_reg r8
pop_reg r9
pop_reg r10
pop_reg r11
pop_reg r12
pop_reg r13
pop_reg r14
pop_reg r15
pop_reg r16
pop_reg r17
pop_reg r18
pop_reg r19
pop_reg r20
pop_reg r21
pop_reg r22
pop_reg r23
pop_reg r24
pop_reg r25
pop_reg r26
pop_reg r27
pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
/* r29 already restored above */
bnz lr, .Lkernel_return
pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
lw sp, sp
iret
/*
* We can't restore tp when in kernel mode, since a thread might
* have migrated from another cpu and brought a stale tp value.
*/
.Lkernel_return:
pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
lw sp, sp
iret
/* Restore callee-saved registers from r34 to r51. */
.Lrestore_callees:
addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
pop_reg r34
pop_reg r35
pop_reg r36
pop_reg r37
pop_reg r38
pop_reg r39
pop_reg r40
pop_reg r41
pop_reg r42
pop_reg r43
pop_reg r44
pop_reg r45
pop_reg r46
pop_reg r47
pop_reg r48
pop_reg r49
pop_reg r50
pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
j .Lcontinue_restore_regs
.Lwork_pending:
/* Mask the reschedule flag */
andi r28, r29, _TIF_NEED_RESCHED
{
/*
* If the NEED_RESCHED flag is set, we call schedule(), which
* may drop this context right here and go do something else.
* On return, jump back to .Lresume_userspace and recheck.
*/
bz r28, .Lasync_tlb
/* Mask the async-tlb flag */
andi r28, r29, _TIF_ASYNC_TLB
}
jal schedule
FEEDBACK_REENTER(interrupt_return)
/* Reload the flags and check again */
j .Lresume_userspace
.Lasync_tlb:
{
bz r28, .Lneed_sigpending
/* Mask the sigpending flag */
andi r28, r29, _TIF_SIGPENDING
}
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
jal do_async_page_fault
FEEDBACK_REENTER(interrupt_return)
/*
* Go restart the "resume userspace" process. We may have
* fired a signal, and we need to disable interrupts again.
*/
j .Lresume_userspace
.Lneed_sigpending:
/*
* At this point we are either doing signal handling or single-step,
* so either way make sure we have all the registers saved.
*/
push_extra_callee_saves r0
{
/* If no signal pending, skip to singlestep check */
bz r28, .Lneed_singlestep
/* Mask the singlestep flag */
andi r28, r29, _TIF_SINGLESTEP
}
jal do_signal
FEEDBACK_REENTER(interrupt_return)
/* Reload the flags and check again */
j .Lresume_userspace
.Lneed_singlestep:
{
/* Get a pointer to the EX1 field */
PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
/* If we get here, our bit must be set. */
bz r28, .Lwork_confusion
}
/* If we are in priv mode, don't single step */
lw r28, r29
andi r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
bnz r28, .Lrestore_all
/* Allow interrupts within the single step code */
TRACE_IRQS_ON /* Note: clobbers registers r0-r29 */
IRQ_ENABLE(r20, r21)
/* try to single-step the current instruction */
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
jal single_step_once
FEEDBACK_REENTER(interrupt_return)
/* Re-disable interrupts. TRACE_IRQS_OFF in .Lrestore_all. */
IRQ_DISABLE(r20,r21)
j .Lrestore_all
.Lwork_confusion:
move r0, r28
panic "thread_info allwork flags unhandled on userspace resume: %#x"
STD_ENDPROC(interrupt_return)
/*
* Some interrupts don't check for single stepping
*/
.pushsection .text.handle_interrupt_no_single_step,"ax"
handle_interrupt_no_single_step:
finish_interrupt_save handle_interrupt_no_single_step
{
jalr r0
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
}
FEEDBACK_REENTER(handle_interrupt_no_single_step)
{
movei r30, 0 /* not an NMI */
j interrupt_return
}
STD_ENDPROC(handle_interrupt_no_single_step)
/*
* "NMI" interrupts mask ALL interrupts before calling the
* handler, and don't check thread flags, etc., on the way
* back out. In general, the only things we do here for NMIs
* are the register save/restore, fixing the PC if we were
* doing single step, and the dataplane kernel-TLB management.
* We don't (for example) deal with start/stop of the sched tick.
*/
.pushsection .text.handle_nmi,"ax"
handle_nmi:
finish_interrupt_save handle_nmi
check_single_stepping normal, .Ldispatch_nmi
.Ldispatch_nmi:
{
jalr r0
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
}
FEEDBACK_REENTER(handle_nmi)
j interrupt_return
STD_ENDPROC(handle_nmi)
/*
* Parallel code for syscalls to handle_interrupt.
*/
.pushsection .text.handle_syscall,"ax"
handle_syscall:
finish_interrupt_save handle_syscall
/*
* Check whether we are single-stepping at user level. If so,
* we need to restore the PC.
*/
check_single_stepping syscall, .Ldispatch_syscall
.Ldispatch_syscall:
/* Enable irqs. */
TRACE_IRQS_ON
IRQ_ENABLE(r20, r21)
/* Bump the counter for syscalls made on this tile. */
moveli r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
auli r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
add r20, r20, tp
lw r21, r20
addi r21, r21, 1
sw r20, r21
/* Trace syscalls, if requested. */
GET_THREAD_INFO(r31)
addi r31, r31, THREAD_INFO_FLAGS_OFFSET
lw r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
bzt r30, .Lrestore_syscall_regs
jal do_syscall_trace
FEEDBACK_REENTER(handle_syscall)
/*
* We always reload our registers from the stack at this
* point. They might be valid, if we didn't build with
* TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
* doing syscall tracing, but there are enough cases now that it
* seems simplest just to do the reload unconditionally.
*/
.Lrestore_syscall_regs:
PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
pop_reg r0, r11
pop_reg r1, r11
pop_reg r2, r11
pop_reg r3, r11
pop_reg r4, r11
pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
pop_reg TREG_SYSCALL_NR_NAME, r11
/* Ensure that the syscall number is within the legal range. */
moveli r21, __NR_syscalls
{
slt_u r21, TREG_SYSCALL_NR_NAME, r21
moveli r20, lo16(sys_call_table)
}
{
bbns r21, .Linvalid_syscall
auli r20, r20, ha16(sys_call_table)
}
s2a r20, TREG_SYSCALL_NR_NAME, r20
lw r20, r20
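/*
* The range check and table dispatch here, as a C-style sketch
* (illustrative only):
*
*   if ((unsigned long)syscall_nr >= __NR_syscalls)
*           goto invalid_syscall;            /* bbns on the slt_u result */
*   handler = sys_call_table[syscall_nr];    /* s2a: table + nr * 4 */
*   r0 = handler(r0, r1, r2, r3, r4, r5);    /* jalr r20, below */
*/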
/* Jump to syscall handler. */
jalr r20
.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
/*
* Write our r0 onto the stack so it gets restored instead
* of whatever the user had there before.
*/
PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
sw r29, r0
.Lsyscall_sigreturn_skip:
FEEDBACK_REENTER(handle_syscall)
/* Do syscall trace again, if requested. */
lw r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
bzt r30, 1f
jal do_syscall_trace
FEEDBACK_REENTER(handle_syscall)
1: j .Lresume_userspace /* jump into middle of interrupt_return */
.Linvalid_syscall:
/* Report an invalid syscall back to the user program */
{
PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
movei r28, -ENOSYS
}
sw r29, r28
j .Lresume_userspace /* jump into middle of interrupt_return */
STD_ENDPROC(handle_syscall)
/* Return the address for oprofile to suppress in backtraces. */
STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
lnk r0
{
addli r0, r0, .Lhandle_syscall_link - .
jrp lr
}
STD_ENDPROC(handle_syscall_link_address)
STD_ENTRY(ret_from_fork)
jal sim_notify_fork
jal schedule_tail
FEEDBACK_REENTER(ret_from_fork)
j .Lresume_userspace /* jump into middle of interrupt_return */
STD_ENDPROC(ret_from_fork)
/*
* Code for ill interrupt.
*/
.pushsection .text.handle_ill,"ax"
handle_ill:
finish_interrupt_save handle_ill
/*
* Check whether we are single-stepping at user level. If so,
* we need to restore the PC.
*/
check_single_stepping ill, .Ldispatch_normal_ill
{
/* See if the PC is the 1st bundle in the buffer */
seq r25, r27, r26
/* Point to the 2nd bundle in the buffer */
addi r26, r26, 8
}
{
/* Point to the original pc */
addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
/* Branch if the PC is the 1st bundle in the buffer */
bnz r25, 3f
}
{
/* See if the PC is the 2nd bundle of the buffer */
seq r25, r27, r26
/* Set PC to next instruction */
addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
}
{
/* Point to flags */
addi r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET
/* Branch if PC is in the second bundle */
bz r25, 2f
}
/* Load flags */
lw r25, r25
{
/*
* Get the offset for the register to restore
* Note: the lower bound is 2, so we have implicit scaling by 4.
* No multiplication of the register number by the size of a register
* is needed.
*/
mm r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \
SINGLESTEP_STATE_TARGET_UB
/* Mask Rewrite_LR */
andi r25, r25, SINGLESTEP_STATE_MASK_UPDATE
}
{
addi r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET
/* Don't rewrite temp register */
bz r25, 3f
}
{
/* Get the temp value */
lw r29, r29
/* Point to where the register is stored */
add r27, r27, sp
}
/* Add in the C ABI save area size to the register offset */
addi r27, r27, C_ABI_SAVE_AREA_SIZE
/* Restore the user's register with the temp value */
sw r27, r29
j 3f
2:
/* Must be in the third bundle */
addi r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET
3:
/* set PC and continue */
lw r26, r24
sw r28, r26
/*
* Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
* The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
* need to clear it here and can't really impose on all other arches.
* So what's another write between friends?
*/
GET_THREAD_INFO(r0)
addi r1, r0, THREAD_INFO_FLAGS_OFFSET
{
lw r2, r1
addi r0, r0, THREAD_INFO_TASK_OFFSET /* currently a no-op */
}
andi r2, r2, ~_TIF_SINGLESTEP
sw r1, r2
/* Issue a sigtrap */
{
lw r0, r0 /* indirect thru thread_info to get the task_struct */
addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */
move r2, zero /* load error code into r2 */
}
jal send_sigtrap /* issue a SIGTRAP */
FEEDBACK_REENTER(handle_ill)
j .Lresume_userspace /* jump into middle of interrupt_return */
.Ldispatch_normal_ill:
{
jalr r0
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
}
FEEDBACK_REENTER(handle_ill)
{
movei r30, 0 /* not an NMI */
j interrupt_return
}
STD_ENDPROC(handle_ill)
/* Various stub interrupt handlers and syscall handlers */
STD_ENTRY_LOCAL(_kernel_double_fault)
mfspr r1, SPR_EX_CONTEXT_K_0
move r2, lr
move r3, sp
move r4, r52
addi sp, sp, -C_ABI_SAVE_AREA_SIZE
j kernel_double_fault
STD_ENDPROC(_kernel_double_fault)
STD_ENTRY_LOCAL(bad_intr)
mfspr r2, SPR_EX_CONTEXT_K_0
panic "Unhandled interrupt %#x: PC %#lx"
STD_ENDPROC(bad_intr)
/* Put address of pt_regs in reg and jump. */
#define PTREGS_SYSCALL(x, reg) \
STD_ENTRY(_##x); \
{ \
PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
j x \
}; \
STD_ENDPROC(_##x)
/*
* Special-case sigreturn to not write r0 to the stack on return.
* This is technically more efficient, but it also avoids difficulties
* in the 64-bit OS when handling 32-bit compat code, since we must not
* sign-extend r0 for the sigreturn return-value case.
*/
#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
STD_ENTRY(_##x); \
addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
{ \
PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
j x \
}; \
STD_ENDPROC(_##x)
PTREGS_SYSCALL(sys_execve, r3)
PTREGS_SYSCALL(sys_sigaltstack, r2)
PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
STD_ENTRY(_sys_clone)
push_extra_callee_saves r4
j sys_clone
STD_ENDPROC(_sys_clone)
/*
* This entrypoint is taken for the cmpxchg and atomic_update fast
* swints. We may wish to generalize it to other fast swints at some
* point, but for now there are just two very similar ones, which
* makes it faster.
*
* The fast swint code is designed to have a small footprint. It does
* not save or restore any GPRs, counting on the caller-save registers
* to be available to it on entry. It does not modify any callee-save
* registers (including "lr"). It does not check what PL it is being
* called at, so you'd better not call it other than at PL0.
* The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
* it ever is necessary to use more registers, be aware.
*
* It does not use the stack, but since it might be re-interrupted by
* a page fault which would assume the stack was valid, it does
* save/restore the stack pointer and zero it out to make sure it gets reset.
* Since we always keep interrupts disabled, the hypervisor won't
* clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
* (other than to advance the PC on return).
*
* We have to manually validate the user vs kernel address range
* (since at PL1 we can read/write both), and for performance reasons
* we don't allow cmpxchg on the fc000000 memory region, since we only
* validate that the user address is below PAGE_OFFSET.
*
* We place it in the __HEAD section to ensure it is relatively
* near to the intvec_SWINT_1 code (reachable by a conditional branch).
*
* Must match register usage in do_page_fault().
*/
__HEAD
.align 64
/* Align much later jump on the start of a cache line. */
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
nop
#if PAGE_SIZE >= 0x10000
nop
#endif
#endif
ENTRY(sys_cmpxchg)
/*
* Save "sp" and set it zero for any possible page fault.
*
* HACK: We want to both zero sp and check r0's alignment,
* so we do both at once. If "sp" becomes nonzero we
* know r0 is unaligned and branch to the error handler that
* restores sp, so this is OK.
*
* ICS is disabled right now so having a garbage but nonzero
* sp is OK, since we won't execute any faulting instructions
* when it is nonzero.
*/
{
move r27, sp
andi sp, r0, 3
}
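/*
* The bundle above, as a C-style sketch (illustrative only): it parks
* the real stack pointer and computes the alignment check in one shot:
*
*   r27 = sp;       /* saved so .Lcmpxchg_badaddr can restore it */
*   sp  = r0 & 3;   /* nonzero iff the user address is misaligned */
*/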
  1436. /*
  1437. * Get the lock address in ATOMIC_LOCK_REG, and also validate that the
  1438. * address is less than PAGE_OFFSET, since that won't trap at PL1.
  1439. * We only use bits less than PAGE_SHIFT to avoid having to worry
  1440. * about aliasing among multiple mappings of the same physical page,
  1441. * and we ignore the low 3 bits so we have one lock that covers
  1442. * both a cmpxchg64() and a cmpxchg() on either its low or high word.
  1443. * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
  1444. */
  1445. #if (PAGE_OFFSET & 0xffff) != 0
  1446. # error Code here assumes PAGE_OFFSET can be loaded with just hi16()
  1447. #endif
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	{
	 /* Check for unaligned input. */
	 bnz    sp, .Lcmpxchg_badaddr
	 mm     r25, r0, zero, 3, PAGE_SHIFT-1
	}
	{
	 crc32_32 r25, zero, r25
	 moveli r21, lo16(atomic_lock_ptr)
	}
	{
	 auli   r21, r21, ha16(atomic_lock_ptr)
	 auli   r23, zero, hi16(PAGE_OFFSET)	/* hugepage-aligned */
	}
	{
	 shri   r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
	 slt_u  r23, r0, r23
	 /*
	  * Ensure that the TLB is loaded before we take out the lock.
	  * On TILEPro, this will start fetching the value all the way
	  * into our L1 as well (and if it gets modified before we
	  * grab the lock, it will be invalidated from our cache
	  * before we reload it). On tile64, we'll start fetching it
	  * into our L1 if we're the home, and if we're not, we'll
	  * still at least start fetching it into the home's L2.
	  */
	 lw     r26, r0
	}
	{
	 s2a    r21, r20, r21
	 bbns   r23, .Lcmpxchg_badaddr
	}
	{
	 lw     r21, r21
	 seqi   r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
	 andi   r25, r25, ATOMIC_HASH_L2_SIZE - 1
	}
	{
	 /* Branch away at this point if we're doing a 64-bit cmpxchg. */
	 bbs    r23, .Lcmpxchg64
	 andi   r23, r0, 7	/* Precompute alignment for cmpxchg64. */
	}
	{
	 /*
	  * We very carefully align the code that actually runs with
	  * the lock held (nine bundles) so that we know it is all in
	  * the icache when we start. This instruction (the jump) is
	  * at the start of the first cache line, address zero mod 64;
	  * we jump to somewhere in the second cache line to issue the
	  * tns, then jump back to finish up.
	  */
	 s2a    ATOMIC_LOCK_REG_NAME, r25, r21
	 j      .Lcmpxchg32_tns
	}
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
	{
	 /* Check for unaligned input. */
	 bnz    sp, .Lcmpxchg_badaddr
	 auli   r23, zero, hi16(PAGE_OFFSET)	/* hugepage-aligned */
	}
	{
	 /*
	  * Slide bits into position for 'mm'. We want to ignore
	  * the low 3 bits of r0, and consider only the next
	  * ATOMIC_HASH_SHIFT bits.
	  * Because of C pointer arithmetic, we want to compute this:
	  *
	  * ((char*)atomic_locks +
	  *  (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
	  *
	  * Instead of two shifts we just ">> 1", and use 'mm'
	  * to ignore the low and high bits we don't want.
	  */
	 shri   r25, r0, 1
	 slt_u  r23, r0, r23
	 /*
	  * Ensure that the TLB is loaded before we take out the lock.
	  * On tilepro, this will start fetching the value all the way
	  * into our L1 as well (and if it gets modified before we
	  * grab the lock, it will be invalidated from our cache
	  * before we reload it). On tile64, we'll start fetching it
	  * into our L1 if we're the home, and if we're not, we'll
	  * still at least start fetching it into the home's L2.
	  */
	 lw     r26, r0
	}
	{
	 auli   r21, zero, ha16(atomic_locks)
	 bbns   r23, .Lcmpxchg_badaddr
	}
#if PAGE_SIZE < 0x10000
	/* atomic_locks is page-aligned so for big pages we don't need this. */
	addli   r21, r21, lo16(atomic_locks)
#endif
	{
	 /*
	  * Insert the hash bits into the page-aligned pointer.
	  * ATOMIC_HASH_SHIFT is so big that we don't actually hash
	  * the unmasked address bits, as that may cause unnecessary
	  * collisions.
	  */
	 mm     ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1
	 seqi   r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
	}
	{
	 /* Branch away at this point if we're doing a 64-bit cmpxchg. */
	 bbs    r23, .Lcmpxchg64
	 andi   r23, r0, 7	/* Precompute alignment for cmpxchg64. */
	}
	{
	 /*
	  * We very carefully align the code that actually runs with
	  * the lock held (nine bundles) so that we know it is all in
	  * the icache when we start. This instruction (the jump) is
	  * at the start of the first cache line, address zero mod 64;
	  * we jump to somewhere in the second cache line to issue the
	  * tns, then jump back to finish up.
	  */
	 j      .Lcmpxchg32_tns
	}
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
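/*
 * The NOTE earlier in sys_cmpxchg says this lock-address computation
 * must stay in sync with __atomic_hashed_lock() in lib/atomic_32.c.
 * As a rough C-level illustration of the non-table variant above (a
 * sketch only, not that function; lock_for() and "addr" are just names
 * used here), the lock word is selected by bits [3, 3 + ATOMIC_HASH_SHIFT)
 * of the address's page offset, scaled to an int slot in atomic_locks[]:
 *
 *	static int *lock_for(unsigned long addr)
 *	{
 *		unsigned long off = addr & (PAGE_SIZE - 1) & ~7UL;
 *		unsigned long idx = (off >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1);
 *		return &atomic_locks[idx];
 *	}
 *
 * The 'mm' bundle above folds the ">> 3" and the "* sizeof(int)" into
 * a single ">> 1" plus a bit-field insert into the page-aligned
 * atomic_locks pointer.
 */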
ENTRY(__sys_cmpxchg_grab_lock)

	/*
	 * Perform the actual cmpxchg or atomic_update.
	 * Note that the system <arch/atomic.h> header relies on
	 * atomic_update() to always perform an "mf", so don't make
	 * it optional or conditional without modifying that code.
	 */
.Ldo_cmpxchg32:
	{
	 lw     r21, r0
	 seqi   r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update
	 move   r24, r2
	}
	{
	 seq    r22, r21, r1	/* See if cmpxchg matches. */
	 and    r25, r21, r1	/* If atomic_update, compute (*mem & mask) */
	}
	{
	 or     r22, r22, r23	/* Skip compare branch for atomic_update. */
	 add    r25, r25, r2	/* Compute (*mem & mask) + addend. */
	}
	{
	 mvnz   r24, r23, r25	/* Use atomic_update value if appropriate. */
	 bbns   r22, .Lcmpxchg32_mismatch
	}
	sw      r0, r24

	/* Do slow mtspr here so the following "mf" waits less. */
	{
	 move   sp, r27
	 mtspr  SPR_EX_CONTEXT_K_0, r28
	}
	mf

	/* The following instruction is the start of the second cache line. */
	{
	 move   r0, r21
	 sw     ATOMIC_LOCK_REG_NAME, zero
	}
	iret

	/* Duplicated code here in the case where we don't overlap "mf" */
.Lcmpxchg32_mismatch:
	{
	 move   r0, r21
	 sw     ATOMIC_LOCK_REG_NAME, zero
	}
	{
	 move   sp, r27
	 mtspr  SPR_EX_CONTEXT_K_0, r28
	}
	iret

	/*
	 * The locking code is the same for 32-bit cmpxchg/atomic_update,
	 * and for 64-bit cmpxchg. We provide it as a macro and put
	 * it into both versions. We can't share the code literally
	 * since it depends on having the right branch-back address.
	 * Note that the first few instructions should share the cache
	 * line with the second half of the actual locked code.
	 */
	.macro cmpxchg_lock, bitwidth

	/* Lock; if we succeed, jump back up to the read-modify-write. */
#ifdef CONFIG_SMP
	tns     r21, ATOMIC_LOCK_REG_NAME
#else
	/*
	 * Non-SMP preserves all the lock infrastructure, to keep the
	 * code simpler for the interesting (SMP) case. However, we do
	 * one small optimization here and in atomic_asm.S, which is
	 * to fake out acquiring the actual lock in the atomic_lock table.
	 */
	movei   r21, 0
#endif

	/* Issue the slow SPR here while the tns result is in flight. */
	mfspr   r28, SPR_EX_CONTEXT_K_0

	{
	 addi   r28, r28, 8	/* return to the instruction after the swint1 */
	 bzt    r21, .Ldo_cmpxchg\bitwidth
	}
	/*
	 * The preceding instruction is the last thing that must be
	 * on the second cache line.
	 */

#ifdef CONFIG_SMP
	/*
	 * We failed to acquire the tns lock on our first try. Now use
	 * bounded exponential backoff to retry, like __atomic_spinlock().
	 */
	{
	 moveli r23, 2048	/* maximum backoff time in cycles */
	 moveli r25, 32		/* starting backoff time in cycles */
	}
1:	mfspr   r26, CYCLE_LOW	/* get start point for this backoff */
2:	mfspr   r22, CYCLE_LOW	/* test to see if we've backed off enough */
	sub     r22, r22, r26
	slt     r22, r22, r25
	bbst    r22, 2b
	{
	 shli   r25, r25, 1	/* double the backoff; retry the tns */
	 tns    r21, ATOMIC_LOCK_REG_NAME
	}
	slt     r26, r23, r25	/* is the proposed backoff too big? */
	{
	 mvnz   r25, r26, r23
	 bzt    r21, .Ldo_cmpxchg\bitwidth
	}
	j       1b
#endif /* CONFIG_SMP */
	.endm
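/*
 * In rough C terms, the SMP retry path of the macro amounts to a
 * bounded exponential backoff loop (a sketch only; try_tns() and
 * get_cycles() are illustrative stand-ins for issuing "tns" and
 * reading SPR CYCLE_LOW):
 *
 *	static void grab_hashed_lock(volatile int *lock)
 *	{
 *		unsigned int delay = 32;		// starting backoff, in cycles
 *		while (try_tns(lock) != 0) {		// nonzero: lock already held
 *			unsigned int start = get_cycles();
 *			while (get_cycles() - start < delay)
 *				;			// spin off the lock word
 *			delay *= 2;			// double the backoff...
 *			if (delay > 2048)
 *				delay = 2048;		// ...bounded at 2048 cycles
 *		}
 *	}
 */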
.Lcmpxchg32_tns:
	cmpxchg_lock 32

	/*
	 * This code is invoked from sys_cmpxchg after most of the
	 * preconditions have been checked. We still need to check
	 * that r0 is 8-byte aligned, since if it's not we won't
	 * actually be atomic. However, ATOMIC_LOCK_REG has the atomic
	 * lock pointer and r27/r28 have the saved SP/PC.
	 * r23 is holding "r0 & 7" so we can test for alignment.
	 * The compare value is in r2/r3; the new value is in r4/r5.
	 * On return, we must put the old value in r0/r1.
	 */
	.align 64
.Lcmpxchg64:
	{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	 s2a    ATOMIC_LOCK_REG_NAME, r25, r21
#endif
	 bzt    r23, .Lcmpxchg64_tns
	}
	j       .Lcmpxchg_badaddr

.Ldo_cmpxchg64:
	{
	 lw     r21, r0
	 addi   r25, r0, 4
	}
	{
	 lw     r1, r25
	}
	seq     r26, r21, r2
	{
	 bz     r26, .Lcmpxchg64_mismatch
	 seq    r26, r1, r3
	}
	{
	 bz     r26, .Lcmpxchg64_mismatch
	}
	sw      r0, r4
	sw      r25, r5

	/*
	 * The 32-bit path provides optimized "match" and "mismatch"
	 * iret paths, but we don't have enough bundles in this cache line
	 * to do that, so we just make even the "mismatch" path do an "mf".
	 */
.Lcmpxchg64_mismatch:
	{
	 move   sp, r27
	 mtspr  SPR_EX_CONTEXT_K_0, r28
	}
	mf
	{
	 move   r0, r21
	 sw     ATOMIC_LOCK_REG_NAME, zero
	}
	iret

.Lcmpxchg64_tns:
	cmpxchg_lock 64
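/*
 * As with the 32-bit path, a rough C-level sketch of the locked 64-bit
 * sequence above (illustration only; the binding interface is the
 * register convention described before .Lcmpxchg64, and lock_hashed()/
 * unlock_hashed() are the same hypothetical names used earlier):
 *
 *	static long long fast_cmpxchg64(long long *p,
 *					long long oldval, long long newval)
 *	{
 *		long long cur;
 *		lock_hashed(p);		// one lock covers both 32-bit words
 *		cur = *p;		// read low word, then high word
 *		if (cur == oldval)
 *			*p = newval;	// stored as two 32-bit words
 *		unlock_hashed(p);	// always "mf", then clear the lock
 *		return cur;		// returned to the caller in r0/r1
 *	}
 */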
	/*
	 * Reset sp and revector to sys_cmpxchg_badaddr(), which will
	 * just raise the appropriate signal and exit. Doing it this
	 * way means we don't have to duplicate the code in intvec.S's
	 * int_hand macro that locates the top of the stack.
	 */
.Lcmpxchg_badaddr:
	{
	 moveli TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr
	 move   sp, r27
	}
	j       intvec_SWINT_1
	ENDPROC(sys_cmpxchg)

ENTRY(__sys_cmpxchg_end)
/* The single-step support may need to read all the registers. */
int_unalign:
	push_extra_callee_saves r0
	j       do_trap

/* Include .intrpt1 array of interrupt vectors */
	.section ".intrpt1", "ax"

#define op_handle_perf_interrupt bad_intr
#define op_handle_aux_perf_interrupt bad_intr

#ifndef CONFIG_HARDWALL
#define do_hardwall_trap bad_intr
#endif
	int_hand     INT_ITLB_MISS, ITLB_MISS, \
		     do_page_fault, handle_interrupt_no_single_step
	int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
	int_hand     INT_ILL, ILL, do_trap, handle_ill
	int_hand     INT_GPV, GPV, do_trap
	int_hand     INT_SN_ACCESS, SN_ACCESS, do_trap
	int_hand     INT_IDN_ACCESS, IDN_ACCESS, do_trap
	int_hand     INT_UDN_ACCESS, UDN_ACCESS, do_trap
	int_hand     INT_IDN_REFILL, IDN_REFILL, bad_intr
	int_hand     INT_UDN_REFILL, UDN_REFILL, bad_intr
	int_hand     INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
	int_hand     INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
	int_hand     INT_SWINT_3, SWINT_3, do_trap
	int_hand     INT_SWINT_2, SWINT_2, do_trap
	int_hand     INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
	int_hand     INT_SWINT_0, SWINT_0, do_trap
	int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
	int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
	int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
	int_hand     INT_DMATLB_MISS, DMATLB_MISS, do_page_fault
	int_hand     INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault
	int_hand     INT_SNITLB_MISS, SNITLB_MISS, do_page_fault
	int_hand     INT_SN_NOTIFY, SN_NOTIFY, bad_intr
	int_hand     INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap
	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
	int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
	int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
	int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
	int_hand     INT_UDN_TIMER, UDN_TIMER, bad_intr
	int_hand     INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr
	int_hand     INT_IDN_CA, IDN_CA, bad_intr
	int_hand     INT_UDN_CA, UDN_CA, bad_intr
	int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr
	int_hand     INT_UDN_AVAIL, UDN_AVAIL, bad_intr
	int_hand     INT_PERF_COUNT, PERF_COUNT, \
		     op_handle_perf_interrupt, handle_nmi
	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
#if CONFIG_KERNEL_PL == 2
	dc_dispatch  INT_INTCTRL_2, INTCTRL_2
	int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
#else
	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
#endif
	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
		     hv_message_intr
	int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
		     tile_dev_intr
	int_hand     INT_I_ASID, I_ASID, bad_intr
	int_hand     INT_D_ASID, D_ASID, bad_intr
	int_hand     INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
		     do_page_fault
	int_hand     INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
		     do_page_fault
	int_hand     INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
		     do_page_fault
	int_hand     INT_SN_CPL, SN_CPL, bad_intr
	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
#if CHIP_HAS_AUX_PERF_COUNTERS()
	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
		     op_handle_aux_perf_interrupt, handle_nmi
#endif

	/* Synthetic interrupt delivered only by the simulator */
	int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint