/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */

#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
#define CMPIB		cmpib,*
#define CMPB		cmpb,*
#define COND(x)		*x

	.level 2.0w
#else
#define CMPIB		cmpib,
#define CMPB		cmpb,
#define COND(x)		x

	.level 2.0
#endif

	.import		pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
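	/* Net effect, roughly: prot = spc << 1 when SPACEID_SHIFT == 0,
	 * else prot = spc >> (SPACEID_SHIFT - 1).  Either way the space id
	 * is lined up with the protection-id field expected by the TLB
	 * insert instructions used further down. */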
	/* Switch to virtual mapping, trashing only %r1 */
	.macro	virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=	%r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
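	/* How the macro above "returns" (descriptive sketch): the
	 * interruption instruction address queues are loaded by hand
	 * (IIASQ head and tail both zero, IIAOQ set to local label 4 and
	 * 4+4), %ipsw is loaded with KERNEL_PSW, and the rfir then resumes
	 * at label 4 at the end of the macro -- now translated and with
	 * the kernel PSW bits in force. */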
	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 * If sr7 == 0
	 *      Already using a kernel stack, so call the
	 *      get_stack_use_r30 macro to push a pt_regs structure
	 *      on the stack, and store registers there.
	 * else
	 *      Need to set up a kernel stack, so call the
	 *      get_stack_use_cr30 macro to set up a pointer
	 *      to the pt_regs structure contained within the
	 *      task pointer pointed to by cr30. Set the stack
	 *      pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro	get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl	%cr30, %r1
	tophys	%r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys	%r1,%r9
	ldo	TASK_REGS(%r9),%r9
	STREG	%r30, PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	copy	%r9,%r29
	mfctl	%cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	.endm

	.macro	get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys	%r30,%r9
	STREG	%r30,PT_GR30(%r9)
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	copy	%r9,%r29
	.endm

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm
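	/* Dispatch sketch for the stack macros above: the interruption
	 * entry points below test the space id saved from %sr7 and pick
	 *
	 *	if (sr7 == 0)	get_stack_use_r30   (already on kernel stack)
	 *	else		get_stack_use_cr30  (switch to the task's stack)
	 *
	 * see intr_extint and intr_save further down. */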
	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	itlb_miss_11
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

	/* The following are simple 32 vs 64 bit instruction
	 * abstractions for the macros */
	.macro	EXTR	reg1,start,length,reg2
#ifdef CONFIG_64BIT
	extrd,u	\reg1,32+\start,\length,\reg2
#else
	extrw,u	\reg1,\start,\length,\reg2
#endif
	.endm

	.macro	DEP	reg1,start,length,reg2
#ifdef CONFIG_64BIT
	depd	\reg1,32+\start,\length,\reg2
#else
	depw	\reg1,\start,\length,\reg2
#endif
	.endm

	.macro	DEPI	val,start,length,reg
#ifdef CONFIG_64BIT
	depdi	\val,32+\start,\length,\reg
#else
	depwi	\val,\start,\length,\reg
#endif
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
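	/* Rough C equivalent of the LP64 branch above (illustrative only):
	 *
	 *	tmp = spc & ((1UL << SPACEID_SHIFT) - 1);
	 *	spc &= ~((1UL << SPACEID_SHIFT) - 1);
	 *	va  |= tmp << 32;
	 */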
	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm
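	/* In C terms (a sketch): reg = spc ? mfctl(cr25) : PA(swapper_pg_dir);
	 * the or,COND(=) nullifies the mfctl when the fault space is zero,
	 * so the swapper_pg_dir address loaded first survives for kernel
	 * faults. */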
	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel
	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
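	/* Net effect of the nullification dance above (a sketch):
	 *
	 *	if (spc != 0 && mfsp(sr7) != 0 && spc != mfsp(sr7))
	 *		goto fault;
	 *
	 * i.e. a fault on a foreign space is only let through when either
	 * the fault itself or the current execution is in kernel space. */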
	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	EXTR	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	EXTR	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	DEP	%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy	\pmd,%r9
	SHLREG	%r9,PxD_VALUE_SHIFT,\pmd
	EXTR	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG	%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
	.endm
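	/* The walk above, as C-style pseudocode (names illustrative):
	 *
	 *	e = pmd_table[pmd_index(va)];	// ldw is enough, even LP64
	 *	if (!(e & PxD_PRESENT)) goto fault;
	 *	pte_table = (e & ~PxD_FLAGS) << PxD_VALUE_SHIFT;
	 *	pte = pte_table[pte_index(va)];
	 *	if (!(pte & _PAGE_PRESENT)) goto fault;
	 */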
	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy	%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s	\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld	\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy	\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm
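	/* Sketch of the hybrid trick above: each instruction between the
	 * first extrd,u and the L2_ptep is conditionally nullified on
	 * whether va has bits above ASM_PGDIR_SHIFT, so effectively
	 *
	 *	if (va >> ASM_PGDIR_SHIFT)	// true 3-level lookup
	 *		pgd = unshift(pgd[pgd_index(va)]), present-checked;
	 *	else				// first 4GB: pmd is adjacent
	 *		pgd += ASM_PGD_PMD_OFFSET;
	 *
	 * before falling through to the 2-level walk. */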
	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_ptep	ptep,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptep)
	.endm
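	/* Equivalent C (sketch) -- the STREG is nullified when the bit is
	 * already set, so a hot PTE never has its cache line dirtied:
	 *
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptep = pte | _PAGE_ACCESSED;
	 */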
	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptep,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptep)
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_READ.
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi	7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd	%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depi	1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	extrd,u	\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
	extru	\pte,24,25,\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm
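	/* Sketch of the macro above: the addi,<> nullifies the second
	 * extrd,s unless the four bits tested are all ones, i.e. roughly
	 *
	 *	if (tested_nibble(pte) == 0xf)	// 0xfXXXXXXX I/O space
	 *		pte = sign_extend_low_25_bits(pte);
	 */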
	/* The alias region is an 8MB aligned 16MB region used to do
	 * clear and copy of user pages at addresses congruent with
	 * the user virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry is
	 * needed---for clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	DEPI	0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	ldi	(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
	depd,z	\prot,8,7,\prot
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or	%r26,%r0,\pte
	.endm


	/*
	 * Align fault_vector_20 on 4K boundary so that both
	 * fault_vector_11 and fault_vector_20 are on the
	 * same page. This is only necessary as long as we
	 * write protect the kernel text, which we may stop
	 * doing once we use large page translations to cover
	 * the static part of the kernel address space.
	 */

	.text

	.align	PAGE_SIZE

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
#if 0
	naitlb_20	16
#else
	def		16
#endif
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
#if 0
	naitlb_11	16
#else
	def		16
#endif
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * r26 = function to be called
	 * r25 = argument to pass in
	 * r24 = flags for do_fork()
	 *
	 * Kernel threads don't ever return, so they don't need
	 * a true register context. We just save away the arguments
	 * for copy_thread/ret_from_kernel_thread to properly set up
	 * the child.
	 */

#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.import do_fork
ENTRY(__kernel_thread)
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)	/* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26	/* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26	/* will have kernel mappings.	      */
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)		/* user_tid */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop
ENDPROC(__kernel_thread)
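	/* What the wrapper above boils down to (descriptive sketch): build
	 * a pt_regs area on the current stack, force CLONE_VM and
	 * CLONE_UNTRACED into the caller's flags (only init_mm has the
	 * kernel mappings, per the comment above), pass stack_start = 1 as
	 * the "this is a kernel thread" marker, and let do_fork create the
	 * child; the parent then pops the pt_regs area and returns. */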
	/*
	 * Child Returns here
	 *
	 * copy_thread moved args from temp save area set up above
	 * into task save area.
	 */

ENTRY(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
#ifndef CONFIG_64BIT
	b	sys_exit
#else
	load32	sys_exit, %r1
	bv	%r0(%r1)
#endif
	ldi	0, %r26
ENDPROC(ret_from_kernel_thread)

	.import	sys_execve, code
ENTRY(__execve)
	copy	%r2, %r15
	copy	%r30, %r16
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26

	cmpib,=,n 0,%r28,intr_return	/* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop
ENDPROC(__execve)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY(_switch_to)
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl	%r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC(_switch_to)
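	/* Context-switch sketch: prev's kernel stack pointer and a resume
	 * pc (_switch_to_ret) are saved via TASK_PT_KSP/TASK_PT_KPC, the
	 * same two fields are loaded from next, and %cr30 is repointed at
	 * next's thread_info; the bv through %r2 therefore resumes next
	 * wherever it last suspended, with prev handed back in %r28 as the
	 * return value. */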
	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY(syscall_exit_rfi)
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also Filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19	/* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19	/* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

intr_return:
	/* NOTE: Need to enable interrupts in case we schedule. */
	ssm	PSW_SM_I, %r0

intr_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=,n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=,n 0,%r20,intr_restore /* backward */

	copy	%r0, %r25		/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26		/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy	%r16,%r29
	ldo	PT_FR31(%r29),%r1
	rest_fp	%r1
	rest_general	%r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm	PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1	%r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=	0, %r20, intr_do_preempt
	nop

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	CMPIB<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	CMPIB=,n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY(intr_save)		/* for os_hpmc */
	mfsp	%sr7,%r16
	CMPIB=,n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c).
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	CMPIB=,n	6,%r26,skip_save_ior


	mfctl	%cr20, %r16	/* isr */
	nop			/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%cr21, %r17	/* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi	0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u	%r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd	%r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi	0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt	pte,prot

	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt	pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt	pte,prot

	rfir
	nop

nadtlb_check_flush_20w:
	bb,>=,n	pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z	7,7,3,prot
	depdi	1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi	0,63,12,pte
	extrd,u	pte,56,52,pte
	idtlbt	pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp	%sr1,t0		/* Save sr1 so we can use it in tlb inserts */
	mtsp	spc,%sr1

	idtlba	pte,(%sr1,va)
	idtlbp	prot,(%sr1,va)

	mtsp	t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n	0,spc,dtlb_fault /* forward */
	ldil	L%(TMPALIAS_MAP_START),t0
	copy	va,t1
	depwi	0,31,23,t1
	cmpb,<>,n	t0,t1,dtlb_fault /* forward */
	ldi	(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z	prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=	va,9,1,r0
	or,tr	%r23,%r0,pte	/* If "from" use "from" page */
	or	%r26,%r0,pte	/* else "to", use "to" page  */

	idtlba	pte,(va)
	idtlbp	prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp	%sr1,t0		/* Save sr1 so we can use it in tlb inserts */
	mtsp	spc,%sr1

	idtlba	pte,(%sr1,va)
	idtlbp	prot,(%sr1,va)

	mtsp	t0, %sr1	/* Restore sr1 */

	rfir
	nop

nadtlb_check_flush_11:
	bb,>=,n	pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	zdepi	7,7,3,prot
	depi	1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi	0,31,12,pte
	extru	pte,24,25,pte

	mfsp	%sr1,t0		/* Save sr1 so we can use it in tlb inserts */
	mtsp	spc,%sr1

	idtlba	pte,(%sr1,va)
	idtlbp	prot,(%sr1,va)

	mtsp	t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt	pte,prot

	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt	pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt	pte,prot

	rfir
	nop

nadtlb_check_flush_20:
	bb,>=,n	pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z	7,7,3,prot
	depdi	1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi	0,63,12,pte
	extrd,u	pte,56,32,pte
	idtlbt	pte,prot

	rfir
	nop
#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */
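	/* Emulation sketch for what follows (illustrative): %r9 gets the
	 * iir; the 0x280 mask picks out the fdc/fdce/pdc/"fic,4f" class.
	 * Index and base register numbers are pulled out of the iir and
	 * translated to values via get_register (which hands back -1 when
	 * the register can't be handled here, forcing the slow path), the
	 * base update is applied by hand through set_register, and the
	 * trapping instruction is then skipped by setting PSW_N before
	 * the rfir. */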
	mfctl	%cr19,%r9	/* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi	0x280,%r16
	and	%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_probe_check
	bb,>=,n	%r9,26,nadtlb_nullify	/* m bit not set, just nullify */
	BL	get_register,%r25
	extrw,u	%r9,15,5,%r8		/* Get index register # */
	CMPIB=,n	-1,%r1,nadtlb_fault	/* have to use slow path */
	copy	%r1,%r24
	BL	get_register,%r25
	extrw,u	%r9,10,5,%r8		/* Get base register # */
	CMPIB=,n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL	set_register,%r25
	add,l	%r1,%r24,%r1		/* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl	%ipsw,%r8
	ldil	L%PSW_N,%r9
	or	%r8,%r9,%r8		/* Set PSW_N */
	mtctl	%r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOWED REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi	0x80,%r16
	and	%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_fault	/* Must be probe,[rw] */
	BL	get_register,%r25	/* Find the target register */
	extrw,u	%r9,31,5,%r8		/* Get target register */
	CMPIB=,n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL	set_register,%r25
	copy	%r0,%r1			/* Write zero to target register */
	b	nadtlb_nullify		/* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * An itlb miss is a little different, since we allow users to
	 * fault on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt	pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp	%sr1,t0		/* Save sr1 so we can use it in tlb inserts */
	mtsp	spc,%sr1

	iitlba	pte,(%sr1,va)
	iitlbp	prot,(%sr1,va)

	mtsp	t0, %sr1	/* Restore sr1 */

	rfir
	nop

itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt	pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_20w
	load32	PA(pa_dbit_lock),t0

dbit_spin_20w:
	LDCW	0(t0),t1
	cmpib,=	0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt	pte,prot
#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nounlock_20w
	ldi	1,t1
	stw	t1,0(t0)

dbit_nounlock_20w:
#endif

	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_11
	load32	PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW	0(t0),t1
	cmpib,=	0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp	%sr1,t1		/* Save sr1 so we can use it in tlb inserts */
	mtsp	spc,%sr1

	idtlba	pte,(%sr1,va)
	idtlbp	prot,(%sr1,va)

	mtsp	t1, %sr1	/* Restore sr1 */
#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nounlock_11
	ldi	1,t1
	stw	t1,0(t0)

dbit_nounlock_11:
#endif

	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_20
	load32	PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW	0(t0),t1
	cmpib,=	0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt	pte,prot

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nounlock_20
	ldi	1,t1
	stw	t1,0(t0)

dbit_nounlock_20:
#endif

	rfir
	nop
#endif
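	/* Locking sketch for the SMP paths above: pa_dbit_lock is acquired
	 * with ldcw (atomic load-and-clear), so reading back zero means the
	 * lock is held elsewhere and we spin; storing 1 releases it.  The
	 * lock is only taken for userspace faults (spc != 0). */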
  1334. .import handle_interruption,code
  1335. kernel_bad_space:
  1336. b intr_save
  1337. ldi 31,%r8 /* Use an unused code */
  1338. dbit_fault:
  1339. b intr_save
  1340. ldi 20,%r8
  1341. itlb_fault:
  1342. b intr_save
  1343. ldi 6,%r8
  1344. nadtlb_fault:
  1345. b intr_save
  1346. ldi 17,%r8
  1347. dtlb_fault:
  1348. b intr_save
  1349. ldi 15,%r8
  1350. /* Register saving semantics for system calls:
  1351. %r1 clobbered by system call macro in userspace
  1352. %r2 saved in PT_REGS by gateway page
  1353. %r3 - %r18 preserved by C code (saved by signal code)
  1354. %r19 - %r20 saved in PT_REGS by gateway page
  1355. %r21 - %r22 non-standard syscall args
  1356. stored in kernel stack by gateway page
  1357. %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
  1358. %r27 - %r30 saved in PT_REGS by gateway page
  1359. %r31 syscall return pointer
  1360. */
  1361. /* Floating point registers (FIXME: what do we do with these?)
  1362. %fr0 - %fr3 status/exception, not preserved
  1363. %fr4 - %fr7 arguments
  1364. %fr8 - %fr11 not preserved by C code
  1365. %fr12 - %fr21 preserved by C code
  1366. %fr22 - %fr31 not preserved by C code
  1367. */
  1368. .macro reg_save regs
  1369. STREG %r3, PT_GR3(\regs)
  1370. STREG %r4, PT_GR4(\regs)
  1371. STREG %r5, PT_GR5(\regs)
  1372. STREG %r6, PT_GR6(\regs)
  1373. STREG %r7, PT_GR7(\regs)
  1374. STREG %r8, PT_GR8(\regs)
  1375. STREG %r9, PT_GR9(\regs)
  1376. STREG %r10,PT_GR10(\regs)
  1377. STREG %r11,PT_GR11(\regs)
  1378. STREG %r12,PT_GR12(\regs)
  1379. STREG %r13,PT_GR13(\regs)
  1380. STREG %r14,PT_GR14(\regs)
  1381. STREG %r15,PT_GR15(\regs)
  1382. STREG %r16,PT_GR16(\regs)
  1383. STREG %r17,PT_GR17(\regs)
  1384. STREG %r18,PT_GR18(\regs)
  1385. .endm
  1386. .macro reg_restore regs
  1387. LDREG PT_GR3(\regs), %r3
  1388. LDREG PT_GR4(\regs), %r4
  1389. LDREG PT_GR5(\regs), %r5
  1390. LDREG PT_GR6(\regs), %r6
  1391. LDREG PT_GR7(\regs), %r7
  1392. LDREG PT_GR8(\regs), %r8
  1393. LDREG PT_GR9(\regs), %r9
  1394. LDREG PT_GR10(\regs),%r10
  1395. LDREG PT_GR11(\regs),%r11
  1396. LDREG PT_GR12(\regs),%r12
  1397. LDREG PT_GR13(\regs),%r13
  1398. LDREG PT_GR14(\regs),%r14
  1399. LDREG PT_GR15(\regs),%r15
  1400. LDREG PT_GR16(\regs),%r16
  1401. LDREG PT_GR17(\regs),%r17
  1402. LDREG PT_GR18(\regs),%r18
  1403. .endm
  1404. ENTRY(sys_fork_wrapper)
  1405. LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
  1406. ldo TASK_REGS(%r1),%r1
  1407. reg_save %r1
  1408. mfctl %cr27, %r3
  1409. STREG %r3, PT_CR27(%r1)
  1410. STREG %r2,-RP_OFFSET(%r30)
  1411. ldo FRAME_SIZE(%r30),%r30
  1412. #ifdef CONFIG_64BIT
  1413. ldo -16(%r30),%r29 /* Reference param save area */
  1414. #endif
  1415. /* These are call-clobbered registers and therefore
  1416. also syscall-clobbered (we hope). */
  1417. STREG %r2,PT_GR19(%r1) /* save for child */
  1418. STREG %r30,PT_GR21(%r1)
  1419. LDREG PT_GR30(%r1),%r25
  1420. copy %r1,%r24
  1421. BL sys_clone,%r2
  1422. ldi SIGCHLD,%r26
  1423. LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
  1424. wrapper_exit:
  1425. ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
  1426. LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
  1427. ldo TASK_REGS(%r1),%r1 /* get pt regs */
  1428. LDREG PT_CR27(%r1), %r3
  1429. mtctl %r3, %cr27
  1430. reg_restore %r1
  1431. /* strace expects syscall # to be preserved in r20 */
  1432. ldi __NR_fork,%r20
  1433. bv %r0(%r2)
  1434. STREG %r20,PT_GR20(%r1)
  1435. ENDPROC(sys_fork_wrapper)
  1436. /* Set the return value for the child */
  1437. ENTRY(child_return)
  1438. BL schedule_tail, %r2
  1439. nop
  1440. LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
  1441. LDREG TASK_PT_GR19(%r1),%r2
  1442. b wrapper_exit
  1443. copy %r0,%r28
  1444. ENDPROC(child_return)
ENTRY(sys_clone_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_clone_wrapper)

ENTRY(sys_vfork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_vfork_wrapper)
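
	/* Note the differing first arguments above: sys_clone receives
	 * the user-supplied flags/usp already sitting in %r26/%r25 with
	 * pt_regs appended in %r24, while sys_vfork takes only pt_regs,
	 * passed in %r26.  (The C prototypes are inferred from the
	 * argument registers, not spelled out in this file.)
	 */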
	.macro	execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so: why would the new thread need the
	 * old thread's registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	\execve,%r2
	copy	%r1,%arg0

	ldo	-FRAME_SIZE(%r30),%r30
	LDREG	-RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */
	ldo	-1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve
	copy	%r2,%r19

error_\execve:
	bv	%r0(%r19)
	nop
	.endm
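
	/* The -1024 compare above is the usual negative-errno window
	 * test: cmpb,>>= is an unsigned compare, so it succeeds exactly
	 * when %r28 is one of the top 1024 unsigned values, i.e. in the
	 * range -1024..-1 when viewed as a signed error code.
	 */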

	.import sys_execve
ENTRY(sys_execve_wrapper)
	execve_wrapper sys_execve
ENDPROC(sys_execve_wrapper)

#ifdef CONFIG_64BIT
	.import sys32_execve
ENTRY(sys32_execve_wrapper)
	execve_wrapper sys32_execve
ENDPROC(sys32_execve_wrapper)
#endif

ENTRY(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */

	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28	/* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)
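
	/* In the #ifdef CONFIG_64BIT blocks above (and in the sigaltstack
	 * wrappers below), the two configurations only schedule the same
	 * work differently around the BL delay slot: the 64-bit path
	 * grows the frame first so it can set up %r29 in the delay slot,
	 * while the 32-bit path grows the frame in the delay slot itself.
	 */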

ENTRY(sys_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys_sigaltstack_wrapper)

#ifdef CONFIG_64BIT
ENTRY(sys32_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys32_sigaltstack_wrapper)
#endif

ENTRY(syscall_exit)
	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */
	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	STREG	%r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	ldw	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	-PER_HPUX(%r19), %r19
	CMPIB<>,n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG	%r22,TASK_PT_GR22(%r1)
	STREG	%r29,TASK_PT_GR29(%r1)
1:
#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig
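
	/* We branch back to syscall_check_sig rather than falling
	 * through: do_notify_resume may have left more work pending
	 * (e.g. another signal became deliverable), so the flags are
	 * re-tested before taking the restore path.
	 */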

syscall_restore:
	/* Are we being ptraced? */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldw	TASK_PTRACE(%r1), %r19
	bb,<	%r19,31,syscall_restore_rfi
	nop

	ldo	TASK_PT_FR31(%r1),%r19		/* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		/* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		/* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27		/* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28		/* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31		/* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	rsm	PSW_SM_I, %r0
	LDREG	TASK_PT_GR30(%r1),%r30		/* restore user sp */
	mfsp	%sr3,%r1			/* Get user space id */
	mtsp	%r1,%sr7			/* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			/* Restore sr4 */
	mtsp	%r1,%sr5			/* Restore sr5 */
	mtsp	%r1,%sr6			/* Restore sr6 */

	depi	3,31,2,%r31			/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3,%r31)			/* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is
	 * not the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			/* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			/* for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		/* Get old PSW */
	ldi	0x0b,%r20			/* Create new PSW */
	depi	-1,13,1,%r20			/* C, Q, D, and I bits */

	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	 * set in include/linux/ptrace.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	depi	-1,27,1,%r20			/* R bit */

	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,=	%r19,PA_BLOCKSTEP_BIT,1,%r0
	depi	-1,7,1,%r20			/* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */
	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				/* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16
	b	intr_restore
	nop

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_resched	/* if resched, we start over again */
	nop
ENDPROC(syscall_exit)

get_register:
	/*
	 * get_register is used by the non-access TLB miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1.  This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value.  So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
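
	/* Dispatch trick used here and in set_register below:
	 * blr %r8,%r0 branches to PC+8 plus (%r8 << 3), i.e. into a
	 * table of 8-byte entries, one per general register.  Each
	 * entry is exactly two instructions: a bv back through %r25
	 * with the actual copy in its delay slot.
	 */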
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)	/* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)	/* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)	/* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)	/* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)	/* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)	/* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)	/* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)	/* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)	/* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)	/* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)	/* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)	/* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)	/* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)	/* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)	/* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)	/* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)	/* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)	/* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)	/* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)	/* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)	/* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)	/* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)	/* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)	/* r31 */
	copy	%r31,%r1

set_register:
	/*
	 * set_register is used by the non-access TLB miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)	/* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)	/* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)	/* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)	/* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)	/* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)	/* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)	/* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)	/* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)	/* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)	/* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)	/* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)	/* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)	/* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)	/* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)	/* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)	/* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)	/* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)	/* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)	/* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)	/* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)	/* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)	/* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)	/* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)	/* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)	/* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)	/* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)	/* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)	/* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)	/* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)	/* r31 */
	copy	%r1,%r31