/* entry.S: FR-V entry
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *
 * Entry to the kernel is "interesting":
 *  (1) There are no stack pointers, not even for the kernel
 *  (2) General Registers should not be clobbered
 *  (3) There are no kernel-only data registers
 *  (4) Since all addressing modes are relative to a General Register, no global
 *      variables can be reached
 *
 * We deal with this by declaring that we shall kill GR28 on entering the
 * kernel from userspace
 *
 * However, since break interrupts can interrupt the CPU even when PSR.ET==0,
 * they can't rely on GR28 to be anything useful, and so need to clobber a
 * separate register (GR31). Break interrupts are managed in break.S
 *
 * GR29 _is_ saved, and holds the current task pointer globally
 *
 */
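
/*
 * Note: the exception frames built by the entry paths below are filled in at
 * the REG_* offsets used throughout this file (REG_SP, REG_GR(n), REG_PSR and
 * so on), which come from the asm headers included below.  On entry from
 * userspace, GR28 is pointed at the kernel's base exception frame by loading
 * __kernel_frame0_ptr; that frame then also serves as the initial kernel
 * stack pointer.
 */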
#include <linux/sys.h>
#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <asm/spr-regs.h>

#define nr_syscalls ((syscall_table_size)/4)
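# nr_syscalls is derived from syscall_table_size, which is computed at the
# bottom of this file from the extent of sys_call_table; each table entry is a
# 4-byte .long, hence the division by 4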

        .text
        .balign 4

        .macro LEDS val
# sethi.p %hi(0xe1200004),gr30
# setlo %lo(0xe1200004),gr30
# setlos #~\val,gr31
# st gr31,@(gr30,gr0)
# sethi.p %hi(0xffc00100),gr30
# setlo %lo(0xffc00100),gr30
# sth gr0,@(gr30,gr0)
# membar
        .endm

        .macro LEDS32
# not gr31,gr31
# sethi.p %hi(0xe1200004),gr30
# setlo %lo(0xe1200004),gr30
# st.p gr31,@(gr30,gr0)
# srli gr31,#16,gr31
# sethi.p %hi(0xffc00100),gr30
# setlo %lo(0xffc00100),gr30
# sth gr31,@(gr30,gr0)
# membar
        .endm
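
# The LEDS and LEDS32 macros are debugging hooks: when their bodies are
# uncommented, each "LEDS 0xNNNN" marker below writes its code to what appears
# to be a board LED/latch register (0xe1200004), so the entry paths can be
# traced on hardware.  As shipped, the macros expand to nothing.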

###############################################################################
#
# entry point for External interrupts received whilst executing userspace code
#
###############################################################################
        .globl __entry_uspace_external_interrupt
        .type __entry_uspace_external_interrupt,@function
__entry_uspace_external_interrupt:
        LEDS 0x6200
        sethi.p %hi(__kernel_frame0_ptr),gr28
        setlo %lo(__kernel_frame0_ptr),gr28
        ldi @(gr28,#0),gr28

        # handle h/w single-step through exceptions
        sti gr0,@(gr28,#REG__STATUS)

        .globl __entry_uspace_external_interrupt_reentry
__entry_uspace_external_interrupt_reentry:
        LEDS 0x6201

        setlos #REG__END,gr30
        dcpl gr28,gr30,#0

        # finish building the exception frame
        sti sp, @(gr28,#REG_SP)
        stdi gr2, @(gr28,#REG_GR(2))
        stdi gr4, @(gr28,#REG_GR(4))
        stdi gr6, @(gr28,#REG_GR(6))
        stdi gr8, @(gr28,#REG_GR(8))
        stdi gr10,@(gr28,#REG_GR(10))
        stdi gr12,@(gr28,#REG_GR(12))
        stdi gr14,@(gr28,#REG_GR(14))
        stdi gr16,@(gr28,#REG_GR(16))
        stdi gr18,@(gr28,#REG_GR(18))
        stdi gr20,@(gr28,#REG_GR(20))
        stdi gr22,@(gr28,#REG_GR(22))
        stdi gr24,@(gr28,#REG_GR(24))
        stdi gr26,@(gr28,#REG_GR(26))
        sti gr0, @(gr28,#REG_GR(28))
        sti gr29,@(gr28,#REG_GR(29))
        stdi.p gr30,@(gr28,#REG_GR(30))

        # set up the kernel stack pointer
        ori gr28,0,sp

        movsg tbr ,gr20
        movsg psr ,gr22
        movsg pcsr,gr21
        movsg isr ,gr23
        movsg ccr ,gr24
        movsg cccr,gr25
        movsg lr ,gr26
        movsg lcr ,gr27

        setlos.p #-1,gr4
        andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
        andi.p gr22,#~(PSR_PS|PSR_S),gr6
        slli gr5,#1,gr5
        or gr6,gr5,gr5
        andi gr5,#~PSR_ET,gr5

        sti gr20,@(gr28,#REG_TBR)
        sti gr21,@(gr28,#REG_PC)
        sti gr5 ,@(gr28,#REG_PSR)
        sti gr23,@(gr28,#REG_ISR)
        stdi gr24,@(gr28,#REG_CCR)
        stdi gr26,@(gr28,#REG_LR)
        sti gr4 ,@(gr28,#REG_SYSCALLNO)

        movsg iacc0h,gr4
        movsg iacc0l,gr5
        stdi gr4,@(gr28,#REG_IACC0)

        movsg gner0,gr4
        movsg gner1,gr5
        stdi.p gr4,@(gr28,#REG_GNER0)

        # interrupts start off fully disabled in the interrupt handler
        subcc gr0,gr0,gr0,icc2 /* set Z and clear C */

        # set up kernel global registers
        sethi.p %hi(__kernel_current_task),gr5
        setlo %lo(__kernel_current_task),gr5
        sethi.p %hi(_gp),gr16
        setlo %lo(_gp),gr16
        ldi @(gr5,#0),gr29
        ldi.p @(gr29,#4),gr15 ; __current_thread_info = current->thread_info

        # make sure we (the kernel) get div-zero and misalignment exceptions
        setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
        movgs gr5,isr

        # switch to the kernel trap table
        sethi.p %hi(__entry_kerneltrap_table),gr6
        setlo %lo(__entry_kerneltrap_table),gr6
        movgs gr6,tbr

        # set the return address
        sethi.p %hi(__entry_return_from_user_interrupt),gr4
        setlo %lo(__entry_return_from_user_interrupt),gr4
        movgs gr4,lr

        # raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
        movsg psr,gr4

        ori gr4,#PSR_PIL_14,gr4
        movgs gr4,psr
        ori gr4,#PSR_PIL_14|PSR_ET,gr4
        movgs gr4,psr

        LEDS 0x6202
        bra do_IRQ

        .size __entry_uspace_external_interrupt,.-__entry_uspace_external_interrupt

###############################################################################
#
# entry point for External interrupts received whilst executing kernel code
# - on arriving here, the following registers should already be set up:
#	GR15	- current thread_info struct pointer
#	GR16	- kernel GP-REL pointer
#	GR29	- current task struct pointer
#	TBR	- kernel trap vector table
#	ISR	- kernel's preferred integer controls
#
###############################################################################
        .globl __entry_kernel_external_interrupt
        .type __entry_kernel_external_interrupt,@function
__entry_kernel_external_interrupt:
        LEDS 0x6210
// sub sp,gr15,gr31
// LEDS32

        # set up the stack pointer
        or.p sp,gr0,gr30
        subi sp,#REG__END,sp
        sti gr30,@(sp,#REG_SP)

        # handle h/w single-step through exceptions
        sti gr0,@(sp,#REG__STATUS)

        .globl __entry_kernel_external_interrupt_reentry
__entry_kernel_external_interrupt_reentry:
        LEDS 0x6211

        # set up the exception frame
        setlos #REG__END,gr30
        dcpl sp,gr30,#0

        sti.p gr28,@(sp,#REG_GR(28))
        ori sp,0,gr28

        # finish building the exception frame
        stdi gr2,@(gr28,#REG_GR(2))
        stdi gr4,@(gr28,#REG_GR(4))
        stdi gr6,@(gr28,#REG_GR(6))
        stdi gr8,@(gr28,#REG_GR(8))
        stdi gr10,@(gr28,#REG_GR(10))
        stdi gr12,@(gr28,#REG_GR(12))
        stdi gr14,@(gr28,#REG_GR(14))
        stdi gr16,@(gr28,#REG_GR(16))
        stdi gr18,@(gr28,#REG_GR(18))
        stdi gr20,@(gr28,#REG_GR(20))
        stdi gr22,@(gr28,#REG_GR(22))
        stdi gr24,@(gr28,#REG_GR(24))
        stdi gr26,@(gr28,#REG_GR(26))
        sti gr29,@(gr28,#REG_GR(29))
        stdi.p gr30,@(gr28,#REG_GR(30))

        # note virtual interrupts will be fully enabled upon return
        subicc gr0,#1,gr0,icc2 /* clear Z, set C */

        movsg tbr ,gr20
        movsg psr ,gr22
        movsg pcsr,gr21
        movsg isr ,gr23
        movsg ccr ,gr24
        movsg cccr,gr25
        movsg lr ,gr26
        movsg lcr ,gr27

        setlos.p #-1,gr4
        andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
        andi.p gr22,#~(PSR_PS|PSR_S),gr6
        slli gr5,#1,gr5
        or gr6,gr5,gr5
        andi.p gr5,#~PSR_ET,gr5

        # set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
        # - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
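        # - roughly: the atomic-modify sequences conditionally commit their store on CC3
        #   and loop until the store succeeds, so forcing CC3 to "undefined" here squashes
        #   any such sequence that this interrupt cut through and makes it retry when it
        #   resumes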
        andi gr25,#~0xc0,gr25

        sti gr20,@(gr28,#REG_TBR)
        sti gr21,@(gr28,#REG_PC)
        sti gr5 ,@(gr28,#REG_PSR)
        sti gr23,@(gr28,#REG_ISR)
        stdi gr24,@(gr28,#REG_CCR)
        stdi gr26,@(gr28,#REG_LR)
        sti gr4 ,@(gr28,#REG_SYSCALLNO)

        movsg iacc0h,gr4
        movsg iacc0l,gr5
        stdi gr4,@(gr28,#REG_IACC0)

        movsg gner0,gr4
        movsg gner1,gr5
        stdi.p gr4,@(gr28,#REG_GNER0)

        # interrupts start off fully disabled in the interrupt handler
        subcc gr0,gr0,gr0,icc2 /* set Z and clear C */

        # set the return address
        sethi.p %hi(__entry_return_from_kernel_interrupt),gr4
        setlo %lo(__entry_return_from_kernel_interrupt),gr4
        movgs gr4,lr

        # clear power-saving mode flags
        movsg hsr0,gr4
        andi gr4,#~HSR0_PDM,gr4
        movgs gr4,hsr0

        # raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_PIL_14,gr4
        movgs gr4,psr
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr

        LEDS 0x6212
        bra do_IRQ

        .size __entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt

###############################################################################
#
# deal with interrupts that were actually virtually disabled
# - we need to really disable them, flag the fact and return immediately
# - if you change this, you must alter break.S also
#
###############################################################################
        .balign L1_CACHE_BYTES
        .globl __entry_kernel_external_interrupt_virtually_disabled
        .type __entry_kernel_external_interrupt_virtually_disabled,@function
__entry_kernel_external_interrupt_virtually_disabled:
        movsg psr,gr30
        andi gr30,#~PSR_PIL,gr30
        ori gr30,#PSR_PIL_14,gr30 ; debugging interrupts only
        movgs gr30,psr
        subcc gr0,gr0,gr0,icc2 ; leave Z set, clear C
        rett #0

        .size __entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled

###############################################################################
#
# deal with re-enablement of interrupts that were pending when virtually re-enabled
# - set ICC2.C, re-enable the real interrupts and return
# - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
# - if you change this, you must alter break.S also
#
###############################################################################
        .balign L1_CACHE_BYTES
        .globl __entry_kernel_external_interrupt_virtual_reenable
        .type __entry_kernel_external_interrupt_virtual_reenable,@function
__entry_kernel_external_interrupt_virtual_reenable:
        movsg psr,gr30
        andi gr30,#~PSR_PIL,gr30 ; re-enable interrupts
        movgs gr30,psr
        subicc gr0,#1,gr0,icc2 ; clear Z, set C
        rett #0

        .size __entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable
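
###############################################################################
#
# note on the virtual interrupt scheme used by the two stubs above:
# - the kernel tracks interrupt disablement "virtually" in the ICC2 flags so
#   that local_irq_disable()/local_irq_enable() need not modify PSR.PIL on
#   every call
# - when a real interrupt arrives whilst interrupts are virtually disabled,
#   the CPU is steered to the _virtually_disabled stub, which really masks
#   interrupts by raising PSR.PIL and returns at once
# - when interrupts are later virtually re-enabled, the irq-flag helpers issue
#   a conditional trap on ICC2 (the TIHI mentioned above) into the
#   _virtual_reenable stub, which drops PSR.PIL again and fixes up the flags
# - break.S must use exactly the same ICC2.Z/ICC2.C convention, as the
#   warnings above say
#
###############################################################################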

###############################################################################
#
# entry point for Software and Program interrupts generated whilst executing userspace code
#
###############################################################################
        .globl __entry_uspace_softprog_interrupt
        .type __entry_uspace_softprog_interrupt,@function
        .globl __entry_uspace_handle_mmu_fault
__entry_uspace_softprog_interrupt:
        LEDS 0x6000
#ifdef CONFIG_MMU
        movsg ear0,gr28
__entry_uspace_handle_mmu_fault:
        movgs gr28,scr2
#endif
        sethi.p %hi(__kernel_frame0_ptr),gr28
        setlo %lo(__kernel_frame0_ptr),gr28
        ldi @(gr28,#0),gr28

        # handle h/w single-step through exceptions
        sti gr0,@(gr28,#REG__STATUS)

        .globl __entry_uspace_softprog_interrupt_reentry
__entry_uspace_softprog_interrupt_reentry:
        LEDS 0x6001

        setlos #REG__END,gr30
        dcpl gr28,gr30,#0

        # set up the kernel stack pointer
        sti.p sp,@(gr28,#REG_SP)
        ori gr28,0,sp
        sti gr0,@(gr28,#REG_GR(28))

        stdi gr20,@(gr28,#REG_GR(20))
        stdi gr22,@(gr28,#REG_GR(22))

        movsg tbr,gr20
        movsg pcsr,gr21
        movsg psr,gr22

        sethi.p %hi(__entry_return_from_user_exception),gr23
        setlo %lo(__entry_return_from_user_exception),gr23

        bra __entry_common

        .size __entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt

        # single-stepping was disabled on entry to a TLB handler that then faulted
#ifdef CONFIG_MMU
        .globl __entry_uspace_handle_mmu_fault_sstep
__entry_uspace_handle_mmu_fault_sstep:
        movgs gr28,scr2
        sethi.p %hi(__kernel_frame0_ptr),gr28
        setlo %lo(__kernel_frame0_ptr),gr28
        ldi @(gr28,#0),gr28

        # flag single-step re-enablement
        sti gr0,@(gr28,#REG__STATUS)
        bra __entry_uspace_softprog_interrupt_reentry
#endif

###############################################################################
#
# entry point for Software and Program interrupts generated whilst executing kernel code
#
###############################################################################
        .globl __entry_kernel_softprog_interrupt
        .type __entry_kernel_softprog_interrupt,@function
__entry_kernel_softprog_interrupt:
        LEDS 0x6004

#ifdef CONFIG_MMU
        movsg ear0,gr30
        movgs gr30,scr2
#endif

        .globl __entry_kernel_handle_mmu_fault
__entry_kernel_handle_mmu_fault:
        # set up the stack pointer
        subi sp,#REG__END,sp
        sti sp,@(sp,#REG_SP)
        sti sp,@(sp,#REG_SP-4)
        andi sp,#~7,sp

        # handle h/w single-step through exceptions
        sti gr0,@(sp,#REG__STATUS)

        .globl __entry_kernel_softprog_interrupt_reentry
__entry_kernel_softprog_interrupt_reentry:
        LEDS 0x6005

        setlos #REG__END,gr30
        dcpl sp,gr30,#0

        # set up the exception frame
        sti.p gr28,@(sp,#REG_GR(28))
        ori sp,0,gr28

        stdi gr20,@(gr28,#REG_GR(20))
        stdi gr22,@(gr28,#REG_GR(22))

        ldi @(sp,#REG_SP),gr22 /* reconstruct the old SP */
        addi gr22,#REG__END,gr22
        sti gr22,@(sp,#REG_SP)

        # set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
        # - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
        movsg cccr,gr20
        andi gr20,#~0xc0,gr20
        movgs gr20,cccr

        movsg tbr,gr20
        movsg pcsr,gr21
        movsg psr,gr22

        sethi.p %hi(__entry_return_from_kernel_exception),gr23
        setlo %lo(__entry_return_from_kernel_exception),gr23

        bra __entry_common

        .size __entry_kernel_softprog_interrupt,.-__entry_kernel_softprog_interrupt

        # single-stepping was disabled on entry to a TLB handler that then faulted
#ifdef CONFIG_MMU
        .globl __entry_kernel_handle_mmu_fault_sstep
__entry_kernel_handle_mmu_fault_sstep:
        # set up the stack pointer
        subi sp,#REG__END,sp
        sti sp,@(sp,#REG_SP)
        sti sp,@(sp,#REG_SP-4)
        andi sp,#~7,sp

        # flag single-step re-enablement
        sethi #REG__STATUS_STEP,gr30
        sti gr30,@(sp,#REG__STATUS)
        bra __entry_kernel_softprog_interrupt_reentry
#endif

###############################################################################
#
# the rest of the kernel entry point code
# - on arriving here, the following registers should be set up:
#	GR1	- kernel stack pointer
#	GR7	- syscall number (trap 0 only)
#	GR8-13	- syscall args (trap 0 only)
#	GR20	- saved TBR
#	GR21	- saved PC
#	GR22	- saved PSR
#	GR23	- return handler address
#	GR28	- exception frame on stack
#	SCR2	- saved EAR0 where applicable (clobbered by ICI & ICEF insns on FR451)
#	PSR	- PSR.S 1, PSR.ET 0
#
###############################################################################
        .globl __entry_common
        .type __entry_common,@function
__entry_common:
        LEDS 0x6008

        # finish building the exception frame
        stdi gr2,@(gr28,#REG_GR(2))
        stdi gr4,@(gr28,#REG_GR(4))
        stdi gr6,@(gr28,#REG_GR(6))
        stdi gr8,@(gr28,#REG_GR(8))
        stdi gr10,@(gr28,#REG_GR(10))
        stdi gr12,@(gr28,#REG_GR(12))
        stdi gr14,@(gr28,#REG_GR(14))
        stdi gr16,@(gr28,#REG_GR(16))
        stdi gr18,@(gr28,#REG_GR(18))
        stdi gr24,@(gr28,#REG_GR(24))
        stdi gr26,@(gr28,#REG_GR(26))
        sti gr29,@(gr28,#REG_GR(29))
        stdi gr30,@(gr28,#REG_GR(30))

        movsg lcr ,gr27
        movsg lr ,gr26
        movgs gr23,lr
        movsg cccr,gr25
        movsg ccr ,gr24
        movsg isr ,gr23

        setlos.p #-1,gr4
        andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
        andi.p gr22,#~(PSR_PS|PSR_S),gr6
        slli gr5,#1,gr5
        or gr6,gr5,gr5
        andi gr5,#~PSR_ET,gr5

        sti gr20,@(gr28,#REG_TBR)
        sti gr21,@(gr28,#REG_PC)
        sti gr5 ,@(gr28,#REG_PSR)
        sti gr23,@(gr28,#REG_ISR)
        stdi gr24,@(gr28,#REG_CCR)
        stdi gr26,@(gr28,#REG_LR)
        sti gr4 ,@(gr28,#REG_SYSCALLNO)

        movsg iacc0h,gr4
        movsg iacc0l,gr5
        stdi gr4,@(gr28,#REG_IACC0)

        movsg gner0,gr4
        movsg gner1,gr5
        stdi.p gr4,@(gr28,#REG_GNER0)

        # set up virtual interrupt disablement
        subicc gr0,#1,gr0,icc2 /* clear Z flag, set C flag */

        # set up kernel global registers
        sethi.p %hi(__kernel_current_task),gr5
        setlo %lo(__kernel_current_task),gr5
        sethi.p %hi(_gp),gr16
        setlo %lo(_gp),gr16
        ldi @(gr5,#0),gr29
        ldi @(gr29,#4),gr15 ; __current_thread_info = current->thread_info

        # switch to the kernel trap table
        sethi.p %hi(__entry_kerneltrap_table),gr6
        setlo %lo(__entry_kerneltrap_table),gr6
        movgs gr6,tbr

        # make sure we (the kernel) get div-zero and misalignment exceptions
        setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
        movgs gr5,isr

        # clear power-saving mode flags
        movsg hsr0,gr4
        andi gr4,#~HSR0_PDM,gr4
        movgs gr4,hsr0

        # multiplex again using old TBR as a guide
        setlos.p #TBR_TT,gr3
        sethi %hi(__entry_vector_table),gr6
        and.p gr20,gr3,gr5
        setlo %lo(__entry_vector_table),gr6
        srli gr5,#2,gr5
        ld @(gr5,gr6),gr5

        LEDS 0x6009
        jmpl @(gr5,gr0)

        .size __entry_common,.-__entry_common
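
# note: the jmpl above re-dispatches on the cause of the original trap: the
# trap-type field of the saved TBR (masked with TBR_TT into GR5 above) is
# scaled into a byte index into __entry_vector_table, a table of 32-bit
# pointers to the second-stage handlers that follow (__entry_insn_mmu_fault,
# __entry_do_IRQ, __entry_do_NMI and friends)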

###############################################################################
#
# handle instruction MMU fault
#
###############################################################################
#ifdef CONFIG_MMU
        .globl __entry_insn_mmu_fault
__entry_insn_mmu_fault:
        LEDS 0x6010
        setlos #0,gr8
        movsg esr0,gr9
        movsg scr2,gr10

        # now that we've accessed the exception regs, we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr

        sethi.p %hi(do_page_fault),gr5
        setlo %lo(do_page_fault),gr5
        jmpl @(gr5,gr0) ; call do_page_fault(0,esr0,ear0)
#endif

###############################################################################
#
# handle instruction access error
#
###############################################################################
        .globl __entry_insn_access_error
__entry_insn_access_error:
        LEDS 0x6011
        sethi.p %hi(insn_access_error),gr5
        setlo %lo(insn_access_error),gr5
        movsg esfr1,gr8
        movsg epcr0,gr9
        movsg esr0,gr10

        # now that we've accessed the exception regs, we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr
        jmpl @(gr5,gr0) ; call insn_access_error(esfr1,epcr0,esr0)

###############################################################################
#
# handle various instructions of dubious legality
#
###############################################################################
        .globl __entry_unsupported_trap
        .globl __entry_illegal_instruction
        .globl __entry_privileged_instruction
        .globl __entry_debug_exception
__entry_unsupported_trap:
        subi gr21,#4,gr21
        sti gr21,@(gr28,#REG_PC)
__entry_illegal_instruction:
__entry_privileged_instruction:
__entry_debug_exception:
        LEDS 0x6012
        sethi.p %hi(illegal_instruction),gr5
        setlo %lo(illegal_instruction),gr5
        movsg esfr1,gr8
        movsg epcr0,gr9
        movsg esr0,gr10

        # now that we've accessed the exception regs, we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr
        jmpl @(gr5,gr0) ; call ill_insn(esfr1,epcr0,esr0)

###############################################################################
#
# handle media exception
#
###############################################################################
        .globl __entry_media_exception
__entry_media_exception:
        LEDS 0x6013
        sethi.p %hi(media_exception),gr5
        setlo %lo(media_exception),gr5
        movsg msr0,gr8
        movsg msr1,gr9

        # now that we've accessed the exception regs, we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr
        jmpl @(gr5,gr0) ; call media_excep(msr0,msr1)

###############################################################################
#
# handle data MMU fault
# handle data DAT fault (write-protect exception)
#
###############################################################################
#ifdef CONFIG_MMU
        .globl __entry_data_mmu_fault
__entry_data_mmu_fault:
        .globl __entry_data_dat_fault
__entry_data_dat_fault:
        LEDS 0x6014
        setlos #1,gr8
        movsg esr0,gr9
        movsg scr2,gr10 ; saved EAR0

        # now that we've accessed the exception regs, we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr

        sethi.p %hi(do_page_fault),gr5
        setlo %lo(do_page_fault),gr5
        jmpl @(gr5,gr0) ; call do_page_fault(1,esr0,ear0)
#endif

###############################################################################
#
# handle data and instruction access exceptions
#
###############################################################################
        .globl __entry_insn_access_exception
        .globl __entry_data_access_exception
__entry_insn_access_exception:
__entry_data_access_exception:
        LEDS 0x6016
        sethi.p %hi(memory_access_exception),gr5
        setlo %lo(memory_access_exception),gr5
        movsg esr0,gr8
        movsg scr2,gr9 ; saved EAR0
        movsg epcr0,gr10

        # now that we've accessed the exception regs, we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr
        jmpl @(gr5,gr0) ; call memory_access_error(esr0,ear0,epcr0)

###############################################################################
#
# handle data access error
#
###############################################################################
        .globl __entry_data_access_error
__entry_data_access_error:
        LEDS 0x6016
        sethi.p %hi(data_access_error),gr5
        setlo %lo(data_access_error),gr5
        movsg esfr1,gr8
        movsg esr15,gr9
        movsg ear15,gr10

        # now that we've accessed the exception regs, we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr
        jmpl @(gr5,gr0) ; call data_access_error(esfr1,esr15,ear15)

###############################################################################
#
# handle data store error
#
###############################################################################
        .globl __entry_data_store_error
__entry_data_store_error:
        LEDS 0x6017
        sethi.p %hi(data_store_error),gr5
        setlo %lo(data_store_error),gr5
        movsg esfr1,gr8
        movsg esr14,gr9

        # now that we've accessed the exception regs, we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr
        jmpl @(gr5,gr0) ; call data_store_error(esfr1,esr14)

###############################################################################
#
# handle division exception
#
###############################################################################
        .globl __entry_division_exception
__entry_division_exception:
        LEDS 0x6018
        sethi.p %hi(division_exception),gr5
        setlo %lo(division_exception),gr5
        movsg esfr1,gr8
        movsg esr0,gr9
        movsg isr,gr10

        # now that we've accessed the exception regs, we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr
        jmpl @(gr5,gr0) ; call div_excep(esfr1,esr0,isr)

###############################################################################
#
# handle compound exception
#
###############################################################################
        .globl __entry_compound_exception
__entry_compound_exception:
        LEDS 0x6019
        sethi.p %hi(compound_exception),gr5
        setlo %lo(compound_exception),gr5
        movsg esfr1,gr8
        movsg esr0,gr9
        movsg esr14,gr10
        movsg esr15,gr11
        movsg msr0,gr12
        movsg msr1,gr13

        # now that we've accessed the exception regs, we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr
        jmpl @(gr5,gr0) ; call comp_excep(esfr1,esr0,esr14,esr15,msr0,msr1)

###############################################################################
#
# handle interrupts and NMIs
#
###############################################################################
        .globl __entry_do_IRQ
__entry_do_IRQ:
        LEDS 0x6020

        # we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr
        bra do_IRQ

        .globl __entry_do_NMI
__entry_do_NMI:
        LEDS 0x6021

        # we can enable exceptions
        movsg psr,gr4
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr
        bra do_NMI

###############################################################################
#
# the return path for a newly forked child process
# - __switch_to() saved the old current pointer in GR8 for us
#
###############################################################################
        .globl ret_from_fork
ret_from_fork:
        LEDS 0x6100
        call schedule_tail

        # fork & co. return 0 to child
        setlos.p #0,gr8
        bra __syscall_exit

###################################################################################################
#
# Return to user mode is not as complex as all this looks,
# but we want the default path for a system call return to
# go as quickly as possible, which is why some of this is
# less clear than it otherwise should be.
#
###################################################################################################
        .balign L1_CACHE_BYTES
        .globl system_call
system_call:
        LEDS 0x6101
        movsg psr,gr4 ; enable exceptions
        ori gr4,#PSR_ET,gr4
        movgs gr4,psr

        sti gr7,@(gr28,#REG_SYSCALLNO)
        sti.p gr8,@(gr28,#REG_ORIG_GR8)

        subicc gr7,#nr_syscalls,gr0,icc0
        bnc icc0,#0,__syscall_badsys

        ldi @(gr15,#TI_FLAGS),gr4
        ori gr4,#_TIF_SYSCALL_TRACE,gr4
        andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
        bne icc0,#0,__syscall_trace_entry

__syscall_call:
        slli.p gr7,#2,gr7
        sethi %hi(sys_call_table),gr5
        setlo %lo(sys_call_table),gr5
        ld @(gr5,gr7),gr4
        calll @(gr4,gr0)
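
        # in C terms, the checks and dispatch above amount to roughly:
        #
        #	if (gr7 >= nr_syscalls)
        #		return -ENOSYS;		/* __syscall_badsys */
        #	gr8 = sys_call_table[gr7](gr8, gr9, gr10, gr11, gr12, gr13);
        #
        # with the return value (GR8) written back into the frame by
        # __syscall_exit below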

###############################################################################
#
# return to interrupted process
#
###############################################################################
__syscall_exit:
        LEDS 0x6300
        sti gr8,@(gr28,#REG_GR(8)) ; save return value

        # rebuild saved psr - execve will change it for init/main.c
        ldi @(gr28,#REG_PSR),gr22
        srli gr22,#1,gr5
        andi.p gr22,#~PSR_PS,gr22
        andi gr5,#PSR_PS,gr5
        or gr5,gr22,gr22
        ori gr22,#PSR_S,gr22

        # keep current PSR in GR23
        movsg psr,gr23

        # make sure we don't miss an interrupt setting need_resched or sigpending between
        # sampling and the RETT
        ori gr23,#PSR_PIL_14,gr23
        movgs gr23,psr

        ldi @(gr15,#TI_FLAGS),gr4
        sethi.p %hi(_TIF_ALLWORK_MASK),gr5
        setlo %lo(_TIF_ALLWORK_MASK),gr5
        andcc gr4,gr5,gr0,icc0
        bne icc0,#0,__syscall_exit_work

        # restore all registers and return
__entry_return_direct:
        LEDS 0x6301

        andi gr22,#~PSR_ET,gr22
        movgs gr22,psr

        ldi @(gr28,#REG_ISR),gr23
        lddi @(gr28,#REG_CCR),gr24
        lddi @(gr28,#REG_LR) ,gr26
        ldi @(gr28,#REG_PC) ,gr21
        ldi @(gr28,#REG_TBR),gr20

        movgs gr20,tbr
        movgs gr21,pcsr
        movgs gr23,isr
        movgs gr24,ccr
        movgs gr25,cccr
        movgs gr26,lr
        movgs gr27,lcr

        lddi @(gr28,#REG_GNER0),gr4
        movgs gr4,gner0
        movgs gr5,gner1

        lddi @(gr28,#REG_IACC0),gr4
        movgs gr4,iacc0h
        movgs gr5,iacc0l

        lddi @(gr28,#REG_GR(4)) ,gr4
        lddi @(gr28,#REG_GR(6)) ,gr6
        lddi @(gr28,#REG_GR(8)) ,gr8
        lddi @(gr28,#REG_GR(10)),gr10
        lddi @(gr28,#REG_GR(12)),gr12
        lddi @(gr28,#REG_GR(14)),gr14
        lddi @(gr28,#REG_GR(16)),gr16
        lddi @(gr28,#REG_GR(18)),gr18
        lddi @(gr28,#REG_GR(20)),gr20
        lddi @(gr28,#REG_GR(22)),gr22
        lddi @(gr28,#REG_GR(24)),gr24
        lddi @(gr28,#REG_GR(26)),gr26
        ldi @(gr28,#REG_GR(29)),gr29
        lddi @(gr28,#REG_GR(30)),gr30

        # check to see if a debugging return is required
        LEDS 0x67f0
        movsg ccr,gr2
        ldi @(gr28,#REG__STATUS),gr3
        andicc gr3,#REG__STATUS_STEP,gr0,icc0
        bne icc0,#0,__entry_return_singlestep
        movgs gr2,ccr

        ldi @(gr28,#REG_SP) ,sp
        lddi @(gr28,#REG_GR(2)) ,gr2
        ldi @(gr28,#REG_GR(28)),gr28

        LEDS 0x67fe
// movsg pcsr,gr31
// LEDS32

#if 0
        # store the current frame in the workram on the FR451
        movgs gr28,scr2
        sethi.p %hi(0xfe800000),gr28
        setlo %lo(0xfe800000),gr28

        stdi gr2,@(gr28,#REG_GR(2))
        stdi gr4,@(gr28,#REG_GR(4))
        stdi gr6,@(gr28,#REG_GR(6))
        stdi gr8,@(gr28,#REG_GR(8))
        stdi gr10,@(gr28,#REG_GR(10))
        stdi gr12,@(gr28,#REG_GR(12))
        stdi gr14,@(gr28,#REG_GR(14))
        stdi gr16,@(gr28,#REG_GR(16))
        stdi gr18,@(gr28,#REG_GR(18))
        stdi gr24,@(gr28,#REG_GR(24))
        stdi gr26,@(gr28,#REG_GR(26))
        sti gr29,@(gr28,#REG_GR(29))
        stdi gr30,@(gr28,#REG_GR(30))

        movsg tbr ,gr30
        sti gr30,@(gr28,#REG_TBR)
        movsg pcsr,gr30
        sti gr30,@(gr28,#REG_PC)
        movsg psr ,gr30
        sti gr30,@(gr28,#REG_PSR)
        movsg isr ,gr30
        sti gr30,@(gr28,#REG_ISR)
        movsg ccr ,gr30
        movsg cccr,gr31
        stdi gr30,@(gr28,#REG_CCR)
        movsg lr ,gr30
        movsg lcr ,gr31
        stdi gr30,@(gr28,#REG_LR)
        sti gr0 ,@(gr28,#REG_SYSCALLNO)
        movsg scr2,gr28
#endif

        rett #0

        # return via break.S
__entry_return_singlestep:
        movgs gr2,ccr
        lddi @(gr28,#REG_GR(2)) ,gr2
        ldi @(gr28,#REG_SP) ,sp
        ldi @(gr28,#REG_GR(28)),gr28
        LEDS 0x67ff
        break
        .globl __entry_return_singlestep_breaks_here
__entry_return_singlestep_breaks_here:
        nop

###############################################################################
#
# return to a process interrupted in kernel space
# - we need to consider preemption if that is enabled
#
###############################################################################
        .balign L1_CACHE_BYTES
__entry_return_from_kernel_exception:
        LEDS 0x6302
        movsg psr,gr23
        ori gr23,#PSR_PIL_14,gr23
        movgs gr23,psr
        bra __entry_return_direct

        .balign L1_CACHE_BYTES
__entry_return_from_kernel_interrupt:
        LEDS 0x6303
        movsg psr,gr23
        ori gr23,#PSR_PIL_14,gr23
        movgs gr23,psr

#ifdef CONFIG_PREEMPT
        ldi @(gr15,#TI_PRE_COUNT),gr5
        subicc gr5,#0,gr0,icc0
        beq icc0,#0,__entry_return_direct

__entry_preempt_need_resched:
        ldi @(gr15,#TI_FLAGS),gr4
        andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
        beq icc0,#1,__entry_return_direct

        setlos #PREEMPT_ACTIVE,gr5
        sti gr5,@(gr15,#TI_FLAGS)

        andi gr23,#~PSR_PIL,gr23
        movgs gr23,psr

        call schedule
        sti gr0,@(gr15,#TI_PRE_COUNT)

        movsg psr,gr23
        ori gr23,#PSR_PIL_14,gr23
        movgs gr23,psr
        bra __entry_preempt_need_resched
#else
        bra __entry_return_direct
#endif

###############################################################################
#
# perform work that needs to be done immediately before resumption
#
###############################################################################
        .globl __entry_return_from_user_exception
        .balign L1_CACHE_BYTES
__entry_return_from_user_exception:
        LEDS 0x6501

__entry_resume_userspace:
        # make sure we don't miss an interrupt setting need_resched or sigpending between
        # sampling and the RETT
        movsg psr,gr23
        ori gr23,#PSR_PIL_14,gr23
        movgs gr23,psr

__entry_return_from_user_interrupt:
        LEDS 0x6402
        ldi @(gr15,#TI_FLAGS),gr4
        sethi.p %hi(_TIF_WORK_MASK),gr5
        setlo %lo(_TIF_WORK_MASK),gr5
        andcc gr4,gr5,gr0,icc0
        beq icc0,#1,__entry_return_direct

__entry_work_pending:
        LEDS 0x6404
        andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
        beq icc0,#1,__entry_work_notifysig

__entry_work_resched:
        LEDS 0x6408
        movsg psr,gr23
        andi gr23,#~PSR_PIL,gr23
        movgs gr23,psr
        call schedule
        movsg psr,gr23
        ori gr23,#PSR_PIL_14,gr23
        movgs gr23,psr

        LEDS 0x6401
        ldi @(gr15,#TI_FLAGS),gr4
        sethi.p %hi(_TIF_WORK_MASK),gr5
        setlo %lo(_TIF_WORK_MASK),gr5
        andcc gr4,gr5,gr0,icc0
        beq icc0,#1,__entry_return_direct
        andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
        bne icc0,#1,__entry_work_resched

__entry_work_notifysig:
        LEDS 0x6410
        ori.p gr4,#0,gr8
        call do_notify_resume
        bra __entry_resume_userspace

        # perform syscall entry tracing
__syscall_trace_entry:
        LEDS 0x6320
        setlos.p #0,gr8
        call do_syscall_trace

        ldi @(gr28,#REG_SYSCALLNO),gr7
        lddi @(gr28,#REG_GR(8)) ,gr8
        lddi @(gr28,#REG_GR(10)),gr10
        lddi.p @(gr28,#REG_GR(12)),gr12

        subicc gr7,#nr_syscalls,gr0,icc0
        bnc icc0,#0,__syscall_badsys
        bra __syscall_call

        # perform syscall exit tracing
__syscall_exit_work:
        LEDS 0x6340
        andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
        beq icc0,#1,__entry_work_pending

        movsg psr,gr23
        andi gr23,#~PSR_PIL,gr23 ; could let do_syscall_trace() call schedule()
        movgs gr23,psr

        setlos.p #1,gr8
        call do_syscall_trace
        bra __entry_resume_userspace

__syscall_badsys:
        LEDS 0x6380
        setlos #-ENOSYS,gr8
        sti gr8,@(gr28,#REG_GR(8)) ; save return value
        bra __entry_resume_userspace

###############################################################################
#
# syscall vector table
#
###############################################################################
        .section .rodata
ALIGN
        .globl sys_call_table
sys_call_table:
        .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
        .long sys_exit
        .long sys_fork
        .long sys_read
        .long sys_write
        .long sys_open /* 5 */
        .long sys_close
        .long sys_waitpid
        .long sys_creat
        .long sys_link
        .long sys_unlink /* 10 */
        .long sys_execve
        .long sys_chdir
        .long sys_time
        .long sys_mknod
        .long sys_chmod /* 15 */
        .long sys_lchown16
        .long sys_ni_syscall /* old break syscall holder */
        .long sys_stat
        .long sys_lseek
        .long sys_getpid /* 20 */
        .long sys_mount
        .long sys_oldumount
        .long sys_setuid16
        .long sys_getuid16
        .long sys_ni_syscall // sys_stime /* 25 */
        .long sys_ptrace
        .long sys_alarm
        .long sys_fstat
        .long sys_pause
        .long sys_utime /* 30 */
        .long sys_ni_syscall /* old stty syscall holder */
        .long sys_ni_syscall /* old gtty syscall holder */
        .long sys_access
        .long sys_nice
        .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
        .long sys_sync
        .long sys_kill
        .long sys_rename
        .long sys_mkdir
        .long sys_rmdir /* 40 */
        .long sys_dup
        .long sys_pipe
        .long sys_times
        .long sys_ni_syscall /* old prof syscall holder */
        .long sys_brk /* 45 */
        .long sys_setgid16
        .long sys_getgid16
        .long sys_ni_syscall // sys_signal
        .long sys_geteuid16
        .long sys_getegid16 /* 50 */
        .long sys_acct
        .long sys_umount /* recycled never used phys() */
        .long sys_ni_syscall /* old lock syscall holder */
        .long sys_ioctl
        .long sys_fcntl /* 55 */
        .long sys_ni_syscall /* old mpx syscall holder */
        .long sys_setpgid
        .long sys_ni_syscall /* old ulimit syscall holder */
        .long sys_ni_syscall /* old old uname syscall */
        .long sys_umask /* 60 */
        .long sys_chroot
        .long sys_ustat
        .long sys_dup2
        .long sys_getppid
        .long sys_getpgrp /* 65 */
        .long sys_setsid
        .long sys_sigaction
        .long sys_ni_syscall // sys_sgetmask
        .long sys_ni_syscall // sys_ssetmask
        .long sys_setreuid16 /* 70 */
        .long sys_setregid16
        .long sys_sigsuspend
        .long sys_ni_syscall // sys_sigpending
        .long sys_sethostname
        .long sys_setrlimit /* 75 */
        .long sys_ni_syscall // sys_old_getrlimit
        .long sys_getrusage
        .long sys_gettimeofday
        .long sys_settimeofday
        .long sys_getgroups16 /* 80 */
        .long sys_setgroups16
        .long sys_ni_syscall /* old_select slot */
        .long sys_symlink
        .long sys_lstat
        .long sys_readlink /* 85 */
        .long sys_uselib
        .long sys_swapon
        .long sys_reboot
        .long sys_ni_syscall // old_readdir
        .long sys_ni_syscall /* 90 */ /* old_mmap slot */
        .long sys_munmap
        .long sys_truncate
        .long sys_ftruncate
        .long sys_fchmod
        .long sys_fchown16 /* 95 */
        .long sys_getpriority
        .long sys_setpriority
        .long sys_ni_syscall /* old profil syscall holder */
        .long sys_statfs
        .long sys_fstatfs /* 100 */
        .long sys_ni_syscall /* ioperm for i386 */
        .long sys_socketcall
        .long sys_syslog
        .long sys_setitimer
        .long sys_getitimer /* 105 */
        .long sys_newstat
        .long sys_newlstat
        .long sys_newfstat
        .long sys_ni_syscall /* obsolete olduname() syscall */
        .long sys_ni_syscall /* iopl for i386 */ /* 110 */
        .long sys_vhangup
        .long sys_ni_syscall /* obsolete idle() syscall */
        .long sys_ni_syscall /* vm86old for i386 */
        .long sys_wait4
        .long sys_swapoff /* 115 */
        .long sys_sysinfo
        .long sys_ipc
        .long sys_fsync
        .long sys_sigreturn
        .long sys_clone /* 120 */
        .long sys_setdomainname
        .long sys_newuname
        .long sys_ni_syscall /* old "cacheflush" */
        .long sys_adjtimex
        .long sys_mprotect /* 125 */
        .long sys_sigprocmask
        .long sys_ni_syscall /* old "create_module" */
        .long sys_init_module
        .long sys_delete_module
        .long sys_ni_syscall /* old "get_kernel_syms" */
        .long sys_quotactl
        .long sys_getpgid
        .long sys_fchdir
        .long sys_bdflush
        .long sys_sysfs /* 135 */
        .long sys_personality
        .long sys_ni_syscall /* for afs_syscall */
        .long sys_setfsuid16
        .long sys_setfsgid16
        .long sys_llseek /* 140 */
        .long sys_getdents
        .long sys_select
        .long sys_flock
        .long sys_msync
        .long sys_readv /* 145 */
        .long sys_writev
        .long sys_getsid
        .long sys_fdatasync
        .long sys_sysctl
        .long sys_mlock /* 150 */
        .long sys_munlock
        .long sys_mlockall
        .long sys_munlockall
        .long sys_sched_setparam
        .long sys_sched_getparam /* 155 */
        .long sys_sched_setscheduler
        .long sys_sched_getscheduler
        .long sys_sched_yield
        .long sys_sched_get_priority_max
        .long sys_sched_get_priority_min /* 160 */
        .long sys_sched_rr_get_interval
        .long sys_nanosleep
        .long sys_mremap
        .long sys_setresuid16
        .long sys_getresuid16 /* 165 */
        .long sys_ni_syscall /* for vm86 */
        .long sys_ni_syscall /* Old sys_query_module */
        .long sys_poll
        .long sys_nfsservctl
        .long sys_setresgid16 /* 170 */
        .long sys_getresgid16
        .long sys_prctl
        .long sys_rt_sigreturn
        .long sys_rt_sigaction
        .long sys_rt_sigprocmask /* 175 */
        .long sys_rt_sigpending
        .long sys_rt_sigtimedwait
        .long sys_rt_sigqueueinfo
        .long sys_rt_sigsuspend
        .long sys_pread64 /* 180 */
        .long sys_pwrite64
        .long sys_chown16
        .long sys_getcwd
        .long sys_capget
        .long sys_capset /* 185 */
        .long sys_sigaltstack
        .long sys_sendfile
        .long sys_ni_syscall /* streams1 */
        .long sys_ni_syscall /* streams2 */
        .long sys_vfork /* 190 */
        .long sys_getrlimit
        .long sys_mmap2
        .long sys_truncate64
        .long sys_ftruncate64
        .long sys_stat64 /* 195 */
        .long sys_lstat64
        .long sys_fstat64
        .long sys_lchown
        .long sys_getuid
        .long sys_getgid /* 200 */
        .long sys_geteuid
        .long sys_getegid
        .long sys_setreuid
        .long sys_setregid
        .long sys_getgroups /* 205 */
        .long sys_setgroups
        .long sys_fchown
        .long sys_setresuid
        .long sys_getresuid
        .long sys_setresgid /* 210 */
        .long sys_getresgid
        .long sys_chown
        .long sys_setuid
        .long sys_setgid
        .long sys_setfsuid /* 215 */
        .long sys_setfsgid
        .long sys_pivot_root
        .long sys_mincore
        .long sys_madvise
        .long sys_getdents64 /* 220 */
        .long sys_fcntl64
        .long sys_ni_syscall /* reserved for TUX */
        .long sys_ni_syscall /* Reserved for Security */
        .long sys_gettid
        .long sys_readahead /* 225 */
        .long sys_setxattr
        .long sys_lsetxattr
        .long sys_fsetxattr
        .long sys_getxattr
        .long sys_lgetxattr /* 230 */
        .long sys_fgetxattr
        .long sys_listxattr
        .long sys_llistxattr
        .long sys_flistxattr
        .long sys_removexattr /* 235 */
        .long sys_lremovexattr
        .long sys_fremovexattr
        .long sys_tkill
        .long sys_sendfile64
        .long sys_futex /* 240 */
        .long sys_sched_setaffinity
        .long sys_sched_getaffinity
        .long sys_ni_syscall //sys_set_thread_area
        .long sys_ni_syscall //sys_get_thread_area
        .long sys_io_setup /* 245 */
        .long sys_io_destroy
        .long sys_io_getevents
        .long sys_io_submit
        .long sys_io_cancel
        .long sys_fadvise64 /* 250 */
        .long sys_ni_syscall
        .long sys_exit_group
        .long sys_lookup_dcookie
        .long sys_epoll_create
        .long sys_epoll_ctl /* 255 */
        .long sys_epoll_wait
        .long sys_remap_file_pages
        .long sys_set_tid_address
        .long sys_timer_create
        .long sys_timer_settime /* 260 */
        .long sys_timer_gettime
        .long sys_timer_getoverrun
        .long sys_timer_delete
        .long sys_clock_settime
        .long sys_clock_gettime /* 265 */
        .long sys_clock_getres
        .long sys_clock_nanosleep
        .long sys_statfs64
        .long sys_fstatfs64
        .long sys_tgkill /* 270 */
        .long sys_utimes
        .long sys_fadvise64_64
        .long sys_ni_syscall /* sys_vserver */
        .long sys_mbind
        .long sys_get_mempolicy
        .long sys_set_mempolicy
        .long sys_mq_open
        .long sys_mq_unlink
        .long sys_mq_timedsend
        .long sys_mq_timedreceive /* 280 */
        .long sys_mq_notify
        .long sys_mq_getsetattr
        .long sys_ni_syscall /* reserved for kexec */
        .long sys_waitid
        .long sys_ni_syscall /* 285 */ /* available */
        .long sys_add_key
        .long sys_request_key
        .long sys_keyctl
        .long sys_ioprio_set
        .long sys_ioprio_get /* 290 */
        .long sys_inotify_init
        .long sys_inotify_add_watch
        .long sys_inotify_rm_watch
        .long sys_migrate_pages
        .long sys_openat /* 295 */
        .long sys_mkdirat
        .long sys_mknodat
        .long sys_fchownat
        .long sys_futimesat
        .long sys_newfstatat /* 300 */
        .long sys_unlinkat
        .long sys_renameat
        .long sys_linkat
        .long sys_symlinkat
        .long sys_readlinkat /* 305 */
        .long sys_fchmodat
        .long sys_faccessat
        .long sys_pselect6
        .long sys_ppoll


syscall_table_size = (. - sys_call_table)