  1. /* entry.S: FR-V entry
  2. *
  3. * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. *
  11. *
  12. * Entry to the kernel is "interesting":
  13. * (1) There are no stack pointers, not even for the kernel
  14. * (2) General Registers should not be clobbered
  15. * (3) There are no kernel-only data registers
  16. * (4) Since all addressing modes are wrt to a General Register, no global
  17. * variables can be reached
  18. *
  19. * We deal with this by declaring that we shall kill GR28 on entering the
  20. * kernel from userspace
  21. *
  22. * However, since break interrupts can interrupt the CPU even when PSR.ET==0,
  23. * they can't rely on GR28 to be anything useful, and so need to clobber a
  24. * separate register (GR31). Break interrupts are managed in break.S
  25. *
  26. * GR29 _is_ saved, and holds the current task pointer globally
  27. *
  28. */
  29. #include <linux/sys.h>
  30. #include <linux/config.h>
  31. #include <linux/linkage.h>
  32. #include <asm/thread_info.h>
  33. #include <asm/setup.h>
  34. #include <asm/segment.h>
  35. #include <asm/ptrace.h>
  36. #include <asm/errno.h>
  37. #include <asm/cache.h>
  38. #include <asm/spr-regs.h>
  39. #define nr_syscalls ((syscall_table_size)/4)
	.text
	.balign		4

# Debug macro: show the 16-bit constant \val on the board's LED/LCD ports
# (addresses 0xe1200004 and 0xffc00100 -- board-specific; confirm against the
# board documentation before re-enabling).  Every instruction is commented
# out, so the macro normally assembles to nothing.  When enabled it clobbers
# gr30 and gr31 (gr31 is the break-interrupt scratch reg -- see file header).
.macro LEDS val
#	sethi.p		%hi(0xe1200004),gr30
#	setlo		%lo(0xe1200004),gr30
#	setlos		#~\val,gr31
#	st		gr31,@(gr30,gr0)
#	sethi.p		%hi(0xffc00100),gr30
#	setlo		%lo(0xffc00100),gr30
#	sth		gr0,@(gr30,gr0)
#	membar
.endm

# Debug macro: show the 32-bit value currently in gr31 across the same LED
# and LCD ports.  Also fully commented out; clobbers gr30/gr31 when enabled.
.macro LEDS32
#	not		gr31,gr31
#	sethi.p		%hi(0xe1200004),gr30
#	setlo		%lo(0xe1200004),gr30
#	st.p		gr31,@(gr30,gr0)
#	srli		gr31,#16,gr31
#	sethi.p		%hi(0xffc00100),gr30
#	setlo		%lo(0xffc00100),gr30
#	sth		gr31,@(gr30,gr0)
#	membar
.endm
###############################################################################
#
# entry point for External interrupts received whilst executing userspace code
# - GR28 is clobbered on entry (permitted by the convention in the file
#   header) and is pointed at the pre-allocated kernel exception frame
#
###############################################################################
	.globl		__entry_uspace_external_interrupt
	.type		__entry_uspace_external_interrupt,@function
__entry_uspace_external_interrupt:
	LEDS		0x6200

	# load the address of the kernel-mode exception frame into GR28
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# handle h/w single-step through exceptions
	sti		gr0,@(gr28,#REG__STATUS)

	.globl		__entry_uspace_external_interrupt_reentry
__entry_uspace_external_interrupt_reentry:
	LEDS		0x6201

	# touch the frame's cache lines ahead of the burst of stores below
	# (dcpl: data-cache preload -- TODO confirm exact semantics vs FR-V manual)
	setlos		#REG__END,gr30
	dcpl		gr28,gr30,#0

	# finish building the exception frame: save userspace's GPRs
	sti		sp,  @(gr28,#REG_SP)
	stdi		gr2, @(gr28,#REG_GR(2))
	stdi		gr4, @(gr28,#REG_GR(4))
	stdi		gr6, @(gr28,#REG_GR(6))
	stdi		gr8, @(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr0, @(gr28,#REG_GR(28))	; GR28 was killed on entry: save 0
	sti		gr29,@(gr28,#REG_GR(29))
	stdi.p		gr30,@(gr28,#REG_GR(30))

	# set up the kernel stack pointer
	ori		gr28,0,sp

	# capture the interrupted context's special-purpose registers
	movsg		tbr ,gr20
	movsg		psr ,gr22
	movsg		pcsr,gr21
	movsg		isr ,gr23
	movsg		ccr ,gr24
	movsg		cccr,gr25
	movsg		lr  ,gr26
	movsg		lcr ,gr27

	# rebuild the pre-trap PSR: shift the saved PS bit back up into the S
	# position (the slli #1 implies PSR_PS sits one bit below PSR_S) and
	# clear ET, which the trap implicitly disabled
	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5		/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5
	or		gr6,gr5,gr5
	andi		gr5,#~PSR_ET,gr5

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	# syscall number -1 marks this frame as "not a system call"
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi		gr4,@(gr28,#REG_GNER0)

	# set up kernel global registers (GR29 = current task, GR16 = small-data
	# base, GR15 = current thread_info)
	sethi.p		%hi(__kernel_current_task),gr5
	setlo		%lo(__kernel_current_task),gr5
	sethi.p		%hi(_gp),gr16
	setlo		%lo(_gp),gr16
	ldi		@(gr5,#0),gr29
	ldi.p		@(gr29,#4),gr15		; __current_thread_info = current->thread_info

	# make sure we (the kernel) get div-zero and misalignment exceptions
	setlos		#ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
	movgs		gr5,isr

	# switch to the kernel trap table
	sethi.p		%hi(__entry_kerneltrap_table),gr6
	setlo		%lo(__entry_kerneltrap_table),gr6
	movgs		gr6,tbr

	# set the return address so do_IRQ returns through the user-interrupt
	# exit path
	sethi.p		%hi(__entry_return_from_user_interrupt),gr4
	setlo		%lo(__entry_return_from_user_interrupt),gr4
	movgs		gr4,lr

	# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
	# (two PSR writes: PIL is raised first, ET only once PIL is in force)
	movsg		psr,gr4
	ori		gr4,#PSR_PIL_14,gr4
	movgs		gr4,psr
	ori		gr4,#PSR_PIL_14|PSR_ET,gr4
	movgs		gr4,psr

	LEDS		0x6202
	bra		do_IRQ

	.size		__entry_uspace_external_interrupt,.-__entry_uspace_external_interrupt
###############################################################################
#
# entry point for External interrupts received whilst executing kernel code
# - on arriving here, the following registers should already be set up:
#	GR15	- current thread_info struct pointer
#	GR16	- kernel GP-REL pointer
#	GR29	- current task struct pointer
#	TBR	- kernel trap vector table
#	ISR	- kernel's preferred integer controls
#
###############################################################################
	.globl		__entry_kernel_external_interrupt
	.type		__entry_kernel_external_interrupt,@function
__entry_kernel_external_interrupt:
	LEDS		0x6210

	# debug: gr31 = current kernel stack depth (SP minus thread_info base)
	# for display by LEDS32 (normally compiled out)
	sub		sp,gr15,gr31
	LEDS32

	# set up the stack pointer: push an exception frame on the current
	# kernel stack, remembering the old SP in gr30
	or.p		sp,gr0,gr30
	subi		sp,#REG__END,sp
	sti		gr30,@(sp,#REG_SP)

	# handle h/w single-step through exceptions
	sti		gr0,@(sp,#REG__STATUS)

	.globl		__entry_kernel_external_interrupt_reentry
__entry_kernel_external_interrupt_reentry:
	LEDS		0x6211

	# set up the exception frame (dcpl preloads its cache lines)
	setlos		#REG__END,gr30
	dcpl		sp,gr30,#0

	sti.p		gr28,@(sp,#REG_GR(28))
	ori		sp,0,gr28

	# finish building the exception frame
	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi		gr30,@(gr28,#REG_GR(30))

	# capture the interrupted context's special-purpose registers
	movsg		tbr ,gr20
	movsg		psr ,gr22
	movsg		pcsr,gr21
	movsg		isr ,gr23
	movsg		ccr ,gr24
	movsg		cccr,gr25
	movsg		lr  ,gr26
	movsg		lcr ,gr27

	# rebuild the pre-trap PSR (PS shifted back into S; ET cleared)
	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5		/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5
	or		gr6,gr5,gr5
	andi.p		gr5,#~PSR_ET,gr5

	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
	# - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
	# (gr25 holds the saved CCCR; the modified value is what gets restored)
	andi		gr25,#~0xc0,gr25

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	# syscall number -1: not a system call
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi		gr4,@(gr28,#REG_GNER0)

	# set the return address so do_IRQ returns through the kernel-interrupt
	# exit path
	sethi.p		%hi(__entry_return_from_kernel_interrupt),gr4
	setlo		%lo(__entry_return_from_kernel_interrupt),gr4
	movgs		gr4,lr

	# clear power-saving mode flags
	movsg		hsr0,gr4
	andi		gr4,#~HSR0_PDM,gr4
	movgs		gr4,hsr0

	# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_PIL_14,gr4
	movgs		gr4,psr
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	LEDS		0x6212
	bra		do_IRQ

	.size		__entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
###############################################################################
#
# entry point for Software and Progam interrupts generated whilst executing
# userspace code
# - saves EAR0 (fault address) in SCR2 before it can be clobbered, then
#   builds a partial frame and defers to __entry_common
#
###############################################################################
	.globl		__entry_uspace_softprog_interrupt
	.type		__entry_uspace_softprog_interrupt,@function
	.globl		__entry_uspace_handle_mmu_fault
__entry_uspace_softprog_interrupt:
	LEDS		0x6000
#ifdef CONFIG_MMU
	# stash the fault address where later code can retrieve it (see the
	# SCR2 note in the __entry_common register contract below)
	movsg		ear0,gr28
__entry_uspace_handle_mmu_fault:
	movgs		gr28,scr2
#endif

	# point GR28 at the pre-allocated kernel exception frame
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# handle h/w single-step through exceptions
	sti		gr0,@(gr28,#REG__STATUS)

	.globl		__entry_uspace_softprog_interrupt_reentry
__entry_uspace_softprog_interrupt_reentry:
	LEDS		0x6001

	# preload the frame's cache lines
	setlos		#REG__END,gr30
	dcpl		gr28,gr30,#0

	# set up the kernel stack pointer
	sti.p		sp,@(gr28,#REG_SP)
	ori		gr28,0,sp

	sti		gr0,@(gr28,#REG_GR(28))	; GR28 was killed on entry: save 0

	# save GR20-23 now; __entry_common reuses them for TBR/PCSR/PSR and
	# the return handler, and saves everything else itself
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))

	movsg		tbr,gr20
	movsg		pcsr,gr21
	movsg		psr,gr22
	sethi.p		%hi(__entry_return_from_user_exception),gr23
	setlo		%lo(__entry_return_from_user_exception),gr23

	bra		__entry_common

	.size		__entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
  288. # single-stepping was disabled on entry to a TLB handler that then faulted
  289. #ifdef CONFIG_MMU
  290. .globl __entry_uspace_handle_mmu_fault_sstep
  291. __entry_uspace_handle_mmu_fault_sstep:
  292. movgs gr28,scr2
  293. sethi.p %hi(__kernel_frame0_ptr),gr28
  294. setlo %lo(__kernel_frame0_ptr),gr28
  295. ldi @(gr28,#0),gr28
  296. # flag single-step re-enablement
  297. sti gr0,@(gr28,#REG__STATUS)
  298. bra __entry_uspace_softprog_interrupt_reentry
  299. #endif
###############################################################################
#
# entry point for Software and Progam interrupts generated whilst executing
# kernel code
# - pushes an exception frame on the current kernel stack and defers to
#   __entry_common
#
###############################################################################
	.globl		__entry_kernel_softprog_interrupt
	.type		__entry_kernel_softprog_interrupt,@function
__entry_kernel_softprog_interrupt:
	LEDS		0x6004
#ifdef CONFIG_MMU
	# stash the fault address in SCR2 before it can be clobbered
	movsg		ear0,gr30
	movgs		gr30,scr2
#endif

	.globl		__entry_kernel_handle_mmu_fault
__entry_kernel_handle_mmu_fault:
	# set up the stack pointer
	# - the old SP is written at both REG_SP and REG_SP-4 so that, after
	#   the 8-byte alignment below (which may pull SP down by 4), the slot
	#   read back at @(sp,#REG_SP) still holds it; the reentry code below
	#   reconstructs the true old SP from it
	subi		sp,#REG__END,sp
	sti		sp,@(sp,#REG_SP)
	sti		sp,@(sp,#REG_SP-4)
	andi		sp,#~7,sp

	# handle h/w single-step through exceptions
	sti		gr0,@(sp,#REG__STATUS)

	.globl		__entry_kernel_softprog_interrupt_reentry
__entry_kernel_softprog_interrupt_reentry:
	LEDS		0x6005

	# preload the frame's cache lines
	setlos		#REG__END,gr30
	dcpl		sp,gr30,#0

	# set up the exception frame
	sti.p		gr28,@(sp,#REG_GR(28))
	ori		sp,0,gr28

	# save GR20-23 now; __entry_common reuses them and saves the rest
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))

	ldi		@(sp,#REG_SP),gr22	/* reconstruct the old SP */
	addi		gr22,#REG__END,gr22
	sti		gr22,@(sp,#REG_SP)

	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
	# - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
	movsg		cccr,gr20
	andi		gr20,#~0xc0,gr20
	movgs		gr20,cccr

	movsg		tbr,gr20
	movsg		pcsr,gr21
	movsg		psr,gr22
	sethi.p		%hi(__entry_return_from_kernel_exception),gr23
	setlo		%lo(__entry_return_from_kernel_exception),gr23

	bra		__entry_common

	.size		__entry_kernel_softprog_interrupt,.-__entry_kernel_softprog_interrupt
# single-stepping was disabled on entry to a TLB handler that then faulted
# - same as __entry_kernel_handle_mmu_fault, but records REG__STATUS_STEP so
#   the exception-return path re-enables single-stepping
#ifdef CONFIG_MMU
	.globl		__entry_kernel_handle_mmu_fault_sstep
__entry_kernel_handle_mmu_fault_sstep:
	# set up the stack pointer (see __entry_kernel_handle_mmu_fault for
	# why the old SP is stored at both REG_SP and REG_SP-4)
	subi		sp,#REG__END,sp
	sti		sp,@(sp,#REG_SP)
	sti		sp,@(sp,#REG_SP-4)
	andi		sp,#~7,sp

	# flag single-step re-enablement
	sethi		#REG__STATUS_STEP,gr30
	sti		gr30,@(sp,#REG__STATUS)
	bra		__entry_kernel_softprog_interrupt_reentry
#endif
###############################################################################
#
# the rest of the kernel entry point code
# - on arriving here, the following registers should be set up:
#	GR1	- kernel stack pointer
#	GR7	- syscall number (trap 0 only)
#	GR8-13	- syscall args (trap 0 only)
#	GR20	- saved TBR
#	GR21	- saved PC
#	GR22	- saved PSR
#	GR23	- return handler address
#	GR28	- exception frame on stack
#	SCR2	- saved EAR0 where applicable (clobbered by ICI & ICEF insns on FR451)
#	PSR	- PSR.S 1, PSR.ET 0
#
###############################################################################
	.globl		__entry_common
	.type		__entry_common,@function
__entry_common:
	LEDS		0x6008

	# finish building the exception frame
	# (GR20-23 were already saved by the entry stub that branched here)
	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi		gr30,@(gr28,#REG_GR(30))

	# capture the remaining SPRs, and install the entry stub's chosen
	# return handler (GR23) as the link register
	movsg		lcr ,gr27
	movsg		lr  ,gr26
	movgs		gr23,lr
	movsg		cccr,gr25
	movsg		ccr ,gr24
	movsg		isr ,gr23

	# rebuild the pre-trap PSR from the copy in GR22 (PS shifted back into
	# S; ET cleared)
	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5		/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5
	or		gr6,gr5,gr5
	andi		gr5,#~PSR_ET,gr5

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	# syscall number -1 for now; system_call overwrites it with GR7
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi		gr4,@(gr28,#REG_GNER0)

	# set up kernel global registers (GR29 = current, GR16 = small-data
	# base, GR15 = current thread_info)
	sethi.p		%hi(__kernel_current_task),gr5
	setlo		%lo(__kernel_current_task),gr5
	sethi.p		%hi(_gp),gr16
	setlo		%lo(_gp),gr16
	ldi		@(gr5,#0),gr29
	ldi		@(gr29,#4),gr15		; __current_thread_info = current->thread_info

	# switch to the kernel trap table
	sethi.p		%hi(__entry_kerneltrap_table),gr6
	setlo		%lo(__entry_kerneltrap_table),gr6
	movgs		gr6,tbr

	# make sure we (the kernel) get div-zero and misalignment exceptions
	setlos		#ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
	movgs		gr5,isr

	# clear power-saving mode flags
	movsg		hsr0,gr4
	andi		gr4,#~HSR0_PDM,gr4
	movgs		gr4,hsr0

	# multiplex again using old TBR as a guide: extract the trap-type
	# field from the saved TBR, scale it to a word index, and dispatch
	# through __entry_vector_table to the per-trap stub below
	setlos.p	#TBR_TT,gr3
	sethi		%hi(__entry_vector_table),gr6
	and.p		gr20,gr3,gr5
	setlo		%lo(__entry_vector_table),gr6
	srli		gr5,#2,gr5
	ld		@(gr5,gr6),gr5

	LEDS		0x6009
	jmpl		@(gr5,gr0)

	.size		__entry_common,.-__entry_common
###############################################################################
#
# handle instruction MMU fault
# - dispatched from __entry_common; the exception frame is in GR28 and the
#   fault address was saved in SCR2 by the entry stub
#
###############################################################################
#ifdef CONFIG_MMU
	.globl		__entry_insn_mmu_fault
__entry_insn_mmu_fault:
	LEDS		0x6010
	# C args: gr8 = 0 (instruction, not data), gr9 = ESR0, gr10 = fault addr
	setlos		#0,gr8
	movsg		esr0,gr9
	movsg		scr2,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	# jump to the C handler; the return handler installed in LR by
	# __entry_common takes us back out
	sethi.p		%hi(do_page_fault),gr5
	setlo		%lo(do_page_fault),gr5
	jmpl		@(gr5,gr0)	; call do_page_fault(0,esr0,ear0)
#endif
###############################################################################
#
# handle instruction access error
# - dispatched from __entry_common; C args loaded from the exception SPRs
#
###############################################################################
	.globl		__entry_insn_access_error
__entry_insn_access_error:
	LEDS		0x6011
	sethi.p		%hi(insn_access_error),gr5
	setlo		%lo(insn_access_error),gr5
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call insn_access_error(esfr1,epcr0,esr0)
###############################################################################
#
# handle various instructions of dubious legality
# - all four entry points funnel into illegal_instruction(); the unsupported-
#   trap case first rewinds the saved PC by one instruction (4 bytes) so the
#   report points at the offending trap insn
#
###############################################################################
	.globl		__entry_unsupported_trap
	.globl		__entry_illegal_instruction
	.globl		__entry_privileged_instruction
	.globl		__entry_debug_exception
__entry_unsupported_trap:
	subi		gr21,#4,gr21
	sti		gr21,@(gr28,#REG_PC)
__entry_illegal_instruction:
__entry_privileged_instruction:
__entry_debug_exception:
	LEDS		0x6012
	sethi.p		%hi(illegal_instruction),gr5
	setlo		%lo(illegal_instruction),gr5
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call ill_insn(esfr1,epcr0,esr0)
###############################################################################
#
# handle media exception
# - C args are the two media status registers
#
###############################################################################
	.globl		__entry_media_exception
__entry_media_exception:
	LEDS		0x6013
	sethi.p		%hi(media_exception),gr5
	setlo		%lo(media_exception),gr5
	movsg		msr0,gr8
	movsg		msr1,gr9

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call media_excep(msr0,msr1)
###############################################################################
#
# handle data MMU fault
# handle data DAT fault (write-protect exception)
# - same shape as __entry_insn_mmu_fault, but gr8 = 1 flags a data access
#
###############################################################################
#ifdef CONFIG_MMU
	.globl		__entry_data_mmu_fault
__entry_data_mmu_fault:
	.globl		__entry_data_dat_fault
__entry_data_dat_fault:
	LEDS		0x6014
	# C args: gr8 = 1 (data access), gr9 = ESR0, gr10 = fault address
	setlos		#1,gr8
	movsg		esr0,gr9
	movsg		scr2,gr10	; saved EAR0

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	sethi.p		%hi(do_page_fault),gr5
	setlo		%lo(do_page_fault),gr5
	jmpl		@(gr5,gr0)	; call do_page_fault(1,esr0,ear0)
#endif
###############################################################################
#
# handle data and instruction access exceptions
#
###############################################################################
	.globl		__entry_insn_access_exception
	.globl		__entry_data_access_exception
__entry_insn_access_exception:
__entry_data_access_exception:
	LEDS		0x6016
	sethi.p		%hi(memory_access_exception),gr5
	setlo		%lo(memory_access_exception),gr5
	movsg		esr0,gr8
	movsg		scr2,gr9	; saved EAR0
	movsg		epcr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call memory_access_error(esr0,ear0,epcr0)
###############################################################################
#
# handle data access error
#
###############################################################################
	.globl		__entry_data_access_error
__entry_data_access_error:
	# NOTE(review): LED code 0x6016 duplicates the access-exception entry
	# above -- possibly meant to be a distinct code; harmless either way
	LEDS		0x6016
	sethi.p		%hi(data_access_error),gr5
	setlo		%lo(data_access_error),gr5
	movsg		esfr1,gr8
	movsg		esr15,gr9
	movsg		ear15,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call data_access_error(esfr1,esr15,ear15)
###############################################################################
#
# handle data store error
#
###############################################################################
	.globl		__entry_data_store_error
__entry_data_store_error:
	LEDS		0x6017
	sethi.p		%hi(data_store_error),gr5
	setlo		%lo(data_store_error),gr5
	movsg		esfr1,gr8
	movsg		esr14,gr9

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call data_store_error(esfr1,esr14)
###############################################################################
#
# handle division exception
#
###############################################################################
	.globl		__entry_division_exception
__entry_division_exception:
	LEDS		0x6018
	sethi.p		%hi(division_exception),gr5
	setlo		%lo(division_exception),gr5
	movsg		esfr1,gr8
	movsg		esr0,gr9
	movsg		isr,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call div_excep(esfr1,esr0,isr)
###############################################################################
#
# handle compound exception
# - passes the full set of exception status registers to the C handler
#
###############################################################################
	.globl		__entry_compound_exception
__entry_compound_exception:
	LEDS		0x6019
	sethi.p		%hi(compound_exception),gr5
	setlo		%lo(compound_exception),gr5
	movsg		esfr1,gr8
	movsg		esr0,gr9
	movsg		esr14,gr10
	movsg		esr15,gr11
	movsg		msr0,gr12
	movsg		msr1,gr13

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call comp_excep(esfr1,esr0,esr14,esr15,msr0,msr1)
###############################################################################
#
# handle interrupts and NMIs
# - thin stubs: re-enable exceptions then tail-branch to the C handlers
#
###############################################################################
	.globl		__entry_do_IRQ
__entry_do_IRQ:
	LEDS		0x6020

	# we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	bra		do_IRQ

	.globl		__entry_do_NMI
__entry_do_NMI:
	LEDS		0x6021

	# we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	bra		do_NMI
###############################################################################
#
# the return path for a newly forked child process
# - __switch_to() saved the old current pointer in GR8 for us, which is
#   exactly where the first C argument to schedule_tail() lives
#
###############################################################################
	.globl		ret_from_fork
ret_from_fork:
	LEDS		0x6100
	call		schedule_tail

	# fork & co. return 0 to child
	setlos.p	#0,gr8
	bra		__syscall_exit
  680. ###################################################################################################
  681. #
  682. # Return to user mode is not as complex as all this looks,
  683. # but we want the default path for a system call return to
  684. # go as quickly as possible which is why some of this is
  685. # less clear than it otherwise should be.
  686. #
  687. ###################################################################################################
  688. .balign L1_CACHE_BYTES
  689. .globl system_call
  690. system_call:
  691. LEDS 0x6101
  692. movsg psr,gr4 ; enable exceptions
  693. ori gr4,#PSR_ET,gr4
  694. movgs gr4,psr
  695. sti gr7,@(gr28,#REG_SYSCALLNO)
  696. sti.p gr8,@(gr28,#REG_ORIG_GR8)
  697. subicc gr7,#nr_syscalls,gr0,icc0
  698. bnc icc0,#0,__syscall_badsys
  699. ldi @(gr15,#TI_FLAGS),gr4
  700. ori gr4,#_TIF_SYSCALL_TRACE,gr4
  701. andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
  702. bne icc0,#0,__syscall_trace_entry
  703. __syscall_call:
  704. slli.p gr7,#2,gr7
  705. sethi %hi(sys_call_table),gr5
  706. setlo %lo(sys_call_table),gr5
  707. ld @(gr5,gr7),gr4
  708. calll @(gr4,gr0)
###############################################################################
#
# return to interrupted process
# - stores the syscall return value, then either falls through to the fast
#   direct-restore path below or branches out to __syscall_exit_work if any
#   work flags (resched/signal/trace) are set
#
###############################################################################
__syscall_exit:
	LEDS		0x6300

	sti		gr8,@(gr28,#REG_GR(8))	; save return value

	# rebuild saved psr - execve will change it for init/main.c
	# (inverse of the entry-path transform: shift S back down into PS,
	# then force S so we stay in supervisor mode until the RETT)
	ldi		@(gr28,#REG_PSR),gr22
	srli		gr22,#1,gr5
	andi.p		gr22,#~PSR_PS,gr22
	andi		gr5,#PSR_PS,gr5
	or		gr5,gr22,gr22
	ori		gr22,#PSR_S,gr22

	# keep current PSR in GR23
	movsg		psr,gr23

	# make sure we don't miss an interrupt setting need_resched or sigpending between
	# sampling and the RETT (raise PIL so only NMIs get through)
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

	# any outstanding work? if so, take the slow exit path
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_ALLWORK_MASK),gr5
	setlo		%lo(_TIF_ALLWORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	bne		icc0,#0,__syscall_exit_work

	# no work: fall through to __entry_return_direct to restore all
	# registers and return
  736. __entry_return_direct:
  737. LEDS 0x6301
  738. andi gr22,#~PSR_ET,gr22
  739. movgs gr22,psr
  740. ldi @(gr28,#REG_ISR),gr23
  741. lddi @(gr28,#REG_CCR),gr24
  742. lddi @(gr28,#REG_LR) ,gr26
  743. ldi @(gr28,#REG_PC) ,gr21
  744. ldi @(gr28,#REG_TBR),gr20
  745. movgs gr20,tbr
  746. movgs gr21,pcsr
  747. movgs gr23,isr
  748. movgs gr24,ccr
  749. movgs gr25,cccr
  750. movgs gr26,lr
  751. movgs gr27,lcr
  752. lddi @(gr28,#REG_GNER0),gr4
  753. movgs gr4,gner0
  754. movgs gr5,gner1
  755. lddi @(gr28,#REG_IACC0),gr4
  756. movgs gr4,iacc0h
  757. movgs gr5,iacc0l
  758. lddi @(gr28,#REG_GR(4)) ,gr4
  759. lddi @(gr28,#REG_GR(6)) ,gr6
  760. lddi @(gr28,#REG_GR(8)) ,gr8
  761. lddi @(gr28,#REG_GR(10)),gr10
  762. lddi @(gr28,#REG_GR(12)),gr12
  763. lddi @(gr28,#REG_GR(14)),gr14
  764. lddi @(gr28,#REG_GR(16)),gr16
  765. lddi @(gr28,#REG_GR(18)),gr18
  766. lddi @(gr28,#REG_GR(20)),gr20
  767. lddi @(gr28,#REG_GR(22)),gr22
  768. lddi @(gr28,#REG_GR(24)),gr24
  769. lddi @(gr28,#REG_GR(26)),gr26
  770. ldi @(gr28,#REG_GR(29)),gr29
  771. lddi @(gr28,#REG_GR(30)),gr30
  772. # check to see if a debugging return is required
  773. LEDS 0x67f0
  774. movsg ccr,gr2
  775. ldi @(gr28,#REG__STATUS),gr3
  776. andicc gr3,#REG__STATUS_STEP,gr0,icc0
  777. bne icc0,#0,__entry_return_singlestep
  778. movgs gr2,ccr
  779. ldi @(gr28,#REG_SP) ,sp
  780. lddi @(gr28,#REG_GR(2)) ,gr2
  781. ldi @(gr28,#REG_GR(28)),gr28
  782. LEDS 0x67fe
  783. // movsg pcsr,gr31
  784. // LEDS32
  785. #if 0
  786. # store the current frame in the workram on the FR451
  787. movgs gr28,scr2
  788. sethi.p %hi(0xfe800000),gr28
  789. setlo %lo(0xfe800000),gr28
  790. stdi gr2,@(gr28,#REG_GR(2))
  791. stdi gr4,@(gr28,#REG_GR(4))
  792. stdi gr6,@(gr28,#REG_GR(6))
  793. stdi gr8,@(gr28,#REG_GR(8))
  794. stdi gr10,@(gr28,#REG_GR(10))
  795. stdi gr12,@(gr28,#REG_GR(12))
  796. stdi gr14,@(gr28,#REG_GR(14))
  797. stdi gr16,@(gr28,#REG_GR(16))
  798. stdi gr18,@(gr28,#REG_GR(18))
  799. stdi gr24,@(gr28,#REG_GR(24))
  800. stdi gr26,@(gr28,#REG_GR(26))
  801. sti gr29,@(gr28,#REG_GR(29))
  802. stdi gr30,@(gr28,#REG_GR(30))
  803. movsg tbr ,gr30
  804. sti gr30,@(gr28,#REG_TBR)
  805. movsg pcsr,gr30
  806. sti gr30,@(gr28,#REG_PC)
  807. movsg psr ,gr30
  808. sti gr30,@(gr28,#REG_PSR)
  809. movsg isr ,gr30
  810. sti gr30,@(gr28,#REG_ISR)
  811. movsg ccr ,gr30
  812. movsg cccr,gr31
  813. stdi gr30,@(gr28,#REG_CCR)
  814. movsg lr ,gr30
  815. movsg lcr ,gr31
  816. stdi gr30,@(gr28,#REG_LR)
  817. sti gr0 ,@(gr28,#REG_SYSCALLNO)
  818. movsg scr2,gr28
  819. #endif
  820. rett #0
	# return via break.S
	# (single-stepped task: finish the register reload, then trap into the
	# break handler rather than RETTing directly)
__entry_return_singlestep:
	movgs		gr2,ccr
	lddi		@(gr28,#REG_GR(2)) ,gr2
	ldi		@(gr28,#REG_SP)  ,sp
	ldi		@(gr28,#REG_GR(28)),gr28
	LEDS		0x67ff
	break
	.globl		__entry_return_singlestep_breaks_here
__entry_return_singlestep_breaks_here:
	nop
###############################################################################
#
# return to a process interrupted in kernel space
# - we need to consider preemption if that is enabled
#
###############################################################################
	.balign		L1_CACHE_BYTES
__entry_return_from_kernel_exception:
	LEDS		0x6302
	# raise the interrupt priority level so nothing can slip in between
	# here and the RETT, then take the common direct-return path
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr
	bra		__entry_return_direct
	.balign		L1_CACHE_BYTES
__entry_return_from_kernel_interrupt:
	LEDS		0x6303
	# raise the interrupt priority level whilst we decide whether to preempt
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

#ifdef CONFIG_PREEMPT
	# only preempt when the preemption count is zero
	ldi		@(gr15,#TI_PRE_COUNT),gr5
	subicc		gr5,#0,gr0,icc0
	beq		icc0,#0,__entry_return_direct

__entry_preempt_need_resched:
	# loop until TIF_NEED_RESCHED is clear
	ldi		@(gr15,#TI_FLAGS),gr4
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	beq		icc0,#1,__entry_return_direct

	# mark the preemption in progress
	setlos		#PREEMPT_ACTIVE,gr5
	sti		gr5,@(gr15,#TI_FLAGS)	; NOTE(review): stores PREEMPT_ACTIVE into
						; TI_FLAGS, yet the clear after schedule()
						; below targets TI_PRE_COUNT — this store
						; probably should use TI_PRE_COUNT too;
						; confirm against upstream

	# unmask interrupts and reschedule
	andi		gr23,#~PSR_PIL,gr23
	movgs		gr23,psr
	call		schedule
	sti		gr0,@(gr15,#TI_PRE_COUNT)

	# re-raise the interrupt priority level and re-test the flag
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr
	bra		__entry_preempt_need_resched
#else
	bra		__entry_return_direct
#endif
###############################################################################
#
# perform work that needs to be done immediately before resumption
#
###############################################################################
	.globl		__entry_return_from_user_exception
	.balign		L1_CACHE_BYTES
__entry_return_from_user_exception:
	LEDS		0x6501

__entry_resume_userspace:
	# make sure we don't miss an interrupt setting need_resched or sigpending between
	# sampling and the RETT
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

__entry_return_from_user_interrupt:
	LEDS		0x6402
	# anything to do before returning to userspace?
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_WORK_MASK),gr5
	setlo		%lo(_TIF_WORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	beq		icc0,#1,__entry_return_direct

__entry_work_pending:
	LEDS		0x6404
	# reschedule first if required; otherwise fall through to signal work
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	beq		icc0,#1,__entry_work_notifysig

__entry_work_resched:
	LEDS		0x6408
	# unmask interrupts around the call to schedule()
	movsg		psr,gr23
	andi		gr23,#~PSR_PIL,gr23
	movgs		gr23,psr
	call		schedule
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

	LEDS		0x6401
	# re-sample the work flags; keep rescheduling for as long as
	# TIF_NEED_RESCHED remains set
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_WORK_MASK),gr5
	setlo		%lo(_TIF_WORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	beq		icc0,#1,__entry_return_direct
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	bne		icc0,#1,__entry_work_resched

__entry_work_notifysig:
	LEDS		0x6410
	# handle pending signal / notify-resume work
	ori.p		gr4,#0,gr8		; pass the sampled thread flags in gr8
	call		do_notify_resume
	bra		__entry_return_direct
# perform syscall entry tracing
__syscall_trace_entry:
	LEDS		0x6320
	setlos.p	#0,gr8			; 0 => entry tracing (cf. #1 on the exit path)
	call		do_syscall_trace

	# the tracer may have altered the registers; reload the syscall number
	# and argument registers from the save frame
	ldi		@(gr28,#REG_SYSCALLNO),gr7
	lddi		@(gr28,#REG_GR(8)) ,gr8
	lddi		@(gr28,#REG_GR(10)),gr10
	lddi.p		@(gr28,#REG_GR(12)),gr12

	# re-validate the (possibly rewritten) syscall number
	subicc		gr7,#nr_syscalls,gr0,icc0
	bnc		icc0,#0,__syscall_badsys
	bra		__syscall_call
# perform syscall exit tracing
__syscall_exit_work:
	LEDS		0x6340
	# only invoke the tracer if TIF_SYSCALL_TRACE is actually set; any
	# other pending-work bits are handled by the normal user return path
	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
	beq		icc0,#1,__entry_work_pending

	movsg		psr,gr23
	andi		gr23,#~PSR_PIL,gr23	; could let do_syscall_trace() call schedule()
	movgs		gr23,psr

	setlos.p	#1,gr8			; 1 => exit tracing
	call		do_syscall_trace
	bra		__entry_resume_userspace
# syscall number out of range: fail the call with -ENOSYS
__syscall_badsys:
	LEDS		0x6380
	setlos		#-ENOSYS,gr8
	sti		gr8,@(gr28,#REG_GR(8))	; save return value
	bra		__entry_resume_userspace
###############################################################################
#
# syscall vector table
# - indexed from __syscall_call above: entry N is the handler for syscall N
# - __MMU(x) degrades to sys_ni_syscall on no-MMU configurations
#
###############################################################################
#ifdef CONFIG_MMU
#define __MMU(X) X
#else
#define __MMU(X) sys_ni_syscall
#endif

	.section .rodata
ALIGN
	.globl		sys_call_table
sys_call_table:
	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
	.long sys_exit
	.long sys_fork
	.long sys_read
	.long sys_write
	.long sys_open			/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink		/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod			/* 15 */
	.long sys_lchown16
	.long sys_ni_syscall		/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid		/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid16
	.long sys_getuid16
	.long sys_ni_syscall		// sys_stime /* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime			/* 30 */
	.long sys_ni_syscall		/* old stty syscall holder */
	.long sys_ni_syscall		/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall		/* 35 */ /* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir			/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall		/* old prof syscall holder */
	.long sys_brk			/* 45 */
	.long sys_setgid16
	.long sys_getgid16
	.long sys_ni_syscall		// sys_signal
	.long sys_geteuid16
	.long sys_getegid16		/* 50 */
	.long sys_acct
	.long sys_umount		/* recycled never used phys() */
	.long sys_ni_syscall		/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl			/* 55 */
	.long sys_ni_syscall		/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall		/* old ulimit syscall holder */
	.long sys_ni_syscall		/* old old uname syscall */
	.long sys_umask			/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp		/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_ni_syscall		// sys_sgetmask
	.long sys_ni_syscall		// sys_ssetmask
	.long sys_setreuid16		/* 70 */
	.long sys_setregid16
	.long sys_sigsuspend
	.long sys_ni_syscall		// sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit		/* 75 */
	.long sys_ni_syscall		// sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16		/* 80 */
	.long sys_setgroups16
	.long sys_ni_syscall		/* old_select slot */
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink		/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long sys_ni_syscall		// old_readdir
	.long sys_ni_syscall		/* 90 */ /* old_mmap slot */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown16		/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall		/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs		/* 100 */
	.long sys_ni_syscall		/* ioperm for i386 */
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer		/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_ni_syscall		/* obsolete olduname() syscall */
	.long sys_ni_syscall		/* iopl for i386 */ /* 110 */
	.long sys_vhangup
	.long sys_ni_syscall		/* obsolete idle() syscall */
	.long sys_ni_syscall		/* vm86old for i386 */
	.long sys_wait4
	.long sys_swapoff		/* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long sys_clone			/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall		/* old "cacheflush" */
	.long sys_adjtimex
	.long __MMU(sys_mprotect)	/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall		/* old "create_module" */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall		/* old "get_kernel_syms" */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs			/* 135 */
	.long sys_personality
	.long sys_ni_syscall		/* for afs_syscall */
	.long sys_setfsuid16
	.long sys_setfsgid16
	.long sys_llseek		/* 140 */
	.long sys_getdents
	.long sys_select
	.long sys_flock
	.long __MMU(sys_msync)
	.long sys_readv			/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long __MMU(sys_mlock)		/* 150 */
	.long __MMU(sys_munlock)
	.long __MMU(sys_mlockall)
	.long __MMU(sys_munlockall)
	.long sys_sched_setparam
	.long sys_sched_getparam	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min /* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long __MMU(sys_mremap)
	.long sys_setresuid16
	.long sys_getresuid16		/* 165 */
	.long sys_ni_syscall		/* for vm86 */
	.long sys_ni_syscall		/* Old sys_query_module */
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid16		/* 170 */
	.long sys_getresgid16
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread64		/* 180 */
	.long sys_pwrite64
	.long sys_chown16
	.long sys_getcwd
	.long sys_capget
	.long sys_capset		/* 185 */
	.long sys_sigaltstack
	.long sys_sendfile
	.long sys_ni_syscall		/* streams1 */
	.long sys_ni_syscall		/* streams2 */
	.long sys_vfork			/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64		/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_lchown
	.long sys_getuid
	.long sys_getgid		/* 200 */
	.long sys_geteuid
	.long sys_getegid
	.long sys_setreuid
	.long sys_setregid
	.long sys_getgroups		/* 205 */
	.long sys_setgroups
	.long sys_fchown
	.long sys_setresuid
	.long sys_getresuid
	.long sys_setresgid		/* 210 */
	.long sys_getresgid
	.long sys_chown
	.long sys_setuid
	.long sys_setgid
	.long sys_setfsuid		/* 215 */
	.long sys_setfsgid
	.long sys_pivot_root
	.long __MMU(sys_mincore)
	.long __MMU(sys_madvise)
	.long sys_getdents64		/* 220 */
	.long sys_fcntl64
	.long sys_ni_syscall		/* reserved for TUX */
	.long sys_ni_syscall		/* Reserved for Security */
	.long sys_gettid
	.long sys_readahead		/* 225 */
	.long sys_setxattr
	.long sys_lsetxattr
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr		/* 230 */
	.long sys_fgetxattr
	.long sys_listxattr
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr		/* 235 */
	.long sys_lremovexattr
	.long sys_fremovexattr
	.long sys_tkill
	.long sys_sendfile64
	.long sys_futex			/* 240 */
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_ni_syscall		//sys_set_thread_area
	.long sys_ni_syscall		//sys_get_thread_area
	.long sys_io_setup		/* 245 */
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit
	.long sys_io_cancel
	.long sys_fadvise64		/* 250 */
	.long sys_ni_syscall
	.long sys_exit_group
	.long sys_lookup_dcookie
	.long sys_epoll_create
	.long sys_epoll_ctl		/* 255 */
	.long sys_epoll_wait
	.long __MMU(sys_remap_file_pages)
	.long sys_set_tid_address
	.long sys_timer_create
	.long sys_timer_settime		/* 260 */
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime
	.long sys_clock_gettime		/* 265 */
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long sys_statfs64
	.long sys_fstatfs64
	.long sys_tgkill		/* 270 */
	.long sys_utimes
	.long sys_fadvise64_64
	.long sys_ni_syscall		/* sys_vserver */
	.long sys_mbind
	.long sys_get_mempolicy
	.long sys_set_mempolicy
	.long sys_mq_open
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive	/* 280 */
	.long sys_mq_notify
	.long sys_mq_getsetattr
	.long sys_ni_syscall		/* reserved for kexec */
	.long sys_waitid
	.long sys_ni_syscall		/* 285 */ /* available */
	.long sys_add_key
	.long sys_request_key
	.long sys_keyctl
	.long sys_ni_syscall		// sys_vperfctr_open
	.long sys_ni_syscall		// sys_vperfctr_control /* 290 */
	.long sys_ni_syscall		// sys_vperfctr_unlink
	.long sys_ni_syscall		// sys_vperfctr_iresume
	.long sys_ni_syscall		// sys_vperfctr_read

syscall_table_size = (. - sys_call_table)