/*
 * arch/score/kernel/entry.S
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 * Chen Liqin <liqin.chen@sunplusct.com>
 * Lennox Wu <lennox.wu@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * disable_irq: mask interrupts by clearing bit 0 of cr0 (the PSR).
 * The bit is cleared by shifting it out to the right and back in.
 * Clobbers r8.
 * NOTE(review): the five trailing nops presumably cover the hazard
 * window before the mtcr to cr0 takes effect (enable_irq pads the
 * same way) — confirm against the Score pipeline documentation.
 */
.macro disable_irq
	mfcr	r8, cr0
	srli	r8, r8, 1		# shift interrupt-enable bit out...
	slli	r8, r8, 1		# ...and back in as zero
	mtcr	r8, cr0
	nop
	nop
	nop
	nop
	nop
.endm
/*
 * enable_irq: unmask interrupts by setting bit 0 of cr0 (the PSR).
 * Clobbers r8.  The trailing nops mirror disable_irq's padding for
 * the cr0 write to settle.
 */
.macro enable_irq
	mfcr	r8, cr0
	ori	r8, 1			# set the interrupt-enable bit
	mtcr	r8, cr0
	nop
	nop
	nop
	nop
	nop
.endm
/*
 * Exception vector stubs (init section).  Each slot is padded with
 * nop! so the following ENTRY lands at its fixed hardware offset —
 * see the "should move to addr ..." notes below.  The real work is
 * done by the targets of the j instructions.
 */
__INIT
ENTRY(debug_exception_vector)
	nop!
	nop!
	nop!
	nop!
	nop!
	nop!
	nop!
	nop!
ENTRY(general_exception_vector)		# should move to addr 0x200
	j	general_exception
	nop!
	nop!
	nop!
	nop!
	nop!
	nop!
ENTRY(interrupt_exception_vector)	# should move to addr 0x210
	j	interrupt_exception
	nop!
	nop!
	nop!
	nop!
	nop!
	nop!
.section ".text", "ax"
	.align	2;
/*
 * general_exception: first-level dispatcher.  Reads the exception
 * code from cr2, indexes the exception_handlers[] word table, and
 * branches to the registered handler.  Only r30/r31 are touched
 * here, so the handler reached can still SAVE_ALL the interrupted
 * state.
 */
general_exception:
	mfcr	r31, cr2
	nop
	la	r30, exception_handlers
	andi	r31, 0x1f		# get ecr.exc_code
	slli	r31, r31, 2		# scale code to a word offset
	add	r30, r30, r31
	lw	r30, [r30]		# fetch handler address
	br	r30
/*
 * interrupt_exception: save the full register frame and hand off to
 * do_IRQ.  r28 holds the current thread_info pointer (TI_* offsets);
 * r0 appears to hold the pt_regs frame pointer after SAVE_ALL —
 * TODO confirm against asm/asmmacro.h.
 */
interrupt_exception:
	SAVE_ALL
	mfcr	r4, cr2			# exception cause register
	nop
	lw	r16, [r28, TI_REGS]	# remember previous regs pointer
	sw	r0, [r28, TI_REGS]	# publish this frame as current regs
	la	r3, ret_from_irq	# return path taken after do_IRQ
	srli	r4, r4, 18		# get ecr.ip[7:2], interrupt No.
	mv	r5, r0			# second argument: pt_regs
	j	do_IRQ
/*
 * First-level handlers for the vectored exceptions.  Each saves the
 * full register frame, passes the frame pointer (r0 after SAVE_ALL)
 * to the matching C handler in r4, and returns via
 * ret_from_exception (restore_all for the NMI).  The address-error
 * handlers additionally capture cr6 into the frame at PT_EMA before
 * calling C — presumably cr6 is the faulting-address register;
 * confirm against the Score CP0 documentation.
 */
ENTRY(handle_nmi)			# NMI #1
	SAVE_ALL
	mv	r4, r0
	la	r8, nmi_exception_handler
	brl	r8
	j	restore_all
ENTRY(handle_adelinsn)			# AdEL-instruction #2
	SAVE_ALL
	mfcr	r8, cr6
	nop
	nop
	sw	r8, [r0, PT_EMA]	# record faulting address
	mv	r4, r0
	la	r8, do_adelinsn
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
ENTRY(handle_ibe)			# BusEL-instruction #5
	SAVE_ALL
	mv	r4, r0
	la	r8, do_be
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
ENTRY(handle_pel)			# P-EL #6
	SAVE_ALL
	mv	r4, r0
	la	r8, do_pel
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
ENTRY(handle_ccu)			# CCU #8
	SAVE_ALL
	mv	r4, r0
	la	r8, do_ccu
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
ENTRY(handle_ri)			# RI #9
	SAVE_ALL
	mv	r4, r0
	la	r8, do_ri
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
ENTRY(handle_tr)			# Trap #10
	SAVE_ALL
	mv	r4, r0
	la	r8, do_tr
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
ENTRY(handle_adedata)			# AdES-instruction #12
	SAVE_ALL
	mfcr	r8, cr6
	nop
	nop
	sw	r8, [r0, PT_EMA]	# record faulting address
	mv	r4, r0
	la	r8, do_adedata
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
ENTRY(handle_cee)			# CeE #16
	SAVE_ALL
	mv	r4, r0
	la	r8, do_cee
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
ENTRY(handle_cpe)			# CpE #17
	SAVE_ALL
	mv	r4, r0
	la	r8, do_cpe
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
ENTRY(handle_dbe)			# BusEL-data #18
	SAVE_ALL
	mv	r4, r0
	la	r8, do_be
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
ENTRY(handle_reserved)			# others
	SAVE_ALL
	mv	r4, r0
	la	r8, do_reserved
	brl	r8
	mv	r4, r0
	j	ret_from_exception
	nop
/*
 * Common exception/interrupt return path.
 * Without CONFIG_PREEMPT, returning to kernel mode goes straight to
 * restore_all (resume_kernel is aliased to it); with CONFIG_PREEMPT,
 * ret_from_exception is aliased to __ret_from_irq and a real
 * resume_kernel checks the preempt count and NEED_RESCHED.
 */
#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
#else
#define __ret_from_irq ret_from_exception
#endif
	.align	2
#ifndef CONFIG_PREEMPT
ENTRY(ret_from_exception)
	disable_irq			# preempt stop
	nop
	j	__ret_from_irq
	nop
#endif
ENTRY(ret_from_irq)
	sw	r16, [r28, TI_REGS]	# restore saved regs pointer
ENTRY(__ret_from_irq)
	lw	r8, [r0, PT_PSR]	# returning to kernel mode?
	andri.c	r8, r8, KU_USER
	beq	resume_kernel
resume_userspace:
	disable_irq
	lw	r6, [r28, TI_FLAGS]	# current->work
	li	r8, _TIF_WORK_MASK
	and.c	r8, r8, r6		# ignoring syscall_trace
	bne	work_pending
	nop
	j	restore_all
	nop
#ifdef CONFIG_PREEMPT
resume_kernel:
	disable_irq
	lw	r8, [r28, TI_PRE_COUNT]	# non-zero -> preemption disabled
	cmpz.c	r8
	bne	r8, restore_all
need_resched:
	lw	r8, [r28, TI_FLAGS]
	andri.c	r9, r8, _TIF_NEED_RESCHED
	beq	restore_all
	# NOTE(review): loads PT_PSR relative to r28 (thread_info) while
	# __ret_from_irq above reads it relative to r0 — verify which
	# base register is correct for this offset.
	lw	r8, [r28, PT_PSR]	# Interrupts off?
	andri.c	r8, r8, 1
	beq	restore_all
	bl	preempt_schedule_irq
	nop
	j	need_resched
	nop
#endif
/*
 * ret_from_fork: first code run by a new task; calls schedule_tail
 * and then falls through into the common syscall exit path.
 * syscall_exit: disable interrupts, check for pending work bits,
 * and either handle them (syscall_exit_work) or restore the full
 * frame and return to the caller.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail		# r4=struct task_struct *prev
ENTRY(syscall_exit)
	nop
	disable_irq
	lw	r6, [r28, TI_FLAGS]	# current->work
	li	r8, _TIF_WORK_MASK
	and.c	r8, r6, r8
	bne	syscall_exit_work
ENTRY(restore_all)			# restore full frame
	RESTORE_ALL_AND_RET
/*
 * work_pending: dispatch pending TIF work before returning to user
 * space.  Reschedules in a loop while NEED_RESCHED is set, then
 * handles signal/notification work via do_notify_resume.
 * syscall_exit_work: if syscall tracing is active, report syscall
 * exit to the tracer first; otherwise fall back to work_pending.
 */
work_pending:
	andri.c	r8, r6, _TIF_NEED_RESCHED # r6 is preloaded with TI_FLAGS
	beq	work_notifysig
work_resched:
	bl	schedule
	nop
	disable_irq
	lw	r6, [r28, TI_FLAGS]	# re-read work bits after schedule
	li	r8, _TIF_WORK_MASK
	and.c	r8, r6, r8		# is there any work to be done
					# other than syscall tracing?
	beq	restore_all
	andri.c	r8, r6, _TIF_NEED_RESCHED
	bne	work_resched
work_notifysig:
	mv	r4, r0			# arg1: pt_regs
	li	r5, 0
	bl	do_notify_resume	# r6 already loaded
	nop
	j	resume_userspace
	nop
ENTRY(syscall_exit_work)
	li	r8, _TIF_SYSCALL_TRACE
	and.c	r8, r8, r6		# r6 is preloaded with TI_FLAGS
	beq	work_pending		# trace bit set?
	nop
	enable_irq
	mv	r4, r0			# arg1: pt_regs
	li	r5, 1			# arg2: leaving the syscall
	bl	do_syscall_trace
	nop
	b	resume_userspace
	nop
/*
 * save_context/restore_context: spill/reload the registers that must
 * survive a context switch into/from the thread struct pointed to by
 * \reg (THREAD_REG* offsets).  Note the asymmetry: r3 is not saved
 * here — resume stores it separately — but restore_context does
 * reload it, so resume can branch to it to complete the switch.
 */
.macro save_context reg
	sw	r12, [\reg, THREAD_REG12];
	sw	r13, [\reg, THREAD_REG13];
	sw	r14, [\reg, THREAD_REG14];
	sw	r15, [\reg, THREAD_REG15];
	sw	r16, [\reg, THREAD_REG16];
	sw	r17, [\reg, THREAD_REG17];
	sw	r18, [\reg, THREAD_REG18];
	sw	r19, [\reg, THREAD_REG19];
	sw	r20, [\reg, THREAD_REG20];
	sw	r21, [\reg, THREAD_REG21];
	sw	r29, [\reg, THREAD_REG29];
	sw	r2, [\reg, THREAD_REG2];
	sw	r0, [\reg, THREAD_REG0]
.endm
.macro restore_context reg
	lw	r12, [\reg, THREAD_REG12];
	lw	r13, [\reg, THREAD_REG13];
	lw	r14, [\reg, THREAD_REG14];
	lw	r15, [\reg, THREAD_REG15];
	lw	r16, [\reg, THREAD_REG16];
	lw	r17, [\reg, THREAD_REG17];
	lw	r18, [\reg, THREAD_REG18];
	lw	r19, [\reg, THREAD_REG19];
	lw	r20, [\reg, THREAD_REG20];
	lw	r21, [\reg, THREAD_REG21];
	lw	r29, [\reg, THREAD_REG29];
	lw	r0, [\reg, THREAD_REG0];
	lw	r2, [\reg, THREAD_REG2];
	lw	r3, [\reg, THREAD_REG3]
.endm
/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *	struct thread_info *next_ti)
 *
 * Context switch: save prev's PSR and callee context (r4), restore
 * next's (r5), point r28 at next's thread_info (r6), and record
 * next's kernel stack top in kernelsp.  The final cr0 write merges
 * bits of the live PSR with next's saved PSR before branching to
 * next's saved r3 (its resume address, reloaded by restore_context).
 */
ENTRY(resume)
	mfcr	r9, cr0
	nop
	nop
	sw	r9, [r4, THREAD_PSR]	# save prev's PSR
	save_context r4
	sw	r3, [r4, THREAD_REG3]	# save prev's return address
	mv	r28, r6			# r28 = next's thread_info
	restore_context r5
	mv	r8, r6
	addi	r8, KERNEL_STACK_SIZE
	subi	r8, 32			# top of next's kernel stack
	la	r9, kernelsp;
	sw	r8, [r9];
	mfcr	r9, cr0
	# ldis presumably loads the immediate into the high halfword,
	# making r7 a mask of 0x00ff0000 — TODO confirm against the
	# Score ISA manual.
	ldis	r7, 0x00ff
	nop
	and	r9, r9, r7		# keep masked bits of live PSR
	lw	r6, [r5, THREAD_PSR]
	not	r7, r7
	and	r6, r6, r7		# take the rest from next's PSR
	or	r6, r6, r9
	mtcr	r6, cr0
	nop; nop; nop; nop; nop
	br	r3
/*
 * handle_sys: system call entry.  The syscall number arrives in r27,
 * arguments in r4..r7 (extra args on the user stack, see stackargs).
 * Each sys_call_table entry is 8 bytes: the routine address followed
 * by its argument count (evidenced by the slli-by-3 and the lw at
 * offset 4).  On return, PT_R7 carries the error flag and the
 * (possibly negated) result is stored in PT_R4.
 */
ENTRY(handle_sys)
	SAVE_ALL
	enable_irq
	sw	r4, [r0, PT_ORIG_R4]	#for restart syscall
	sw	r7, [r0, PT_ORIG_R7]	#for restart syscall
	sw	r27, [r0, PT_IS_SYSCALL] # it from syscall
	lw	r9, [r0, PT_EPC]	# skip syscall on return
	addi	r9, 4
	sw	r9, [r0, PT_EPC]
	cmpi.c	r27, __NR_syscalls	# check syscall number
	bgtu	illegal_syscall
	slli	r8, r27, 3		# get syscall routine
	la	r11, sys_call_table
	add	r11, r11, r8
	lw	r10, [r11]		# get syscall entry
	lw	r11, [r11, 4]		# get number of args
	cmpz.c	r10
	beq	illegal_syscall
	cmpi.c	r11, 4			# more than 4 arguments?
	bgtu	stackargs
stack_done:
	lw	r8, [r28, TI_FLAGS]
	li	r9, _TIF_SYSCALL_TRACE
	and.c	r8, r8, r9
	bne	syscall_trace_entry
	brl	r10			# Do The Real system call
	cmpi.c	r4, 0
	blt	1f
	ldi	r8, 0			# success: clear error flag
	sw	r8, [r0, PT_R7]
	b	2f
1:
	cmpi.c	r4, -EMAXERRNO-1	# -EMAXERRNO - 1=-1134
	ble	2f
	ldi	r8, 0x1;		# error: set flag, return +errno
	sw	r8, [r0, PT_R7]
	neg	r4, r4
2:
	sw	r4, [r0, PT_R4]		# save result
syscall_return:
	disable_irq
	lw	r6, [r28, TI_FLAGS]	# current->work
	li	r8, _TIF_WORK_MASK
	and.c	r8, r6, r8
	bne	syscall_return_work
	j	restore_all
syscall_return_work:
	j	syscall_exit_work
syscall_trace_entry:
	mv	r16, r10		# keep routine across the trace call
	mv	r4, r0			# arg1: pt_regs
	li	r5, 0			# arg2: entering the syscall
	bl	do_syscall_trace
	mv	r8, r16
	lw	r4, [r0, PT_R4]		# Restore argument registers
	lw	r5, [r0, PT_R5]
	lw	r6, [r0, PT_R6]
	lw	r7, [r0, PT_R7]
	brl	r8
	# NOTE(review): unlike the untraced path above, nothing here
	# appears to branch around the error marking — the flag and
	# negation look unconditional.  Verify against the original
	# source before relying on traced-syscall return values.
	li	r8, -EMAXERRNO - 1	# error?
	sw	r8, [r0, PT_R7]		# set error flag
	neg	r4, r4			# error
	sw	r4, [r0, PT_R0]		# set flag for syscall
					# restarting
1:	sw	r4, [r0, PT_R2]		# result
	j	syscall_exit
/*
 * stackargs: fetch syscall arguments 5 and 6 from the user stack
 * (r8 = user sp, from PT_R0).  A computed branch lands on the right
 * lw/sw pair for the argument count (r11, already known > 4).  The
 * __ex_table entries send faulting user-space loads to bad_stack.
 */
stackargs:
	lw	r8, [r0, PT_R0]
	andri.c	r9, r8, 3		# test whether user sp is align a word
	bne	bad_stack
	subi	r11, 5			# r11 = extra args beyond 4 (0 or 1)
	slli	r9, r11, 2
	add.c	r9, r9, r8		# highest stack slot touched
	bmi	bad_stack		# kernel-space address? reject
	la	r9, 3f			# calculate branch address
	slli	r11, r11, 3		# each lw/sw pair is 8 bytes
	sub	r9, r9, r11
	br	r9
2:	lw	r9, [r8, 20]		# argument 6 from usp
	sw	r9, [r0, 20]
3:	lw	r9, [r8, 16]		# argument 5 from usp
	sw	r9, [r0, 16]
	j	stack_done
	.section __ex_table,"a"
	.word	2b, bad_stack
	.word	3b, bad_stack
	.previous
/*
 * The stackpointer for a call with more than 4 arguments is bad.
 * We probably should handle this case a bit more drastic.
 * Fail the syscall: store the negated syscall number as the result
 * and set the error flag in PT_R7.
 */
bad_stack:
	neg	r27, r27		# error
	sw	r27, [r0, PT_ORIG_R4]
	sw	r27, [r0, PT_R4]
	ldi	r8, 1			# set error flag
	sw	r8, [r0, PT_R7]
	j	syscall_return
/*
 * illegal_syscall: number out of range or unimplemented entry;
 * report -ENOSYS through the normal syscall return path.
 */
illegal_syscall:
	ldi	r4, -ENOSYS		# error
	sw	r4, [r0, PT_ORIG_R4]
	sw	r4, [r0, PT_R4]
	ldi	r9, 1			# set error flag
	sw	r9, [r0, PT_R7]
	j	syscall_return
/*
 * Thin assembly wrappers: each passes the register frame pointer
 * (r0) as the first argument and tail-branches to the matching
 * score_* C implementation, which needs access to pt_regs.
 */
ENTRY(sys_execve)
	mv	r4, r0
	la	r8, score_execve
	br	r8
ENTRY(sys_clone)
	mv	r4, r0
	la	r8, score_clone
	br	r8
ENTRY(sys_rt_sigreturn)
	mv	r4, r0
	la	r8, score_rt_sigreturn
	br	r8
ENTRY(sys_sigaltstack)
	mv	r4, r0
	la	r8, score_sigaltstack
	br	r8