/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
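
	@ Note: handle_arch_irq is a word in .data (declared at the end of
	@ this file) that platform code points at its top-level IRQ
	@ dispatcher; lr is preloaded with the 9997 local label above so
	@ the dispatcher returns there whichever path is taken.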

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm
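
	@ Note on SPFIX: the EABI requires an 8-byte-aligned stack, but an
	@ exception may be taken while sp_svc is only 4-byte aligned.  The
	@ tst/subeq pair above inserts one pad word when needed so the
	@ completed frame ends up 8-byte aligned, and the matching addeq
	@ reconstructs the original sp_svc value in r2 before it is saved
	@ into the frame.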

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction, so we have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(__und_svc_finish)
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

	@
	@ IRQs off again before pulling preserved data off the stack
	@
__und_svc_finish:
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE
 * must be a multiple of 8 as well.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r0, r0				@ little endian instruction
#endif
	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
	.align	2
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs disabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	moveq	pc, lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)
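
	@ Dispatch note for the table above: r8 holds the coprocessor
	@ number shifted left by 8, so "add pc, pc, r8, lsr #6" adds
	@ CP# * 4 to the pc.  Reading the pc in ARM state yields the
	@ address of the current instruction plus 8, so entry 0 of the
	@ table must sit exactly two words after the add -- which is
	@ what the intervening nop arranges.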

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */

/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
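
/*
 * Illustrative usage sketch (adapted from kernel_user_helpers.txt):
 * user space reaches these helpers by calling their fixed addresses
 * directly.  For example, an atomic add built on __kuser_cmpxchg
 * (0xffff0fc0) might look like this in C; the wrapper names here are
 * illustrative, not part of any header:
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));
 *		return new;
 *	}
 *
 * __kuser_get_tls (0xffff0fe0) is called the same way and returns the
 * TLS value in r0.
 */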

 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	/* pad to next slot */
	.rept	(16 - (. - __kuser_cmpxchg64)/4)
	.word	0
	.endr

	.align	5

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	.align	5

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg32_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif
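
	@ Return convention for all __kuser_cmpxchg variants above: on
	@ success r0 is 0 with the C flag set; on failure r0 is non-zero
	@ with C clear, so callers may test either the value or the flag.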

	.align	5

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
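
	@ The "and lr, lr, #0x0f" above extracts the low four mode bits of
	@ the parent context's CPSR, so each 16-entry table following a
	@ vector_stub use is indexed by the mode the exception came from:
	@ 0 = USR, 1 = FIQ, 2 = IRQ, 3 = SVC, and so on.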

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5
.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
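
	@ stubs_offset compensates for the vectors and the stubs being
	@ copied to different final addresses: the vector page goes to
	@ 0xffff0000 and the stubs to 0xffff0200, so a pc-relative branch
	@ assembled as "vector_xxx + stubs_offset" resolves to the stub's
	@ final location once both copies have been made.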

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif