/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
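
/*
 * With CONFIG_MULTI_IRQ_HANDLER, handle_arch_irq (loaded indirectly
 * above) is filled in at boot by the platform code.  A minimal sketch
 * of the C side, assuming the set_handle_irq() helper from
 * arch/arm/kernel/irq.c:
 *
 *	void (*handle_arch_irq)(struct pt_regs *) __read_mostly;
 *
 *	void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
 *	{
 *		if (handle_arch_irq)
 *			return;
 *		handle_arch_irq = handle_irq;
 *	}
 */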

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper
	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""   ""    ""        ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm
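
/*
 * A worked example of the SPFIX alignment fix above (an illustration,
 * not original source): the EABI requires an 8-byte-aligned stack.
 * S_FRAME_SIZE (and the optional stack_hole) is a multiple of 8, so
 * after the initial "sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)",
 * bit 2 of sp is clear exactly when the exception entered with sp only
 * 4-byte aligned; "subeq sp, sp, #4" then pads one extra word so that
 * the completed frame (after the final "str r3, [sp, #-4]!" push) ends
 * up 64-bit aligned.
 */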

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction and we have
	@ to subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(__und_svc_finish)
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""   ""    ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
3:	ldrht	r0, [r2]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)
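
/*
 * A note on the 0xe800 threshold used above (an illustration, not
 * original source): in the Thumb-2 encoding, the first halfword of a
 * 32-bit instruction always lies in the range 0xe800-0xffff (top five
 * bits 0b11101, 0b11110 or 0b11111); any halfword below 0xe800 is a
 * complete 16-bit instruction.  E.g. 0xf7ff can only start a 32-bit
 * encoding such as BL, while 0x4770 (bx lr) is a whole 16-bit
 * instruction.
 */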

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
	.align	2
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs disabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	moveq	pc, lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)
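
/*
 * A worked example of the dispatch above (an illustration, not original
 * source): a VFP instruction such as vadd.f64 d0, d0, d0 (0xee300b00)
 * carries its coprocessor number in bits [11:8], here 0xb (CP#11).
 * "and r8, r0, #0x00000f00" leaves r8 = 0xb00, and "add pc, pc, r8,
 * lsr #6" adds 0xb00 >> 6 = 44 bytes = 11 words to pc.  Since pc reads
 * as the address of the add plus 8 in ARM state (i.e. the CP#0 slot
 * just past the nop), execution lands on the CP#11 entry, W(b) do_vfp.
 */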

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */

/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
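
/*
 * A minimal sketch of how user space calls these helpers (adapted from
 * the Documentation file cited above; the entry addresses are fixed by
 * the ABI):
 *
 *	#define __kuser_helper_version	(*(int32_t *)0xffff0ffc)
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	if (__kuser_helper_version >= 1)
 *		tls = __kuser_get_tls();
 */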

 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
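
@ Note (not original source): 0xe7fddef1 lies in the architecturally
@ undefined instruction space, so a stray branch into the pad words
@ faults instead of executing silently.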

#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f				@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f				@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32
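
/*
 * A minimal sketch of the user-space calling convention for the helper
 * above (adapted from Documentation/arm/kernel_user_helpers.txt): it
 * returns 0 with the C flag set if *ptr was updated, and non-zero with
 * C clear otherwise.
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	an atomic increment, for example:
 *		do {
 *			old = *ptr;
 *		} while (__kuser_cmpxchg(old, old + 1, ptr));
 */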

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
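
/*
 * A worked example of the mode switch above (an illustration, not
 * original source): for the IRQ stub, \mode is IRQ_MODE (0x12) and
 * SVC_MODE is 0x13, so the eor constant's mode bits are 0x01
 * (PSR_ISETSTATE adds the Thumb bit on Thumb-2 kernels).  Eor'ing that
 * into the current cpsr, whose mode field is IRQ, flips it to SVC;
 * "movs pc, lr" then copies this value from spsr to cpsr while
 * branching to the handler.
 */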

	.section .stubs, "ax", %progbits
__stubs_start:
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	subs	pc, lr, #4

	.globl	vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq

	.section .vectors, "ax", %progbits
__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq
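
@ Note (not original source): the third slot above is the SWI vector; it
@ loads pc from the word at __vectors_start + 0x1000, i.e. the first
@ word of the stubs copy at 0xffff1000, which holds the address of
@ vector_swi (see "This must be the first word" above).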

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif