/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>

#include "entry-header.S"

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
	get_irqnr_preamble r5, lr
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, BSYM(1b)
	bne	asm_do_IRQ

#ifdef CONFIG_SMP
	/*
	 * XXX
	 *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	test_for_ipi r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_local_timer
#endif
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception
@		   (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r4, #-1			@  ""   ""    ""        ""
	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r0, r0, #4	)
	str	r1, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}

	asm_trace_hardirqs_off
	.endm

	.align	5
__dabt_svc:
	svc_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r4, #PSR_I_BIT
	bleq	trace_hardirqs_on
#endif
	svc_exit r4				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r2, #-4]
#else
	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
	and	r9, r0, #0xf800
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r2]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	mov	r0, r2			@ pass address of aborted instruction.
#ifdef MULTI_PABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	msr	cpsr_c, r9			@ Maybe enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r1 - r3}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""   ""    ""        ""

	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r2 - r4}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	asm_trace_hardirqs_off
	.endm

	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r2, #TASK_SIZE
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_DABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq
	mov	r2, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_DataAbort
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check

	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
 ARM(	strne	r0, [r0, -r0]	)
 THUMB(	movne	r0, #0		)
 THUMB(	strne	r0, [r0]	)
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif

	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)

	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
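/*
 * Illustration only (not from the original source): in C, the
 * classification performed below is roughly the following sketch.
 * The helper name is hypothetical; only the bit positions come from
 * the code beneath (bits 27/26 select the generic CDP/CPRT/LDC/STC
 * space, bits 8-11 hold the coprocessor number used to index the
 * branch table):
 *
 *	static int arm_cp_num(unsigned int insn)
 *	{
 *		// CDP/CPRT/LDC/STC encodings have bits 27 and 26 set
 *		if ((insn & 0x0c000000) != 0x0c000000)
 *			return -1;		// not a coprocessor instruction
 *		return (insn >> 8) & 0xf;	// CP number, bits 8-11
 *	}
 */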
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)

	.align	5
__pabt_usr:
	usr_entry

	mov	r0, r2			@ pass address of aborted instruction.
#ifdef MULTI_PABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	enable_irq				@ Enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_MMU
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#ifdef CONFIG_MMU
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs.  The idea is for
 * this code to be executed directly in user mode for best efficiency, but
 * it is too intimate with its kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set, or restrictions such as on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning.  Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purposes.
 */
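/*
 * Illustration only (not from the original source): gathering the entry
 * points documented individually below, user space could bind all three
 * helpers like this.  The typedefs and fixed addresses are exactly the
 * ones given in the per-helper comments that follow; nothing else is
 * implied:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	typedef int (__kernel_get_tls_t)(void);
 *
 *	#define __kernel_dmb	  (*(__kernel_dmb_t *)0xffff0fa0)
 *	#define __kernel_cmpxchg  (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *	#define __kernel_get_tls  (*(__kernel_get_tls_t *)0xffff0fe0)
 */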
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *	asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *		: : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb
	usr_ret	lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, =1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r2, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
#ifdef CONFIG_SMP
	b	__kuser_memory_barrier
#else
	usr_ret	lr
#endif

#endif

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *		: "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
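/*
 * Illustration only (not from the original source): since the word below
 * counts the 32-byte helper segments currently provided, a cautious
 * caller could check it before relying on a given helper.  The function
 * name is hypothetical; the address is the fixed one documented above:
 *
 *	static int kuser_helpers_available(unsigned int needed)
 *	{
 *		// true if at least 'needed' helpers are provided
 *		return *(volatile unsigned int *)0xffff0ffc >= needed;
 *	}
 */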
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
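/*
 * Illustration only (not from the original source): each stub below ends
 * by dispatching on the mode bits of the interrupted context's SPSR,
 * roughly
 *
 *	handler = branch_table[spsr & 0x0f];
 *
 * which is why every vector_stub invocation is immediately followed by a
 * 16-entry table with USR at index 0, FIQ at 1, IRQ at 2 and SVC at 3;
 * all other modes are routed to the *_invalid handlers.
 */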
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5
.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4