/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values... Be aware!
 */

#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>

#include "entry-header.S"
/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
	get_irqnr_preamble r5, lr
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, BSYM(1b)
	bne	asm_do_IRQ

#ifdef CONFIG_SMP
	/*
	 * XXX
	 *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	ALT_SMP(test_for_ipi r0, r6, r5, lr)
	ALT_UP_B(9997f)
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_local_timer
#endif
9997:
#endif
	.endm
#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)
/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r0, r0, #4	)
	str	r1, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm
	.align	5
__dabt_svc:
	svc_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@   r2 - aborted context pc
	@   r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)
	.align	5
__irq_svc:
	svc_entry

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r4, #PSR_I_BIT
	bleq	trace_hardirqs_on
#endif
	svc_exit r4				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif
	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r2, #-4]
#else
	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
	and	r9, r0, #0xf800
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r2]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)
	.align	5
__pabt_svc:
	svc_entry

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	mov	r0, r2				@ pass address of aborted instruction.
#ifdef MULTI_PABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	msr	cpsr_c, r9			@ Maybe enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should be too.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r1 - r3}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""

	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r2 - r4}
 ARM(	stmdb	r0, {sp, lr}^	)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm
	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r2, #TASK_SIZE
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm
	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check

	@
	@ Call the processor-specific abort handler:
	@
	@   r2 - aborted context pc
	@   r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_DABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq
	mov	r2, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_DataAbort
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)
	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check

	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
 ARM(	strne	r0, [r0, -r0]	)
 THUMB(	movne	r0, #0		)
 THUMB(	strne	r0, [r0]	)
#endif

	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg
	.align	5
__und_usr:
	usr_entry

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32-bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)

	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif
do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)
	.align	5
__pabt_usr:
	usr_entry

	mov	r0, r2				@ pass address of aborted instruction.
#ifdef MULTI_PABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	enable_irq				@ Enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
 UNWIND(.fnend		)
	/* fall through */

/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_MMU
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_MMU
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)
	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  They are used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs.  The idea is for
 * this code to be executed directly in user mode for best efficiency, but
 * it is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning.  Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if the
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words, don't make binaries unable to run on
 * earlier processors just for the sake of avoiding these kernel helpers
 * if your compiled code is not going to use the new instructions for any
 * other purpose.
 */
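/*
 * Illustrative sketch (not part of the original file): one way a user
 * space library might follow the advice above, falling back to the
 * cmpxchg helper only when the binary is not being built exclusively
 * for a newer CPU.  The function name my_cmpxchg_failed and the
 * __ARM_ARCH_7A__ test are assumptions made for this example; the
 * helper's address and return convention are documented with
 * __kuser_cmpxchg further down.
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 *	static int my_cmpxchg_failed(int oldval, int newval, int *ptr)
 *	{
 *	#ifdef __ARM_ARCH_7A__
 *		return __sync_val_compare_and_swap(ptr, oldval, newval) != oldval;
 *	#else
 *		return __kernel_cmpxchg(oldval, newval, ptr);
 *	#endif
 *	}
 */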
	THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 *	#define __kernel_dmb() \
 *		asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *			: : : "r0", "lr", "cc" )
 */
__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb
	usr_ret	lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 *	#define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0", "r3", "ip", "lr", "cc", "memory" ); \
 *	   __result; })
 */
__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, =1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r2, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif
	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 *	#define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *		: "=r" (__val) : : "lr", "cc" ); \
 *	   __val; })
 */
__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
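/*
 * Illustrative sketch (not part of the original file): user space can read
 * the version word before relying on helpers that may not exist on older
 * kernels.  The function name check_kuser_version and the minimum value of
 * 2 assumed here for __kernel_cmpxchg are assumptions made for this example
 * (the snippet also assumes <stdio.h> and <stdlib.h>):
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 *	void check_kuser_version(void)
 *	{
 *		if (__kernel_helper_version < 2) {
 *			fprintf(stderr, "kernel too old for __kernel_cmpxchg\n");
 *			abort();
 *		}
 *	}
 */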
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

	THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4