/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */
#include <linux/config.h>
#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>
#include <asm/thread_notify.h>

#include "entry-header.S"

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
        .macro  irq_handler
1:      get_irqnr_and_base r0, r6, r5, lr
        movne   r1, sp
        @
        @ routine called with r0 = irq number, r1 = struct pt_regs *
        @
        adrne   lr, 1b
        bne     asm_do_IRQ

#ifdef CONFIG_SMP
        /*
         * XXX
         *
         * this macro assumes that irqstat (r6) and base (r5) are
         * preserved from get_irqnr_and_base above
         */
        test_for_ipi r0, r6, r5, lr
        movne   r0, sp
        adrne   lr, 1b
        bne     do_IPI

#ifdef CONFIG_LOCAL_TIMERS
        test_for_ltirq r0, r6, r5, lr
        movne   r0, sp
        adrne   lr, 1b
        bne     do_local_timer
#endif
#endif
        .endm

/*
 * Invalid mode handlers
 */
        .macro  inv_entry, reason
        sub     sp, sp, #S_FRAME_SIZE
        stmib   sp, {r1 - lr}
        mov     r1, #\reason
        .endm

__pabt_invalid:
        inv_entry BAD_PREFETCH
        b       common_invalid

__dabt_invalid:
        inv_entry BAD_DATA
        b       common_invalid

__irq_invalid:
        inv_entry BAD_IRQ
        b       common_invalid

__und_invalid:
        inv_entry BAD_UNDEFINSTR

        @
        @ XXX fall through to common_invalid
        @

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
        zero_fp

        ldmia   r0, {r4 - r6}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r7, #-1                 @  ""   ""    ""        ""
        str     r4, [sp]                @ save preserved r0
        stmia   r0, {r5 - r7}           @ lr_<exception>,
                                        @ cpsr_<exception>, "old_r0"

        mov     r0, sp
        and     r2, r6, #0x1f
        b       bad_mode

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
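
/*
 * SPFIX: the EABI requires the stack to be 8-byte aligned, but an
 * exception may be taken with sp_svc only word aligned.  The tst/bicne
 * pair below rounds sp down to an 8-byte boundary when needed, and the
 * matching SPFIX(addne) compensates when the original sp_svc value is
 * reconstructed for the saved frame.  None of the intervening
 * instructions alter the condition flags, so the "ne" result of the
 * tst is still valid at the addne.
 */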
        .macro  svc_entry
        sub     sp, sp, #S_FRAME_SIZE
 SPFIX( tst     sp, #4          )
 SPFIX( bicne   sp, sp, #4      )
        stmib   sp, {r1 - r12}

        ldmia   r0, {r1 - r3}
        add     r5, sp, #S_SP           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""      ""       ""
        add     r0, sp, #S_FRAME_SIZE   @  ""  ""      ""       ""
 SPFIX( addne   r0, r0, #4      )
        str     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        mov     r1, lr

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r0 - sp_svc
        @  r1 - lr_svc
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stmia   r5, {r0 - r4}
        .endm

        .align  5
__dabt_svc:
        svc_entry

        @
        @ get ready to re-enable interrupts if appropriate
        @
        mrs     r9, cpsr
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.  r9 must be preserved.
        @
#ifdef MULTI_ABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4]
#else
        bl      CPU_ABORT_HANDLER
#endif

        @
        @ set desired IRQ state, then call main handler
        @
        msr     cpsr_c, r9
        mov     r2, sp
        bl      do_DataAbort

        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq

        @
        @ restore SPSR and restart the instruction
        @
        ldr     r0, [sp, #S_PSR]
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr

        .align  5
__irq_svc:
        svc_entry

#ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     r8, [tsk, #TI_PREEMPT]  @ get preempt count
        add     r7, r8, #1              @ increment it
        str     r7, [tsk, #TI_PREEMPT]
#endif

        irq_handler
#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_FLAGS]    @ get flags
        tst     r0, #_TIF_NEED_RESCHED
        blne    svc_preempt
preempt_return:
        ldr     r0, [tsk, #TI_PREEMPT]  @ read preempt value
        str     r8, [tsk, #TI_PREEMPT]  @ restore preempt count
        teq     r0, r7
        strne   r0, [r0, -r0]           @ bug()
#endif
        ldr     r0, [sp, #S_PSR]        @ irqs are already disabled
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr

        .ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
        teq     r8, #0                  @ was preempt count = 0
        ldreq   r6, .LCirq_stat
        movne   pc, lr                  @ no
        ldr     r0, [r6, #4]            @ local_irq_count
        ldr     r1, [r6, #8]            @ local_bh_count
        adds    r0, r0, r1
        movne   pc, lr
        mov     r7, #0                  @ preempt_schedule_irq
        str     r7, [tsk, #TI_PREEMPT]  @ expects preempt_count == 0
1:      bl      preempt_schedule_irq    @ irq en/disable is done inside
        ldr     r0, [tsk, #TI_FLAGS]    @ get new tasks TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        beq     preempt_return          @ go again
        b       1b
#endif

        .align  5
__und_svc:
        svc_entry

        @
        @ call emulation code, which returns using r9 if it has emulated
        @ the instruction, or the more conventional lr if we are to treat
        @ this as a real undefined instruction
        @
        @  r0 - instruction
        @
        ldr     r0, [r2, #-4]
        adr     r9, 1f
        bl      call_fpe

        mov     r0, sp                  @ struct pt_regs *regs
        bl      do_undefinstr

        @
        @ IRQs off again before pulling preserved data off the stack
        @
1:      disable_irq

        @
        @ restore SPSR and restart the instruction
        @
        ldr     lr, [sp, #S_PSR]        @ Get SVC cpsr
        msr     spsr_cxsf, lr
        ldmia   sp, {r0 - pc}^          @ Restore SVC registers

        .align  5
__pabt_svc:
        svc_entry

        @
        @ re-enable interrupts if appropriate
        @
        mrs     r9, cpsr
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT
        msr     cpsr_c, r9

        @
        @ set args, then call main handler
        @
        @  r0 - address of faulting instruction
        @  r1 - pointer to registers on stack
        @
        mov     r0, r2                  @ address (pc)
        mov     r1, sp                  @ regs
        bl      do_PrefetchAbort        @ call abort handler

        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq

        @
        @ restore SPSR and restart the instruction
        @
        ldr     r0, [sp, #S_PSR]
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr

        .align  5
.LCcralign:
        .word   cr_alignment
#ifdef MULTI_ABORT
.LCprocfns:
        .word   processor
#endif
.LCfp:
        .word   fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
        .word   irq_stat
#endif

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE
 * must be a multiple of 8 as well.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

        .macro  usr_entry
        sub     sp, sp, #S_FRAME_SIZE
        stmib   sp, {r1 - r12}

        ldmia   r0, {r1 - r3}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""     ""        ""

        str     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
        @ make sure our user space atomic helper is aborted
        cmp     r2, #TASK_SIZE
        bichs   r3, r3, #PSR_Z_BIT
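        @ (r2 is the faulting pc: if the exception interrupted code
        @ running above TASK_SIZE, i.e. the kuser cmpxchg helper in the
        @ vector page, clearing Z in the saved spsr forces that
        @ compare-and-swap to fail and retry; see the "Theory of
        @ operation" comment in __kuser_cmpxchg below.)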
#endif
#endif

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_usr and lr_usr
        @
        stmia   r0, {r2 - r4}
        stmdb   r0, {sp, lr}^

        @
        @ Enable the alignment trap while in kernel mode
        @
        alignment_trap r0

        @
        @ Clear FP to mark the first stack frame
        @
        zero_fp
        .endm

        .align  5
__dabt_usr:
        usr_entry

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.
        @
#ifdef MULTI_ABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4]
#else
        bl      CPU_ABORT_HANDLER
#endif

        @
        @ IRQs on, then call the main handler
        @
        enable_irq
        mov     r2, sp
        adr     lr, ret_from_exception
        b       do_DataAbort

        .align  5
__irq_usr:
        usr_entry

        get_thread_info tsk
#ifdef CONFIG_PREEMPT
        ldr     r8, [tsk, #TI_PREEMPT]  @ get preempt count
        add     r7, r8, #1              @ increment it
        str     r7, [tsk, #TI_PREEMPT]
#endif

        irq_handler
#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_PREEMPT]
        str     r8, [tsk, #TI_PREEMPT]
        teq     r0, r7
        strne   r0, [r0, -r0]
#endif

        mov     why, #0
        b       ret_to_user

        .ltorg

        .align  5
__und_usr:
        usr_entry

        tst     r3, #PSR_T_BIT          @ Thumb mode?
        bne     fpundefinstr            @ ignore FP
        sub     r4, r2, #4

        @
        @ fall through to the emulation code, which returns using r9 if
        @ it has emulated the instruction, or the more conventional lr
        @ if we are to treat this as a real undefined instruction
        @
        @  r0 - instruction
        @
1:      ldrt    r0, [r4]
        adr     r9, ret_from_exception
        adr     lr, fpundefinstr
        @
        @ fallthrough to call_fpe
        @

/*
 * The out of line fixup for the ldrt above.
 */
        .section .fixup, "ax"
2:      mov     pc, r9
        .previous
        .section __ex_table,"a"
        .long   1b, 2b
        .previous

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r10 = this thread's thread_info structure.
 */
call_fpe:
        tst     r0, #0x08000000         @ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
        and     r8, r0, #0x0f000000     @ mask out op-code bits
        teqne   r8, #0x0f000000         @ SWI (ARM6/7 bug)?
#endif
        moveq   pc, lr
        get_thread_info r10             @ get current thread
        and     r8, r0, #0x00000f00     @ mask out CP number
        mov     r7, #1
        add     r6, r10, #TI_USED_CP
        strb    r7, [r6, r8, lsr #8]    @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
        rsbs    r7, r8, #(1 << 8)       @ CP 0 or 1 only
        movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
        bcs     iwmmxt_task_enable
#endif
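        @
        @ Computed branch into the table that follows: r8 holds the CP
        @ number shifted left by 8, so "r8, lsr #6" is CP# * 4, i.e. one
        @ table entry.  The pc operand reads as the address of the add
        @ plus 8, which (skipping the padding mov r0, r0, itself never a
        @ branch target) is the address of the CP#0 entry.
        @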
        add     pc, pc, r8, lsr #6
        mov     r0, r0

        mov     pc, lr                  @ CP#0
        b       do_fpe                  @ CP#1 (FPE)
        b       do_fpe                  @ CP#2 (FPE)
        mov     pc, lr                  @ CP#3
        mov     pc, lr                  @ CP#4
        mov     pc, lr                  @ CP#5
        mov     pc, lr                  @ CP#6
        mov     pc, lr                  @ CP#7
        mov     pc, lr                  @ CP#8
        mov     pc, lr                  @ CP#9
#ifdef CONFIG_VFP
        b       do_vfp                  @ CP#10 (VFP)
        b       do_vfp                  @ CP#11 (VFP)
#else
        mov     pc, lr                  @ CP#10 (VFP)
        mov     pc, lr                  @ CP#11 (VFP)
#endif
        mov     pc, lr                  @ CP#12
        mov     pc, lr                  @ CP#13
        mov     pc, lr                  @ CP#14 (Debug)
        mov     pc, lr                  @ CP#15 (Control)

do_fpe:
        enable_irq
        ldr     r4, .LCfp
        add     r10, r10, #TI_FPSTATE   @ r10 = workspace
        ldr     pc, [r4]                @ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

        .data
ENTRY(fp_enter)
        .word   fpundefinstr
        .text

fpundefinstr:
        mov     r0, sp
        adr     lr, ret_from_exception
        b       do_undefinstr

        .align  5
__pabt_usr:
        usr_entry

        enable_irq                      @ Enable interrupts
        mov     r0, r2                  @ address (pc)
        mov     r1, sp                  @ regs
        bl      do_PrefetchAbort        @ call abort handler
        /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
        add     ip, r1, #TI_CPU_SAVE
        ldr     r3, [r2, #TI_TP_VALUE]
        stmia   ip!, {r4 - sl, fp, sp, lr}      @ Store most regs on stack
#ifdef CONFIG_MMU
        ldr     r6, [r2, #TI_CPU_DOMAIN]
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
        clrex
#else
        strex   r5, r4, [ip]            @ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
        mra     r4, r5, acc0
        stmia   ip, {r4, r5}
#endif
#if defined(CONFIG_HAS_TLS_REG)
        mcr     p15, 0, r3, c13, c0, 3  @ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
        mov     r4, #0xffff0fff
        str     r3, [r4, #-15]          @ TLS val at 0xffff0ff0
#endif
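        @ (in the emulated case above, 0xffff0fff - 15 = 0xffff0ff0: the
        @ TLS word lives in the vector page, where the __kuser_get_tls
        @ helper below reads it back)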
#ifdef CONFIG_MMU
        mcr     p15, 0, r6, c3, c0, 0   @ Set domain register
#endif
#if defined(CONFIG_IWMMXT)
        bl      iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
        add     r4, r2, #TI_CPU_DOMAIN + 40     @ cpu_context_save->extra
        ldmib   r4, {r4, r5}
        mar     acc0, r4, r5
#endif
        mov     r5, r0
        add     r4, r2, #TI_CPU_SAVE
        ldr     r0, =thread_notify_head
        mov     r1, #THREAD_NOTIFY_SWITCH
        bl      atomic_notifier_call_chain
        mov     r0, r5
        ldmia   r4, {r4 - sl, fp, sp, pc}       @ Load all regs saved previously

        __INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs.  The idea is for
 * this code to be executed directly in user mode for best efficiency, but
 * it is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set, or restrictions such as on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning.  Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if the
 * resulting binaries are already incompatible with earlier ARM processors
 * due to the use of unsupported instructions other than what is provided
 * here.  In other words, don't make binaries unable to run on earlier
 * processors just for the sake of not using these kernel helpers if your
 * compiled code is not going to use the new instructions for any other
 * purpose.
 */
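
/*
 * A minimal sketch of how user space might check for a helper before
 * using it, assuming the numbering implied above (segments are added
 * in front, so the count published by __kuser_helper_version covers
 * helpers from the top of the page down: __kuser_get_tls would be
 * helper #1, __kuser_cmpxchg helper #2, and so on).  The wrapper name
 * try_kernel_cmpxchg is made up for this example; only the fixed
 * addresses are part of the stable interface:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *	#define __kernel_cmpxchg \
 *		(*(int (*)(int, int, int *))0xffff0fc0)
 *
 *	static int try_kernel_cmpxchg(int oldval, int newval, int *ptr)
 *	{
 *		if (__kernel_helper_version < 2)
 *			return -1;
 *		return __kernel_cmpxchg(oldval, newval, ptr);
 *	}
 */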
        .align  5
        .globl  __kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *	asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
 */
__kuser_memory_barrier:                 @ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
        mcr     p15, 0, r0, c7, c10, 5  @ dmb
#endif
        mov     pc, lr

        .align  5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 *    - A failure might be transient, i.e. it is possible, although
 *      unlikely, that "failure" is returned even if *ptr == oldval.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */
__kuser_cmpxchg:                        @ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

        /*
         * Poor you.  No fast solution possible...
         * The kernel itself must perform the operation.
         * A special ghost syscall is used for that (see traps.c).
         */
        stmfd   sp!, {r7, lr}
        mov     r7, #0xff00             @ 0xfff0 into r7 for EABI
        orr     r7, r7, #0xf0
        swi     #0x9ffff0
        ldmfd   sp!, {r7, pc}

#elif __LINUX_ARM_ARCH__ < 6

        /*
         * Theory of operation:
         *
         * We set the Z flag before loading oldval.  If ever an exception
         * occurs we cannot be sure the loaded value will still be the same
         * when the exception returns, therefore the user exception handler
         * will clear the Z flag whenever the interrupted user code was
         * actually from the kernel address space (see the usr_entry macro).
         *
         * The base-register writeback on the str is used to prevent a race
         * with an exception happening just after the str instruction, which
         * would clear the Z flag although the exchange was done.
         */
#ifdef CONFIG_MMU
        teq     ip, ip                  @ set Z flag
        ldr     ip, [r2]                @ load current val
        add     r3, r2, #1              @ prepare store ptr
        teqeq   ip, r0                  @ compare with oldval if still allowed
        streq   r1, [r3, #-1]!          @ store newval if still allowed
        subs    r0, r2, r3              @ if r2 == r3 the str occurred
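        @ (if the store executed, the writeback left r3 == r2, so
        @ r2 - r3 yields 0 with the C flag set; otherwise r3 is still
        @ r2 + 1 and the subs borrows, giving -1 with C clear, exactly
        @ the r0/C-flag contract documented above)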
#else
#warning "NPTL on non MMU needs fixing"
        mov     r0, #-1
        adds    r0, r0, #0
#endif
        mov     pc, lr

#else

#ifdef CONFIG_SMP
        mcr     p15, 0, r0, c7, c10, 5  @ dmb
#endif
        ldrex   r3, [r2]
        subs    r3, r3, r0
        strexeq r3, r1, [r2]
        rsbs    r0, r3, #0
#ifdef CONFIG_SMP
        mcr     p15, 0, r0, c7, c10, 5  @ dmb
#endif
        mov     pc, lr

#endif

        .align  5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */
__kuser_get_tls:                        @ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
        ldr     r0, [pc, #(16 - 8)]     @ TLS stored at 0xffff0ff0
        mov     pc, lr
#else
        mrc     p15, 0, r0, c13, c0, 3  @ read TLS register
        mov     pc, lr
#endif

        .rep    5
        .word   0                       @ pad up to __kuser_helper_version
        .endr

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
__kuser_helper_version:                 @ 0xffff0ffc
        .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

        .globl  __kuser_helper_end
__kuser_helper_end:

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
        .macro  vector_stub, name, mode, correction=0
        .align  5

vector_\name:
        .if \correction
        sub     lr, lr, #\correction
        .endif

        @
        @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
        @ (parent CPSR)
        @
        stmia   sp, {r0, lr}            @ save r0, lr
        mrs     lr, spsr
        str     lr, [sp, #8]            @ save spsr

        @
        @ Prepare for SVC32 mode.  IRQs remain disabled.
        @
        mrs     r0, cpsr
        eor     r0, r0, #(\mode ^ SVC_MODE)
        msr     spsr_cxsf, r0

        @
        @ the branch table must immediately follow this code
        @
        and     lr, lr, #0x0f
        mov     r0, sp
        ldr     lr, [pc, lr, lsl #2]
        movs    pc, lr                  @ branch to handler in SVC mode
        .endm
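
/*
 * A worked note on the lookup above: lr holds the parent mode's low
 * four bits, and at the ldr the pc operand reads as the address of
 * that instruction plus 8, i.e. the first word after the movs, which
 * is exactly where each 16-entry branch table below begins.  Hence
 * table[mode & 0xf] selects the handler, and the movs both jumps to it
 * and drops into SVC mode by restoring the spsr set up above.
 */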
        .globl  __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
        vector_stub     irq, IRQ_MODE, 4

        .long   __irq_usr               @  0  (USR_26 / USR_32)
        .long   __irq_invalid           @  1  (FIQ_26 / FIQ_32)
        .long   __irq_invalid           @  2  (IRQ_26 / IRQ_32)
        .long   __irq_svc               @  3  (SVC_26 / SVC_32)
        .long   __irq_invalid           @  4
        .long   __irq_invalid           @  5
        .long   __irq_invalid           @  6
        .long   __irq_invalid           @  7
        .long   __irq_invalid           @  8
        .long   __irq_invalid           @  9
        .long   __irq_invalid           @  a
        .long   __irq_invalid           @  b
        .long   __irq_invalid           @  c
        .long   __irq_invalid           @  d
        .long   __irq_invalid           @  e
        .long   __irq_invalid           @  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     dabt, ABT_MODE, 8

        .long   __dabt_usr              @  0  (USR_26 / USR_32)
        .long   __dabt_invalid          @  1  (FIQ_26 / FIQ_32)
        .long   __dabt_invalid          @  2  (IRQ_26 / IRQ_32)
        .long   __dabt_svc              @  3  (SVC_26 / SVC_32)
        .long   __dabt_invalid          @  4
        .long   __dabt_invalid          @  5
        .long   __dabt_invalid          @  6
        .long   __dabt_invalid          @  7
        .long   __dabt_invalid          @  8
        .long   __dabt_invalid          @  9
        .long   __dabt_invalid          @  a
        .long   __dabt_invalid          @  b
        .long   __dabt_invalid          @  c
        .long   __dabt_invalid          @  d
        .long   __dabt_invalid          @  e
        .long   __dabt_invalid          @  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     pabt, ABT_MODE, 4

        .long   __pabt_usr              @  0  (USR_26 / USR_32)
        .long   __pabt_invalid          @  1  (FIQ_26 / FIQ_32)
        .long   __pabt_invalid          @  2  (IRQ_26 / IRQ_32)
        .long   __pabt_svc              @  3  (SVC_26 / SVC_32)
        .long   __pabt_invalid          @  4
        .long   __pabt_invalid          @  5
        .long   __pabt_invalid          @  6
        .long   __pabt_invalid          @  7
        .long   __pabt_invalid          @  8
        .long   __pabt_invalid          @  9
        .long   __pabt_invalid          @  a
        .long   __pabt_invalid          @  b
        .long   __pabt_invalid          @  c
        .long   __pabt_invalid          @  d
        .long   __pabt_invalid          @  e
        .long   __pabt_invalid          @  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
        vector_stub     und, UND_MODE

        .long   __und_usr               @  0  (USR_26 / USR_32)
        .long   __und_invalid           @  1  (FIQ_26 / FIQ_32)
        .long   __und_invalid           @  2  (IRQ_26 / IRQ_32)
        .long   __und_svc               @  3  (SVC_26 / SVC_32)
        .long   __und_invalid           @  4
        .long   __und_invalid           @  5
        .long   __und_invalid           @  6
        .long   __und_invalid           @  7
        .long   __und_invalid           @  8
        .long   __und_invalid           @  9
        .long   __und_invalid           @  a
        .long   __und_invalid           @  b
        .long   __und_invalid           @  c
        .long   __und_invalid           @  d
        .long   __und_invalid           @  e
        .long   __und_invalid           @  f

        .align  5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
        disable_fiq
        subs    pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */
vector_addrexcptn:
        b       vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
        .align  5
.LCvswi:
        .word   vector_swi

        .globl  __stubs_end
__stubs_end:

        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start
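
/*
 * ARM branch instructions encode a PC-relative offset fixed at link
 * time, while both the vectors and the stubs actually run from copies
 * in the high vector page (0xffff0000 and 0xffff0200 respectively).
 * Biasing each branch target by stubs_offset below makes the link-time
 * offset equal the run-time distance between the two copies.
 */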
        .globl  __vectors_start
__vectors_start:
        swi     SYS_ERROR0
        b       vector_und + stubs_offset
        ldr     pc, .LCvswi + stubs_offset
        b       vector_pabt + stubs_offset
        b       vector_dabt + stubs_offset
        b       vector_addrexcptn + stubs_offset
        b       vector_irq + stubs_offset
        b       vector_fiq + stubs_offset

        .globl  __vectors_end
__vectors_end:

        .data

        .globl  cr_alignment
        .globl  cr_no_alignment
cr_alignment:
        .space  4
cr_no_alignment:
        .space  4