/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains arm architecture specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 *
 * pull/push name the shift direction that moves data towards / away
 * from the least-significant end of a register for the configured
 * endianness; get_byte_n / put_byte_n give the shift that extracts or
 * places byte n (memory order) of a 32-bit word.
 */
#ifndef __ARMEB__		/* little-endian */
#define pull		lsr
#define push		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else				/* big-endian */
#define pull		lsl
#define push		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif
/*
 * Enable and disable interrupts (IRQs only; FIQs are untouched).
 * The pre-v6 fallback writes the CPSR control field directly, which
 * also forces SVC mode.
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif
	/*
	 * Tell the irq-tracing code that IRQs were just disabled,
	 * preserving the caller-clobbered registers around the C call.
	 * Compiles to nothing unless CONFIG_TRACE_IRQFLAGS is set.
	 */
	.macro	asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb	sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	/* Conditional IRQs-on notification; \cond guards only the bl. */
	.macro	asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb	sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	/* Unconditional variant of the above. */
	.macro	asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al
	.endm
	/* Disable IRQs, then inform the irq-tracing code. */
	.macro	disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	/* Inform the irq-tracing code first, then actually enable IRQs. */
	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask	@ v7-M keeps the IRQ mask in PRIMASK
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm
  138. .macro save_and_disable_irqs_notrace, oldcpsr
  139. mrs \oldcpsr, cpsr
  140. disable_irq_notrace
  141. .endm
/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	/*
	 * As restore_irqs_notrace, but first tells the irq-tracing code
	 * when the saved state has IRQs enabled (I bit clear => eq).
	 */
	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on_cond eq
	restore_irqs_notrace \oldcpsr
	.endm
/*
 * USER(): wrap one user-space access instruction and emit an exception
 * table entry for it; on a fault, control resumes at the local label
 * 9001, which the surrounding code must define.
 */
#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection

#ifdef CONFIG_SMP
/*
 * ALT_SMP/ALT_UP: emit the SMP form inline; record the UP replacement
 * (which must be exactly 4 bytes) in .alt.smp.init so the boot code can
 * patch it in when running on a uniprocessor system.
 */
#define ALT_SMP(instr...)			\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)			\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
9997:	instr					;\
	.if . - 9997b != 4			;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif					;\
	.popsection
#define ALT_UP_B(label)				\
	.equ	up_b_offset, label - 9998b	;\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
	W(b)	. + up_b_offset			;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4		@ CP15 ISB equivalent
#endif
	.endm

/*
 * SMP data memory barrier
 *
 * \mode selects the encoding: "arm" emits the plain form, anything
 * else wraps the instruction in W() for Thumb-2 wide encoding.  On UP
 * kernels the barrier is patched out to a nop (see ALT_UP).
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
#if defined(CONFIG_CPU_V7M)
/*
 * setmode is used to assert to be in svc mode during boot. For v7-M
 * this is done in __v7m_setup, so setmode can be empty here.
 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	/* Thumb-2 has no immediate form of msr; go via the scratch \reg. */
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE		@ eor+tst: Z set iff current mode is HYP
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f				@ not in HYP: plain CPSR mode switch
	@ In HYP mode: also mask aborts and leave via exception return,
	@ since HYP cannot be exited with a simple msr cpsr_c.
	orr	\reg, \reg, #PSR_A_BIT
	adr	lr, BSYM(2f)
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	/*
	 * One unprivileged (\t = TUSER()) access of \inc bytes at
	 * [\ptr, #\off], with an exception table entry branching to
	 * \abort on a fault.  Only byte (1) and word (4) sizes exist.
	 */
	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	/*
	 * \rept (1 or 2) user accesses through \ptr, advancing \ptr by
	 * the total size afterwards.  Thumb-2 post-indexed T-variants
	 * don't exist, so offsets plus a single add are used instead.
	 */
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	/*
	 * ARM variant: \rept post-indexed unprivileged accesses, each
	 * with its own exception table entry branching to \abort.
	 */
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	/* Store \reg to user space; on fault branch to \abort (default 9001). */
	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	/* Load \reg from user space; on fault branch to \abort (default 9001). */
	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
	/*
	 * Utility macro for declaring string literals: emits a
	 * NUL-terminated string at label \name with object type/size
	 * set for the symbol table.
	 */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
	/*
	 * Verify that the user range [\addr, \addr + \size) lies below
	 * \limit, branching to \bad otherwise.  Not needed when CPU
	 * domains enforce user/kernel separation in hardware.
	 *
	 * NOTE(review): "sbcccs" is the pre-UAL spelling (SBC + CC
	 * condition + S suffix); a strictly-UAL assembler expects
	 * "sbcscc" — confirm against the toolchain in use.
	 */
	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1	@ tmp = last byte; C set if addr+size wraps
	sbcccs	\tmp, \tmp, \limit	@ if no wrap, compare last byte with \limit
	bcs	\bad			@ carry set => range wraps or reaches \limit
#endif
	.endm

#endif /* __ASM_ASSEMBLER_H__ */