/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains arm architecture specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

/* This header is assembler-only; refuse to be pulled into C translation units. */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>

/* Identity mapping: MMIO addresses need no translation from assembly. */
#define IOMEM(x)	(x)
/*
 * Endian independent macros for shifting bytes within registers.
 *
 * "pull"/"push" give the shift direction used to move partial words
 * toward/away from the low end of a register; get_byte_N/put_byte_N
 * extract/place byte lane N regardless of endianness.
 *
 * NOTE(review): "push" shadows the ARM/Thumb push mnemonic — confirm no
 * user of this header needs the real instruction under these names.
 */
#ifndef __ARMEB__
/* little-endian */
#define pull            lsr
#define push            lsl
#define get_byte_0      lsl #0
#define get_byte_1      lsr #8
#define get_byte_2      lsr #16
#define get_byte_3      lsr #24
#define put_byte_0      lsl #0
#define put_byte_1      lsl #8
#define put_byte_2      lsl #16
#define put_byte_3      lsl #24
#else
/* big-endian: byte 0 lives in the top bits, so every shift is mirrored */
#define pull            lsl
#define push            lsr
#define get_byte_0      lsr #24
#define get_byte_1      lsr #16
#define get_byte_2      lsr #8
#define get_byte_3      lsl #0
#define put_byte_0      lsl #24
#define put_byte_1      lsl #16
#define put_byte_2      lsl #8
#define put_byte_3      lsl #0
#endif
/*
 * Data preload for architectures that support it.
 * PLD(...) emits its argument on ARMv5+ and compiles to nothing on
 * older cores, so callers can sprinkle preloads unconditionally.
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
/*
 * Enable and disable interrupts (no irq-tracing hooks).
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i			@ v6+: change-processor-state, mask IRQ
	.endm

	.macro	enable_irq_notrace
	cpsie	i			@ v6+: unmask IRQ
	.endm
#else
	/* pre-v6: no cps instruction, rewrite the CPSR control field.
	 * This also forces SVC mode and leaves FIQs enabled. */
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif
	/* Call trace_hardirqs_off() when irq tracing is configured;
	 * caller-clobbered registers are preserved around the call. */
	.macro	asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb	sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	/* Conditional variant: \cond gates only the bl itself. */
	.macro	asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb	sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro	asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al	@ unconditional form
	.endm

	/* Traced IRQ disable: mask first, then record the state change. */
	.macro	disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	/* Traced IRQ enable: record first (while still masked), then unmask. */
	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask	@ v7-M has no CPSR; PRIMASK holds the IRQ mask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	/* Untraced variant.
	 * NOTE(review): unlike the traced version above this reads cpsr
	 * unconditionally, with no CONFIG_CPU_V7M case — confirm no v7-M
	 * caller reaches this macro. */
	.macro	save_and_disable_irqs_notrace, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT		@ Z set => IRQs were enabled before
	asm_trace_hardirqs_on_cond eq		@ trace "on" only in that case
	restore_irqs_notrace \oldcpsr
	.endm
/*
 * Wrap a user-space access: records the faulting instruction's address in
 * __ex_table.  The fixup target is local label 9001, which MUST be defined
 * at the use site.
 */
#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection

#ifdef CONFIG_SMP
/*
 * SMP/UP alternatives: ALT_SMP emits the SMP instruction inline; ALT_UP
 * records a UP replacement in .alt.smp.init, keyed by the address of the
 * preceding ALT_SMP instruction (label 9998).
 */
#define ALT_SMP(instr...)			\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)			\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
9997:	instr				;\
	.if . - 9997b != 4			;\
	.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif					;\
	.popsection
#define ALT_UP_B(label)				\
	.equ	up_b_offset, label - 9998b	;\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
	W(b)	. + up_b_offset			;\
	.popsection
#else
/* UP kernel: no patching needed — drop the SMP form, emit the UP form. */
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4	@ CP15 equivalent of isb on ARMv6
#endif
	.endm
/*
 * SMP data memory barrier.  \mode selects the encoding ("arm" vs the
 * Thumb-2 wide form via W()).  On UP kernels the barrier is patched to a
 * nop at boot through the ALT_SMP/ALT_UP mechanism.
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
#if defined(CONFIG_CPU_V7M)
/*
 * setmode is used to assert to be in svc mode during boot. For v7-M
 * this is done in __v7m_setup, so setmode can be empty here.
 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	/* Thumb-2 msr cannot take an immediate: stage it through \reg. */
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 */
	.macro	safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE		@ mode bits become 0 iff we are in HYP
	tst	\reg, #MODE_MASK		@ Z set => currently in HYP mode
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f				@ not HYP: plain CPSR write suffices
	orr	\reg, \reg, #PSR_A_BIT		@ HYP: also mask asynchronous aborts
	adr	lr, BSYM(2f)			@ resume at 2: after the exception return
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)			@ leave HYP via exception return to SVC
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
	.endm
/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants.
 *
 * \instr = str or ldr, \inc = access size (1 or 4 bytes), \t = the
 * user-access ("t") suffix, \abort = fixup label recorded in __ex_table.
 */
#ifdef CONFIG_THUMB2_KERNEL

	/* One access at [\ptr, #\off] (Thumb-2 wide encoding), with an
	 * exception-table entry pointing at \abort. */
	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	/* \rept (1 or 2) accesses, then a single post-increment of \ptr. */
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	/* ARM encoding: post-indexed addressing increments \ptr per access. */
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	/* Convenience wrappers; default fixup label is 9001 at the use site. */
	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type	\name , #object
\name:
	.asciz	"\string"
	.size	\name , . - \name
	.endm
	/*
	 * Branch to \bad unless [\addr, \addr + \size) fits below \limit.
	 * adds computes the last byte of the access, setting C on address
	 * wrap-around; the carry-conditional sbc then compares against the
	 * limit, so either a wrap or an out-of-range access leaves C set
	 * for the bcs.  \tmp is clobbered.
	 */
	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcccs	\tmp, \tmp, \limit
	bcs	\bad
#endif
	.endm

#endif	/* __ASM_ASSEMBLER_H__ */