/* assembler.h (5.9 KB) */
/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains arm architecture specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
  21. /*
  22. * Endian independent macros for shifting bytes within registers.
  23. */
  24. #ifndef __ARMEB__
  25. #define pull lsr
  26. #define push lsl
  27. #define get_byte_0 lsl #0
  28. #define get_byte_1 lsr #8
  29. #define get_byte_2 lsr #16
  30. #define get_byte_3 lsr #24
  31. #define put_byte_0 lsl #0
  32. #define put_byte_1 lsl #8
  33. #define put_byte_2 lsl #16
  34. #define put_byte_3 lsl #24
  35. #else
  36. #define pull lsl
  37. #define push lsr
  38. #define get_byte_0 lsr #24
  39. #define get_byte_1 lsr #16
  40. #define get_byte_2 lsr #8
  41. #define get_byte_3 lsl #0
  42. #define put_byte_0 lsl #24
  43. #define put_byte_1 lsl #16
  44. #define put_byte_2 lsl #8
  45. #define put_byte_3 lsl #0
  46. #endif
  47. /*
  48. * Data preload for architectures that support it
  49. */
  50. #if __LINUX_ARM_ARCH__ >= 5
  51. #define PLD(code...) code
  52. #else
  53. #define PLD(code...)
  54. #endif
  55. /*
  56. * This can be used to enable code to cacheline align the destination
  57. * pointer when bulk writing to memory. Experiments on StrongARM and
  58. * XScale didn't show this a worthwhile thing to do when the cache is not
  59. * set to write-allocate (this would need further testing on XScale when WA
  60. * is used).
  61. *
  62. * On Feroceon there is much to gain however, regardless of cache mode.
  63. */
  64. #ifdef CONFIG_CPU_FEROCEON
  65. #define CALGN(code...) code
  66. #else
  67. #define CALGN(code...)
  68. #endif
  69. /*
  70. * Enable and disable interrupts
  71. */
  72. #if __LINUX_ARM_ARCH__ >= 6
  73. .macro disable_irq_notrace
  74. cpsid i
  75. .endm
  76. .macro enable_irq_notrace
  77. cpsie i
  78. .endm
  79. #else
  80. .macro disable_irq_notrace
  81. msr cpsr_c, #PSR_I_BIT | SVC_MODE
  82. .endm
  83. .macro enable_irq_notrace
  84. msr cpsr_c, #SVC_MODE
  85. .endm
  86. #endif
  87. .macro asm_trace_hardirqs_off
  88. #if defined(CONFIG_TRACE_IRQFLAGS)
  89. stmdb sp!, {r0-r3, ip, lr}
  90. bl trace_hardirqs_off
  91. ldmia sp!, {r0-r3, ip, lr}
  92. #endif
  93. .endm
  94. .macro asm_trace_hardirqs_on_cond, cond
  95. #if defined(CONFIG_TRACE_IRQFLAGS)
  96. /*
  97. * actually the registers should be pushed and pop'd conditionally, but
  98. * after bl the flags are certainly clobbered
  99. */
  100. stmdb sp!, {r0-r3, ip, lr}
  101. bl\cond trace_hardirqs_on
  102. ldmia sp!, {r0-r3, ip, lr}
  103. #endif
  104. .endm
  105. .macro asm_trace_hardirqs_on
  106. asm_trace_hardirqs_on_cond al
  107. .endm
  108. .macro disable_irq
  109. disable_irq_notrace
  110. asm_trace_hardirqs_off
  111. .endm
  112. .macro enable_irq
  113. asm_trace_hardirqs_on
  114. enable_irq_notrace
  115. .endm
  116. /*
  117. * Save the current IRQ state and disable IRQs. Note that this macro
  118. * assumes FIQs are enabled, and that the processor is in SVC mode.
  119. */
  120. .macro save_and_disable_irqs, oldcpsr
  121. mrs \oldcpsr, cpsr
  122. disable_irq
  123. .endm
  124. /*
  125. * Restore interrupt state previously stored in a register. We don't
  126. * guarantee that this will preserve the flags.
  127. */
  128. .macro restore_irqs_notrace, oldcpsr
  129. msr cpsr_c, \oldcpsr
  130. .endm
  131. .macro restore_irqs, oldcpsr
  132. tst \oldcpsr, #PSR_I_BIT
  133. asm_trace_hardirqs_on_cond eq
  134. restore_irqs_notrace \oldcpsr
  135. .endm
  136. #define USER(x...) \
  137. 9999: x; \
  138. .pushsection __ex_table,"a"; \
  139. .align 3; \
  140. .long 9999b,9001f; \
  141. .popsection
  142. #ifdef CONFIG_SMP
  143. #define ALT_SMP(instr...) \
  144. 9998: instr
  145. #define ALT_UP(instr...) \
  146. .pushsection ".alt.smp.init", "a" ;\
  147. .long 9998b ;\
  148. instr ;\
  149. .popsection
  150. #define ALT_UP_B(label) \
  151. .equ up_b_offset, label - 9998b ;\
  152. .pushsection ".alt.smp.init", "a" ;\
  153. .long 9998b ;\
  154. b . + up_b_offset ;\
  155. .popsection
  156. #else
  157. #define ALT_SMP(instr...)
  158. #define ALT_UP(instr...) instr
  159. #define ALT_UP_B(label) b label
  160. #endif
  161. /*
  162. * SMP data memory barrier
  163. */
  164. .macro smp_dmb
  165. #ifdef CONFIG_SMP
  166. #if __LINUX_ARM_ARCH__ >= 7
  167. ALT_SMP(dmb)
  168. #elif __LINUX_ARM_ARCH__ == 6
  169. ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
  170. #else
  171. #error Incompatible SMP platform
  172. #endif
  173. ALT_UP(nop)
  174. #endif
  175. .endm
  176. #ifdef CONFIG_THUMB2_KERNEL
  177. .macro setmode, mode, reg
  178. mov \reg, #\mode
  179. msr cpsr_c, \reg
  180. .endm
  181. #else
  182. .macro setmode, mode, reg
  183. msr cpsr_c, #\mode
  184. .endm
  185. #endif
  186. /*
  187. * STRT/LDRT access macros with ARM and Thumb-2 variants
  188. */
  189. #ifdef CONFIG_THUMB2_KERNEL
  190. .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
  191. 9999:
  192. .if \inc == 1
  193. \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
  194. .elseif \inc == 4
  195. \instr\cond\()\t\().w \reg, [\ptr, #\off]
  196. .else
  197. .error "Unsupported inc macro argument"
  198. .endif
  199. .pushsection __ex_table,"a"
  200. .align 3
  201. .long 9999b, \abort
  202. .popsection
  203. .endm
  204. .macro usracc, instr, reg, ptr, inc, cond, rept, abort
  205. @ explicit IT instruction needed because of the label
  206. @ introduced by the USER macro
  207. .ifnc \cond,al
  208. .if \rept == 1
  209. itt \cond
  210. .elseif \rept == 2
  211. ittt \cond
  212. .else
  213. .error "Unsupported rept macro argument"
  214. .endif
  215. .endif
  216. @ Slightly optimised to avoid incrementing the pointer twice
  217. usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
  218. .if \rept == 2
  219. usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
  220. .endif
  221. add\cond \ptr, #\rept * \inc
  222. .endm
  223. #else /* !CONFIG_THUMB2_KERNEL */
  224. .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
  225. .rept \rept
  226. 9999:
  227. .if \inc == 1
  228. \instr\cond\()b\()\t \reg, [\ptr], #\inc
  229. .elseif \inc == 4
  230. \instr\cond\()\t \reg, [\ptr], #\inc
  231. .else
  232. .error "Unsupported inc macro argument"
  233. .endif
  234. .pushsection __ex_table,"a"
  235. .align 3
  236. .long 9999b, \abort
  237. .popsection
  238. .endr
  239. .endm
  240. #endif /* CONFIG_THUMB2_KERNEL */
  241. .macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
  242. usracc str, \reg, \ptr, \inc, \cond, \rept, \abort
  243. .endm
  244. .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
  245. usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
  246. .endm