#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <asm/memory.h>

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
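
/*
 * Illustrative usage sketch (not part of this header): __asmeq() lines are
 * prepended to an asm template to verify that variables bound to specific
 * registers really ended up in those registers, e.g.:
 *
 *	register unsigned long r0 asm("r0") = arg;
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"svc	#0"
 *		: "+r" (r0) : : "memory");
 *
 * If the compiler picked a different register, the generated .ifnc/.err
 * directives make the assembler abort the build.
 */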
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
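
/*
 * Illustrative sketch (not part of this header): xchg() atomically stores
 * a new value and returns the old one, which is enough to build a crude
 * test-and-set flag.  The variable name is hypothetical, and a real lock
 * would also need the appropriate memory barriers on release:
 *
 *	static volatile unsigned long busy;
 *
 *	while (xchg(&busy, 1) != 0)
 *		cpu_relax();
 *	... critical section ...
 *	busy = 0;
 */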
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifndef CONFIG_SMP
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define mb()		dmb()
#define rmb()		dmb()
#define wmb()		dmb()
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
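
/*
 * Illustrative sketch (not part of this header): the classic use of the
 * smp_* barriers is to order a data write against a flag write on the
 * producer side, with the matching reads ordered on the consumer side.
 * The variable names are hypothetical:
 *
 *	producer:			consumer:
 *		data = compute();		while (!ready)
 *		smp_wmb();				cpu_relax();
 *		ready = 1;			smp_rmb();
 *						use(data);
 */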
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif
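
/*
 * Illustrative sketch (not part of this header): control register updates
 * are read-modify-write sequences on top of get_cr()/set_cr(), which is
 * essentially the pattern adjust_cr() wraps.  Using the CR_* bits above:
 *
 *	set_cr(get_cr() | CR_A);	enable alignment aborts
 *	set_cr(get_cr() & ~CR_A);	disable them again
 */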
#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
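
/*
 * Illustrative sketch (not part of this header): each CPACC_* macro selects
 * the two-bit access field for coprocessor n in the coprocessor access
 * register.  Granting full access to the VFP coprocessors (10 and 11), for
 * example, looks roughly like:
 *
 *	unsigned int access = get_copro_access();
 *	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
 */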
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev,next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
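
/*
 * Illustrative sketch (not part of this header): only the scheduler core
 * invokes this macro; conceptually it does something like
 *
 *	switch_to(prev, next, prev);
 *
 * where the third argument receives the task we actually switched away
 * from once this task is eventually resumed.
 */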
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 * 1. Disable interrupts and emulate the atomic swap
 * 2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
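
/*
 * Illustrative sketch (not part of this header): cmpxchg_local() is only
 * atomic against the current CPU, which suits per-CPU data.  A typical
 * compare-and-swap retry loop over a hypothetical counter:
 *
 *	long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg_local(&counter, old, new) != old);
 */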
#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif