/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

#include <linux/config.h>

#define set_cr(x)                                       \
        __asm__ __volatile__(                           \
        "mcr    p15, 0, %0, c1, c0      @ set CR"       \
        : : "r" (x))
#define CR_M    (1 << 0)        /* MMU enable                           */
#define CR_A    (1 << 1)        /* Alignment abort enable               */
#define CR_C    (1 << 2)        /* Dcache enable                        */
#define CR_W    (1 << 3)        /* Write buffer enable                  */
#define CR_P    (1 << 4)        /* 32-bit exception handler             */
#define CR_D    (1 << 5)        /* 32-bit data address range            */
#define CR_L    (1 << 6)        /* Implementation defined               */
#define CR_B    (1 << 7)        /* Big endian                           */
#define CR_S    (1 << 8)        /* System MMU protection                */
#define CR_R    (1 << 9)        /* ROM MMU protection                   */
#define CR_F    (1 << 10)       /* Implementation defined               */
#define CR_Z    (1 << 11)       /* Implementation defined               */
#define CR_I    (1 << 12)       /* Icache enable                        */
#define CR_V    (1 << 13)       /* Vectors relocated to 0xffff0000      */
#define CR_RR   (1 << 14)       /* Round Robin cache replacement        */
extern unsigned long cr_no_alignment;   /* defined in entry-armv.S */
extern unsigned long cr_alignment;      /* defined in entry-armv.S */

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_base()  ((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base()  (0)
#endif
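/*
 * Usage sketch: the CR_* bits above describe the CP15 control register
 * that set_cr() writes.  A read-modify-write helper that switches on the
 * Icache could look like the function below; the helper name is purely
 * illustrative and not part of this header.
 */
static inline void example_enable_icache(void)
{
        unsigned long cr;

        __asm__ __volatile__(
        "mrc    p15, 0, %0, c1, c0      @ read CR"
        : "=r" (cr));

        set_cr(cr | CR_I);      /* write CR back with the Icache bit set */
}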
/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)                                       \
        ({                                                      \
                unsigned long temp;                             \
                __asm__ __volatile__(                           \
                "mrs    %0, cpsr        @ local_irq_save\n"     \
                "       orr     %1, %0, #128\n"                 \
                "       msr     cpsr_c, %1"                     \
                : "=r" (x), "=r" (temp)                         \
                :                                               \
                : "memory");                                    \
        })
/*
 * Enable IRQs
 */
#define local_irq_enable()                                      \
        ({                                                      \
                unsigned long temp;                             \
                __asm__ __volatile__(                           \
                "mrs    %0, cpsr        @ local_irq_enable\n"   \
                "       bic     %0, %0, #128\n"                 \
                "       msr     cpsr_c, %0"                     \
                : "=r" (temp)                                   \
                :                                               \
                : "memory");                                    \
        })

/*
 * Disable IRQs
 */
#define local_irq_disable()                                     \
        ({                                                      \
                unsigned long temp;                             \
                __asm__ __volatile__(                           \
                "mrs    %0, cpsr        @ local_irq_disable\n"  \
                "       orr     %0, %0, #128\n"                 \
                "       msr     cpsr_c, %0"                     \
                : "=r" (temp)                                   \
                :                                               \
                : "memory");                                    \
        })
/*
 * Enable FIQs
 */
#define __stf()                                                 \
        ({                                                      \
                unsigned long temp;                             \
                __asm__ __volatile__(                           \
                "mrs    %0, cpsr        @ stf\n"                \
                "       bic     %0, %0, #64\n"                  \
                "       msr     cpsr_c, %0"                     \
                : "=r" (temp)                                   \
                :                                               \
                : "memory");                                    \
        })

/*
 * Disable FIQs
 */
#define __clf()                                                 \
        ({                                                      \
                unsigned long temp;                             \
                __asm__ __volatile__(                           \
                "mrs    %0, cpsr        @ clf\n"                \
                "       orr     %0, %0, #64\n"                  \
                "       msr     cpsr_c, %0"                     \
                : "=r" (temp)                                   \
                :                                               \
                : "memory");                                    \
        })
/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)                                     \
        ({                                                      \
                __asm__ __volatile__(                           \
                "mrs    %0, cpsr        @ local_save_flags\n"   \
                : "=r" (x)                                      \
                :                                               \
                : "memory");                                    \
        })

/*
 * Restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)                                    \
        __asm__ __volatile__(                                   \
        "msr    cpsr_c, %0      @ local_irq_restore\n"          \
        :                                                       \
        : "r" (x)                                               \
        : "memory")
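/*
 * Usage sketch: a short critical section is normally bracketed with
 * local_irq_save()/local_irq_restore() so that the previous interrupt
 * state is preserved.  The function name below is illustrative only and
 * not part of this header.
 */
static inline void example_atomic_increment(volatile unsigned long *counter)
{
        unsigned long flags;

        local_irq_save(flags);          /* save CPSR and mask IRQs */
        (*counter)++;                   /* cannot be interrupted by an IRQ */
        local_irq_restore(flags);       /* restore the saved IRQ/FIQ state */
}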
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two workarounds:
 *   1. Disable interrupts and emulate the atomic swap
 *   2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it is the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        extern void __bad_xchg(volatile void *, int);
        unsigned long ret;
#ifdef swp_is_buggy
        unsigned long flags;
#endif

        /*
         * On CPUs with a broken "swp" the exchange is emulated with IRQs
         * disabled; otherwise the swp/swpb instructions are used directly.
         */
        switch (size) {
#ifdef swp_is_buggy
        case 1:
                local_irq_save(flags);
                ret = *(volatile unsigned char *)ptr;
                *(volatile unsigned char *)ptr = x;
                local_irq_restore(flags);
                break;

        case 4:
                local_irq_save(flags);
                ret = *(volatile unsigned long *)ptr;
                *(volatile unsigned long *)ptr = x;
                local_irq_restore(flags);
                break;
#else
        case 1:
                __asm__ __volatile__ ("swpb %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory");
                break;

        case 4:
                __asm__ __volatile__ ("swp %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory");
                break;
#endif
        default:
                __bad_xchg(ptr, size), ret = 0;
        }

        return ret;
}
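/*
 * Usage sketch: callers normally go through an xchg() macro rather than
 * calling __xchg() directly; in the Linux tree that wrapper lives in the
 * arch-level <asm/system.h>.  A minimal version, shown here purely as an
 * illustration (the example_ name is not part of this header), derives
 * the access width from the pointed-to type:
 */
#define example_xchg(ptr, x)                                            \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),         \
                                    sizeof(*(ptr))))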
#endif