system.h

/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

#include <linux/config.h>

/*
 * Save the current interrupt enable state & disable IRQs.
 * (Bit 7, #128, of the CPSR is the I bit; setting it masks IRQs.)
 */
#define local_irq_save(x) \
        ({ \
                unsigned long temp; \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ local_irq_save\n" \
                " orr %1, %0, #128\n" \
                " msr cpsr_c, %1" \
                : "=r" (x), "=r" (temp) \
                : \
                : "memory"); \
        })
/*
 * Enable IRQs
 */
#define local_irq_enable() \
        ({ \
                unsigned long temp; \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ local_irq_enable\n" \
                " bic %0, %0, #128\n" \
                " msr cpsr_c, %0" \
                : "=r" (temp) \
                : \
                : "memory"); \
        })
/*
 * Disable IRQs
 */
#define local_irq_disable() \
        ({ \
                unsigned long temp; \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ local_irq_disable\n" \
                " orr %0, %0, #128\n" \
                " msr cpsr_c, %0" \
                : "=r" (temp) \
                : \
                : "memory"); \
        })
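
/*
 * Usage sketch (illustrative, not part of this header): bracketing a
 * short critical section with the two macros above.  The variable
 * device_state and the function around it are hypothetical.  Note the
 * pair does not nest: local_irq_enable() unmasks IRQs unconditionally,
 * so nestable code should prefer local_irq_save()/local_irq_restore().
 *
 *      static unsigned int device_state;
 *
 *      static void set_device_state(unsigned int new_state)
 *      {
 *              local_irq_disable();      // mask IRQs on this CPU
 *              device_state = new_state; // IRQ handlers cannot intervene
 *              local_irq_enable();       // unconditionally unmask IRQs
 *      }
 */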
/*
 * Enable FIQs.
 * (Bit 6, #64, of the CPSR is the F bit; clearing it unmasks FIQs.)
 */
#define __stf() \
        ({ \
                unsigned long temp; \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ stf\n" \
                " bic %0, %0, #64\n" \
                " msr cpsr_c, %0" \
                : "=r" (temp) \
                : \
                : "memory"); \
        })

/*
 * Disable FIQs (set the F bit).
 */
#define __clf() \
        ({ \
                unsigned long temp; \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ clf\n" \
                " orr %0, %0, #64\n" \
                " msr cpsr_c, %0" \
                : "=r" (temp) \
                : \
                : "memory"); \
        })
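
/*
 * Usage sketch (illustrative, not part of this header): code that must
 * exclude even FIQs can pair the two macros; the hardware access shown
 * is a placeholder.  Like the IRQ enable/disable pair, these do not
 * nest, since __stf() unmasks FIQs unconditionally.
 *
 *      __clf();                // mask FIQs
 *      ... program FIQ-owned device registers ...
 *      __stf();                // unmask FIQs
 */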
/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x) \
        ({ \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ local_save_flags\n" \
                : "=r" (x) \
                : \
                : "memory"); \
        })
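
/*
 * Usage sketch (illustrative, not part of this header): the value
 * written to x is a raw CPSR image, so a caller can test bit 7 (the
 * I bit) to ask whether IRQs are currently masked:
 *
 *      unsigned long flags;
 *
 *      local_save_flags(flags);
 *      if (flags & 128)
 *              printk("IRQs are masked\n");
 */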
/*
 * Restore saved IRQ & FIQ state.
 */
#define local_irq_restore(x) \
        __asm__ __volatile__( \
        "msr cpsr_c, %0 @ local_irq_restore\n" \
        : \
        : "r" (x) \
        : "memory")
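
/*
 * Usage sketch (illustrative, not part of this header): unlike the
 * enable/disable pair, save/restore nests safely, because restore puts
 * back whatever state the caller found.  The list manipulation is
 * hypothetical:
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);          // mask IRQs, remember prior state
 *      list_add(&item->list, &queue);  // hypothetical critical section
 *      local_irq_restore(flags);       // IRQs unmasked only if they were
 *                                      // unmasked on entry
 */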
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif
/*
 * Atomically exchange x with the value at ptr; size selects the access
 * width in bytes.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        extern void __bad_xchg(volatile void *, int);
        unsigned long ret;
#ifdef swp_is_buggy
        unsigned long flags;
#endif

        switch (size) {
#ifdef swp_is_buggy
        case 1:
                local_irq_save(flags);
                ret = *(volatile unsigned char *)ptr;
                *(volatile unsigned char *)ptr = x;
                local_irq_restore(flags);
                break;

        case 4:
                local_irq_save(flags);
                ret = *(volatile unsigned long *)ptr;
                *(volatile unsigned long *)ptr = x;
                local_irq_restore(flags);
                break;
#else
        case 1:
                __asm__ __volatile__ ("swpb %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory");
                break;

        case 4:
                __asm__ __volatile__ ("swp %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory");
                break;
#endif
        default:
                __bad_xchg(ptr, size), ret = 0;
        }

        return ret;
}
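
/*
 * Usage sketch (illustrative, not part of this header): callers do not
 * normally invoke __xchg() directly.  The generic xchg() macro wraps it
 * roughly as below, passing the width of *ptr so the right case is
 * chosen at compile time:
 *
 *      #define xchg(ptr, x) \
 *              ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
 *                                          sizeof(*(ptr))))
 *
 *      unsigned long old = xchg(&lock_word, 1);   // lock_word hypothetical
 */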
#endif /* __ASM_PROC_SYSTEM_H */