system.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/ptrace.h>
#include <asm/war.h>
#include <asm/interrupt.h>
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */
#define read_barrier_depends()	do { } while(0)
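
/*
 * Illustrative sketch (the classic pointer-publication pattern this
 * primitive exists for; the names p, q, gp and use_data() are
 * hypothetical, not part of this header):
 *
 * <programlisting>
 *	writer				reader
 *
 *	p->a = 1;			q = gp;
 *	wmb();				read_barrier_depends();
 *	gp = p;				if (q)
 *						use_data(q->a);
 * </programlisting>
 *
 * Reading q->a depends on the value of q, so on MIPS the dependency
 * alone orders the two reads and read_barrier_depends() expands to
 * nothing; on a CPU like Alpha it would have to emit a real barrier.
 */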
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */
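
/*
 * Illustrative sketch of where iob() fits, assuming a hypothetical
 * memory-mapped device (regs, CTRL and START are made-up names): the
 * barrier forces the store out of any CPU write buffer before the
 * driver goes on to wait for the device.
 *
 * <programlisting>
 *	volatile unsigned int *regs = dev_base;
 *
 *	regs[CTRL] = START;	// post the command to the device
 *	iob();			// drain the write buffer first
 *	poll_for_completion();	// only now is the device guaranteed
 *				// to have seen the store
 * </programlisting>
 */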
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	wmb()
#endif

#ifndef CONFIG_SMP
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
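
/*
 * Illustrative sketch of pairing the smp_*() barriers for a flag-based
 * handoff between two CPUs ("data" and "ready" are hypothetical shared
 * variables, initially zero):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	data = 42;			while (!ready)
 *	smp_wmb();				;
 *	ready = 1;			smp_rmb();
 *					assert(data == 42);
 * </programlisting>
 *
 * The writer's smp_wmb() must pair with the reader's smp_rmb(); on a
 * uniprocessor kernel both collapse to barrier(), which constrains
 * only the compiler.
 */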
/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, next->thread_info);			\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)

#define ROT_IN_PIECES							\
	"	.set	noreorder	\n"				\
	"	.set	reorder		\n"

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	move	%2, %z4					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		ROT_IN_PIECES
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	move	%2, %z4					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		ROT_IN_PIECES
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
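
/*
 * Illustrative sketch: tas() returns the *previous* value, so a
 * minimal test-and-set lock can be built on it.  The names my_lock,
 * my_lock_acquire() and my_lock_release() are hypothetical; real code
 * should use the spinlock API instead.
 *
 * <programlisting>
 *	static int my_lock;
 *
 *	static inline void my_lock_acquire(void)
 *	{
 *		while (tas(&my_lock))	// old value 0 means we got it
 *			;
 *	}
 *
 *	static inline void my_lock_release(void)
 *	{
 *		smp_mb();		// make critical-section stores
 *		my_lock = 0;		// visible before the release
 *	}
 * </programlisting>
 *
 * On SMP the ll/sc sequence above already ends with a sync, so the
 * acquire side needs no extra barrier.
 */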
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noat					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	sc	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		ROT_IN_PIECES
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	at					\n"
		: "=&r" (retval), "=m" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	noat					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	sc	$1, %1					\n"
		"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	at					\n"
		: "=&r" (retval), "=m" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}
#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noat					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		ROT_IN_PIECES
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	at					\n"
		: "=&r" (retval), "=m" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	noat					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	at					\n"
		: "=&r" (retval), "=m" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
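
/*
 * Illustrative sketch of the usual cmpxchg() retry loop, here adding
 * to a counter without a lock ("counter_add" is a hypothetical
 * helper, not part of this header):
 *
 * <programlisting>
 *	static inline void counter_add(volatile int *v, int n)
 *	{
 *		int old;
 *
 *		do {
 *			old = *v;
 *		} while (cmpxchg(v, old, old + n) != old);
 *	}
 * </programlisting>
 *
 * cmpxchg() returns the value it found at *v: if that is not "old",
 * another CPU changed the counter between the read and the update,
 * and the loop retries with the fresh value.
 */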
extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);

extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);

#define die(msg, regs)							\
	__die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs)					\
	__die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */