/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/ptrace.h>
#include <asm/war.h>
#include <asm/interrupt.h>
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */
#define read_barrier_depends()	do { } while(0)
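
/*
 * Illustrative usage sketch (not part of the original header): the
 * reader side of the pointer-publication pattern described above.
 * All names here are hypothetical.  The writer side must initialize
 * the item and then publish the pointer behind a write barrier
 * (wmb(), defined further down in this file).
 */
#if 0
struct example_item {
	int payload;
};

static struct example_item *example_published;	/* set by a writer */

static int example_read_payload(void)
{
	struct example_item *p = example_published;

	/*
	 * The load of p->payload depends on the load of p, so the
	 * lightweight read_barrier_depends() is sufficient; a full
	 * rmb() would also work but may cost more.
	 */
	read_barrier_depends();
	return p ? p->payload : -1;
}
#endif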

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif
#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)
#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) \
	do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
	do { var = value; wmb(); } while (0)
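
/*
 * Illustrative usage sketch (not part of the original header): a
 * flag-based producer/consumer handshake built on the SMP barriers
 * above.  Unlike the pointer-publication case, there is no data
 * dependency between the flag and the data, so the reader needs
 * smp_rmb() rather than smp_read_barrier_depends().  All names are
 * hypothetical.
 */
#if 0
static int example_data;
static int example_ready;

static void example_produce(int value)
{
	example_data = value;
	smp_wmb();	/* order the data store before the flag store */
	example_ready = 1;
}

static int example_consume(void)
{
	while (!example_ready)
		;	/* spin until the producer sets the flag */
	smp_rmb();	/* order the flag load before the data load */
	return example_data;
}
#endif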

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;
#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down.  If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */
#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_fpu &&						\
	    (prev->thread.mflags & MF_FPUBOUND) &&			\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		prev->thread.mflags &= ~MF_FPUBOUND;			\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	next->thread.emulated_fp = 0;					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)
#else
#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)
#endif

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	ll	%0, %3		# xchg_u32	\n"
		"	.set	mips0				\n"
		"	move	%2, %z4				\n"
		"	.set	mips3				\n"
		"	sc	%2, %1				\n"
		"	beqzl	%2, 1b				\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	ll	%0, %3		# xchg_u32	\n"
		"	.set	mips0				\n"
		"	move	%2, %z4				\n"
		"	.set	mips3				\n"
		"	sc	%2, %1				\n"
		"	beqz	%2, 1b				\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	lld	%0, %3		# xchg_u64	\n"
		"	move	%2, %z4				\n"
		"	scd	%2, %1				\n"
		"	beqzl	%2, 1b				\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	lld	%0, %3		# xchg_u64	\n"
		"	move	%2, %z4				\n"
		"	scd	%2, %1				\n"
		"	beqz	%2, 1b				\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier */
	}

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
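
/*
 * Illustrative usage sketch (not part of the original header): tas()
 * as a crude test-and-set spin lock.  tas(&lock) atomically stores 1
 * and returns the previous value, so a return of 0 means the caller
 * acquired the lock.  The names are hypothetical; real code should use
 * the proper spinlock primitives instead.
 */
#if 0
static volatile int example_lock;

static void example_lock_acquire(void)
{
	while (tas(&example_lock))
		;	/* spin while the old value was already 1 */
}

static void example_lock_release(void)
{
	smp_mb();	/* order the critical section before the release */
	example_lock = 0;
}
#endif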

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f			\n"
		"	.set	mips0				\n"
		"	move	$1, %z4				\n"
		"	.set	mips3				\n"
		"	sc	$1, %1				\n"
		"	beqzl	$1, 1b				\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f			\n"
		"	.set	mips0				\n"
		"	move	$1, %z4				\n"
		"	.set	mips3				\n"
		"	sc	$1, %1				\n"
		"	beqz	$1, 1b				\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f			\n"
		"	move	$1, %z4				\n"
		"	scd	$1, %1				\n"
		"	beqzl	$1, 1b				\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f			\n"
		"	move	$1, %z4				\n"
		"	scd	$1, %1				\n"
		"	beqz	$1, 1b				\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier */
	}

	return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new), sizeof(*(ptr))))
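
/*
 * Illustrative usage sketch (not part of the original header): a
 * lock-free counter built on cmpxchg().  The loop retries until the
 * compare-and-swap observes the value it read, i.e. no other CPU
 * updated the counter in between.  The names are hypothetical.
 */
#if 0
static volatile int example_counter;

static int example_counter_add(int delta)
{
	int old, new;

	do {
		old = example_counter;
		new = old + delta;
	} while (cmpxchg(&example_counter, old, new) != old);

	return new;
}
#endif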

extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler(int n, void *addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern NORET_TYPE void die(const char *, struct pt_regs *);

static inline void die_if_kernel(const char *str, struct pt_regs *regs)
{
	if (unlikely(!user_mode(regs)))
		die(str, regs);
}

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */