/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down.  If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */
#define switch_to(prev,next,last)                                       \
do {                                                                    \
        if (cpu_has_fpu &&                                              \
            (prev->thread.mflags & MF_FPUBOUND) &&                      \
            (!(KSTK_STATUS(prev) & ST0_CU1))) {                         \
                prev->thread.mflags &= ~MF_FPUBOUND;                    \
                prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
        }                                                               \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        next->thread.emulated_fp = 0;                                   \
        (last) = resume(prev, next, next->thread_info);                 \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
} while(0)
#else
#define switch_to(prev,next,last)                                       \
do {                                                                    \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        (last) = resume(prev, next, task_thread_info(next));            \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
} while(0)
#endif
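
/*
 * In both variants above, the actual register save/restore is done by
 * resume(), the low-level assembly context-switch routine declared
 * further up: it switches from prev's register state and kernel stack
 * to next's, and returns a pointer to the task that was running before
 * the switch, which switch_to() hands back through (last).
 */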

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
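
/*
 * The atomic exchange and compare-and-exchange helpers below are built
 * on the MIPS load-linked/store-conditional pair: ll (lld for 64-bit)
 * loads a word and begins monitoring its location, and sc (scd) stores
 * only if nothing else has written that location in the meantime,
 * leaving 1 in its source register on success and 0 on failure, in
 * which case the code branches back and retries.  The R10000_LLSC_WAR
 * variants use the branch-likely form beqzl to work around an ll/sc
 * erratum in early R10000 silicon.  CPUs without ll/sc fall back to
 * disabling interrupts, which is sufficient because such CPUs are
 * uniprocessor-only.
 */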

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
        __u32 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %0, %3                  # xchg_u32      \n"
                "       .set    mips0                                   \n"
                "       move    %2, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %0, %3                  # xchg_u32      \n"
                "       .set    mips0                                   \n"
                "       move    %2, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      %2, %1                                  \n"
                "       beqz    %2, 2f                                  \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                *m = val;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
        __u64 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %3                  # xchg_u64      \n"
                "       move    %2, %z4                                 \n"
                "       scd     %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %3                  # xchg_u64      \n"
                "       move    %2, %z4                                 \n"
                "       scd     %2, %1                                  \n"
                "       beqz    %2, 2f                                  \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                *m = val;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 4:
                return __xchg_u32(ptr, x);
        case 8:
                return __xchg_u64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
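
/*
 * Illustrative sketch (the lock variable below is hypothetical, not
 * part of this header): xchg() returns the previous contents of the
 * location, so a crude test-and-set spinlock could be built as
 *
 *      static int lock;
 *
 *      while (xchg(&lock, 1) != 0)
 *              ;               - old value was 1: someone else holds it
 *      ...critical section...
 *      xchg(&lock, 0);         - release
 *
 * tas(ptr) is shorthand for exactly this xchg(ptr, 1) pattern.
 */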

#define __HAVE_ARCH_CMPXCHG 1
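
/*
 * __cmpxchg_u32()/__cmpxchg_u64() atomically replace *m with new, but
 * only if *m still equals old.  Either way they return the value *m
 * held when the operation ran, so the caller knows the swap succeeded
 * exactly when the return value equals old.
 */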

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u32 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqz    $1, 3f                                  \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "3:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                if (retval == old)
                        *m = new;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u64 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqz    $1, 3f                                  \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "3:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                if (retval == old)
                        *m = new;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
        volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
        unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        case 8:
                return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new), sizeof(*(ptr))))
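
/*
 * Illustrative sketch (counter is a hypothetical shared variable, not
 * part of this header): cmpxchg() is typically used to retry a
 * read-modify-write until no other CPU changed the value in between,
 * e.g. a lock-free increment:
 *
 *      int old;
 *
 *      do {
 *              old = counter;
 *      } while (cmpxchg(&counter, old, old + 1) != old);
 */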

extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler (int n, void *addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */