/* $Id: system.h,v 1.86 2001/10/30 04:57:10 davem Exp $ */
#include <linux/config.h>

#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H
#include <linux/kernel.h>
#include <linux/threads.h>	/* NR_CPUS */
#include <linux/thread_info.h>

#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/btfixup.h>

#ifndef __ASSEMBLY__
/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05, /* V8 ploos ploos */
	sun_unknown	= 0x06,
	ap1000		= 0x07, /* almost a sun4m */
};
/* Really, userland should not be looking at any of this... */
#ifdef __KERNEL__

extern enum sparc_cpu sparc_cpu_model;

#ifndef CONFIG_SUN4
#define ARCH_SUN4C_SUN4	(sparc_cpu_model == sun4c)
#define ARCH_SUN4	0
#else
#define ARCH_SUN4C_SUN4	1
#define ARCH_SUN4	1
#endif
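
/*
 * Hedged example, not part of the original header: a runtime check in
 * the style of the ARCH_SUN4C_SUN4 test above, dispatching on the CPU
 * model probed at boot. The helper name is hypothetical.
 */
static inline int sparc_model_is_sun4_family(void)
{
	return sparc_cpu_model == sun4 || sparc_cpu_model == sun4c;
}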
#define SUN4M_NCPUS	4	/* Architectural limit of sun4m. */

extern struct thread_info *current_set[NR_CPUS];

extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page;

extern void sun_do_break(void);
extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
	return serial_console ? 0 : 1;
}
/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 */
extern void flush_user_windows(void);
extern void kill_user_windows(void);
extern void synchronize_user_stack(void);
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);
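
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller that wants to read the current task's user stack frames (as
 * the signal and ptrace paths do) must first spill the register windows,
 * otherwise the frames still held in registers are invisible in memory.
 */
static inline void example_inspect_user_stack(void)
{
	flush_user_windows();		/* spill live windows onto the stack */
	synchronize_user_stack();	/* make sure the spill has landed */
	/* ... user stack frames may now be read from memory ... */
}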
#ifdef CONFIG_SMP
#define SWITCH_ENTER(prv) \
	do { \
	if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
		put_psr(get_psr() | PSR_EF); \
		fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
		       &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
		clear_tsk_thread_flag(prv, TIF_USEDFPU); \
		(prv)->thread.kregs->psr &= ~PSR_EF; \
	} \
	} while(0)

#define SWITCH_DO_LAZY_FPU(next)	/* */
#else
#define SWITCH_ENTER(prv)		/* */
#define SWITCH_DO_LAZY_FPU(nxt) \
	do { \
	if (last_task_used_math != (nxt)) \
		(nxt)->thread.kregs->psr &= ~PSR_EF; \
	} while(0)
#endif
/*
 * Flush windows so that the VM switch which follows
 * would not pull the stack from under us.
 *
 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
 * XXX WTF is the above comment? Found in late teen 2.4.x.
 */
#define prepare_arch_switch(next) do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)
/* Much care has gone into this code, do not touch it.
 *
 * We need to load up regs l0/l1 for the newly forked child
 * case because the trap return path relies on those registers
 * holding certain values, gcc is told that they are clobbered.
 * Gcc needs registers for 3 values in and 1 value out, so we
 * clobber every non-fixed-usage register besides l2/l3/o4/o5.  -DaveM
 *
 * Operand map for the asm below: %0 is the "last" output, %1 is
 * &current_set[cpu], %2 is task_thread_info(next), and %3/%4/%5 are
 * the TI_KPSR/TI_KSP/TI_TASK immediate offsets.
 *
 * Hey Dave, that "do not touch" sign is too much of an incentive
 * - Anton & Pete
 */
#define switch_to(prev, next, last) do { \
	SWITCH_ENTER(prev); \
	SWITCH_DO_LAZY_FPU(next); \
	cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask); \
	__asm__ __volatile__( \
	"sethi	%%hi(here - 0x8), %%o7\n\t" \
	"mov	%%g6, %%g3\n\t" \
	"or	%%o7, %%lo(here - 0x8), %%o7\n\t" \
	"rd	%%psr, %%g4\n\t" \
	"std	%%sp, [%%g6 + %4]\n\t" \
	"rd	%%wim, %%g5\n\t" \
	"wr	%%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"std	%%g4, [%%g6 + %3]\n\t" \
	"ldd	[%2 + %3], %%g4\n\t" \
	"mov	%2, %%g6\n\t" \
	".globl	patchme_store_new_current\n" \
"patchme_store_new_current:\n\t" \
	"st	%2, [%1]\n\t" \
	"wr	%%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"nop\n\t" \
	"nop\n\t"	/* LEON needs all 3 nops: load to %sp depends on CWP. */ \
	"ldd	[%%g6 + %4], %%sp\n\t" \
	"wr	%%g5, 0x0, %%wim\n\t" \
	"ldd	[%%sp + 0x00], %%l0\n\t" \
	"ldd	[%%sp + 0x38], %%i6\n\t" \
	"wr	%%g4, 0x0, %%psr\n\t" \
	"nop\n\t" \
	"nop\n\t" \
	"jmpl	%%o7 + 0x8, %%g0\n\t" \
	" ld	[%%g3 + %5], %0\n\t" \
	"here:\n" \
	: "=&r" (last) \
	: "r" (&(current_set[hard_smp_processor_id()])), \
	  "r" (task_thread_info(next)), \
	  "i" (TI_KPSR), \
	  "i" (TI_KSP), \
	  "i" (TI_TASK) \
	: "g1", "g2", "g3", "g4", "g5", "g7", \
	  "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
	  "i0", "i1", "i2", "i3", "i4", "i5", \
	  "o0", "o1", "o2", "o3", "o7"); \
	} while(0)
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
/*
 * Changing the IRQ level on the Sparc.
 */
extern void local_irq_restore(unsigned long);
extern unsigned long __local_irq_save(void);
extern void local_irq_enable(void);

static inline unsigned long getipl(void)
{
	unsigned long retval;

	__asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
	return retval;
}

#define local_save_flags(flags)	((flags) = getipl())
#define local_irq_save(flags)	((flags) = __local_irq_save())
#define local_irq_disable()	((void) __local_irq_save())
#define irqs_disabled()		((getipl() & PSR_PIL) != 0)
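
/*
 * Hedged usage sketch (standard kernel idiom, not defined in this file):
 * a critical section bracketed by the flag-saving forms above, so the
 * previous interrupt level in the PSR is restored on exit.
 */
static inline void example_irq_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* raise PIL, remember the old PSR */
	/* ... code that must not be interrupted ... */
	local_irq_restore(flags);	/* drop back to the saved level */
}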
/* XXX Change this if we ever use a PSO mode kernel. */
#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()	do { } while(0)
#define set_mb(__var, __value)	do { __var = __value; mb(); } while(0)
#define set_wmb(__var, __value)	set_mb(__var, __value)
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)

#define nop() __asm__ __volatile__ ("nop")
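
/*
 * Illustrative sketch, not part of the original header: the classic
 * producer/consumer pairing that the smp_wmb()/smp_rmb() macros above
 * exist for. The example_* variables are hypothetical.
 */
extern int example_data, example_flag;

static inline void example_producer(void)
{
	example_data = 42;
	smp_wmb();	/* order the data store before the flag store */
	example_flag = 1;
}

static inline int example_consumer(void)
{
	if (!example_flag)
		return -1;
	smp_rmb();	/* order the flag load before the data load */
	return example_data;
}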
/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif

static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (val)
			     : "0" (val), "r" (m)
			     : "memory");
	return val;
#else
	register unsigned long *ptr asm("g1");
	register unsigned long ret asm("g2");

	ptr = (unsigned long *) m;
	ret = val;

	/* Note: this is magic and the nop there is
	   really needed. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___f____xchg32\n\t"
	" nop\n\t"
	: "=&r" (ret)
	: "0" (ret), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return ret;
#endif
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
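
/*
 * Hedged usage sketch, not part of the original header: tas() above is
 * the classic test-and-set building block for a busy-wait lock. The
 * example_* helpers are hypothetical.
 */
static inline void example_busy_lock(volatile unsigned long *lock)
{
	while (tas(lock) != 0)
		;		/* spin until the old value was 0, i.e. we own the lock */
}

static inline void example_busy_unlock(volatile unsigned long *lock)
{
	mb();			/* order the critical section before the release */
	*lock = 0;
}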
extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC_SYSTEM_H) */