#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

#include <linux/irqflags.h>
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/types.h>
#include <asm/ptrace.h>

struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next);

#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
/*
 * switch_to() switches from task prev to task next; last receives the
 * task we most recently switched away from.
 */
#define switch_to(prev, next, last) do {                                \
        struct task_struct *__last;                                     \
        register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
        register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
        register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
        register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
        register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
        register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;  \
        __asm__ __volatile__ (".balign 4\n\t"                           \
                              "stc.l gbr, @-r15\n\t"                    \
                              "sts.l pr, @-r15\n\t"                     \
                              "mov.l r8, @-r15\n\t"                     \
                              "mov.l r9, @-r15\n\t"                     \
                              "mov.l r10, @-r15\n\t"                    \
                              "mov.l r11, @-r15\n\t"                    \
                              "mov.l r12, @-r15\n\t"                    \
                              "mov.l r13, @-r15\n\t"                    \
                              "mov.l r14, @-r15\n\t"                    \
                              "mov.l r15, @r1 ! save SP\n\t"            \
                              "mov.l @r6, r15 ! change to new stack\n\t" \
                              "mova 1f, %0\n\t"                         \
                              "mov.l %0, @r2 ! save PC\n\t"             \
                              "mov.l 2f, %0\n\t"                        \
                              "jmp @%0 ! call __switch_to\n\t"          \
                              " lds r7, pr ! with return to new PC\n\t" \
                              ".balign 4\n"                             \
                              "2:\n\t"                                  \
                              ".long __switch_to\n"                     \
                              "1:\n\t"                                  \
                              "mov.l @r15+, r14\n\t"                    \
                              "mov.l @r15+, r13\n\t"                    \
                              "mov.l @r15+, r12\n\t"                    \
                              "mov.l @r15+, r11\n\t"                    \
                              "mov.l @r15+, r10\n\t"                    \
                              "mov.l @r15+, r9\n\t"                     \
                              "mov.l @r15+, r8\n\t"                     \
                              "lds.l @r15+, pr\n\t"                     \
                              "ldc.l @r15+, gbr\n\t"                    \
                              : "=z" (__last)                           \
                              : "r" (__ts1), "r" (__ts2), "r" (__ts4), \
                                "r" (__ts5), "r" (__ts6), "r" (__ts7)  \
                              : "r3", "t");                             \
        last = __last;                                                  \
} while (0)
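
/*
 * Illustrative usage sketch (not part of this header): the scheduler's
 * context-switch path invokes the macro along these lines, where prev,
 * next and last are all task_struct pointers:
 *
 *      struct task_struct *last;
 *
 *      switch_to(prev, next, last);
 *      // When prev is eventually scheduled back in, execution resumes
 *      // here and last names the task that ran immediately before us.
 */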

#ifdef CONFIG_CPU_SH4A
#define __icbi()                                \
{                                               \
        unsigned long __addr;                   \
        __addr = 0xa8000000;                    \
        __asm__ __volatile__(                   \
                "icbi %0\n\t"                   \
                : /* no output */               \
                : "m" (__m(__addr)));           \
}
#endif

/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#ifdef CONFIG_CPU_SH4A
#define mb()            __asm__ __volatile__ ("synco": : :"memory")
#define rmb()           mb()
#define wmb()           __asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier()  __icbi()
#define read_barrier_depends()  do { } while(0)
#else
#define mb()            __asm__ __volatile__ ("": : :"memory")
#define rmb()           mb()
#define wmb()           __asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()  __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()  do { } while(0)
#endif
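
/*
 * Illustrative sketch (not part of this header): a control register
 * update must be followed by ctrl_barrier() before the new value can be
 * assumed to have taken effect, e.g. with the classic ctrl_outl()
 * write helper and the MMU control register:
 *
 *      ctrl_outl(val, MMUCR);
 *      ctrl_barrier();         // 8 nops on legacy parts, icbi on sh4a
 */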

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#endif
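
/*
 * Illustrative sketch (not part of this header): the classic pairing of
 * smp_wmb() on the producer side with smp_rmb() on the consumer side,
 * for hypothetical fields payload and ready:
 *
 *      // producer                     // consumer
 *      buf->payload = v;               while (!buf->ready)
 *      smp_wmb();                              cpu_relax();
 *      buf->ready = 1;                 smp_rmb();
 *                                      v = buf->payload;
 */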

#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)

/*
 * Jump to the P2 (uncached) area.
 * When handling the TLB or caches, we need to do it from the P2 area.
 */
#define jump_to_P2()                            \
do {                                            \
        unsigned long __dummy;                  \
        __asm__ __volatile__(                   \
                "mov.l 1f, %0\n\t"              \
                "or %1, %0\n\t"                 \
                "jmp @%0\n\t"                   \
                " nop\n\t"                      \
                ".balign 4\n"                   \
                "1: .long 2f\n"                 \
                "2:"                            \
                : "=&r" (__dummy)               \
                : "r" (0x20000000));            \
} while (0)

/*
 * Back to P1 area.
 */
#define back_to_P1()                            \
do {                                            \
        unsigned long __dummy;                  \
        ctrl_barrier();                         \
        __asm__ __volatile__(                   \
                "mov.l 1f, %0\n\t"              \
                "jmp @%0\n\t"                   \
                " nop\n\t"                      \
                ".balign 4\n"                   \
                "1: .long 2f\n"                 \
                "2:"                            \
                : "=&r" (__dummy));             \
} while (0)
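
/*
 * Illustrative sketch (not part of this header): cache/TLB manipulation
 * is bracketed by the pair above, e.g. a hypothetical cache operation:
 *
 *      jump_to_P2();                   // run from the uncached mirror
 *      ... poke CCR / flush lines ...
 *      back_to_P1();                   // barrier, then back to cached P1
 */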

/*
 * xchg is implemented by disabling interrupts around a plain load/store
 * pair, which makes it atomic with respect to the local CPU only.
 */
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);
        return retval;
}

static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val & 0xff;
        local_irq_restore(flags);
        return retval;
}

extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size)                            \
({                                                      \
        unsigned long __xchg__res;                      \
        volatile void *__xchg_ptr = (ptr);              \
        switch (size) {                                 \
        case 4:                                         \
                __xchg__res = xchg_u32(__xchg_ptr, x);  \
                break;                                  \
        case 1:                                         \
                __xchg__res = xchg_u8(__xchg_ptr, x);   \
                break;                                  \
        default:                                        \
                __xchg_called_with_bad_pointer();       \
                __xchg__res = x;                        \
                break;                                  \
        }                                               \
                                                        \
        __xchg__res;                                    \
})

#define xchg(ptr,x)     \
        ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
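
/*
 * Illustrative sketch (not part of this header): xchg() atomically
 * stores a new value and returns the old one, e.g. for a hypothetical
 * lock word:
 *
 *      static volatile u32 lock_word;
 *
 *      while (xchg(&lock_word, 1) != 0)
 *              cpu_relax();            // spin until we observed 0
 *      ... critical section ...
 *      lock_word = 0;
 */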

static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
                                          unsigned long new)
{
        __u32 retval;
        unsigned long flags;

        local_irq_save(flags);
        retval = *m;
        if (retval == old)
                *m = new;
        local_irq_restore(flags);       /* implies memory barrier */
        return retval;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n)                                                \
({                                                                      \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,       \
                                       (unsigned long)_n_, sizeof(*(ptr))); \
})
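
/*
 * Illustrative sketch (not part of this header): a lock-free increment
 * built on cmpxchg(), retrying until no other context raced with us;
 * counter is a hypothetical u32:
 *
 *      u32 old, new;
 *
 *      do {
 *              old = counter;
 *              new = old + 1;
 *      } while (cmpxchg(&counter, old, new) != old);
 */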

extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
extern void *set_exception_table_vec(unsigned int vec, void *handler);

static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
        return set_exception_table_vec(evt >> 5, handler);
}
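
/*
 * Illustrative sketch (not part of this header): SH exception event
 * (EXPEVT) codes are spaced 0x20 apart, hence the >> 5 above. For a
 * hypothetical handler installed at event code 0x1e0:
 *
 *      set_exception_table_evt(0x1e0, my_handler);  // vector 0x1e0 >> 5 == 0xf
 */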

/*
 * SH-2A has both 16- and 32-bit opcodes, so do lame encoding checks.
 */
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
#else
#define instruction_size(insn) (2)
#endif
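
/*
 * Illustrative sketch (not part of this header): a trap handler that
 * wants to skip the trapping instruction advances the PC by the decoded
 * size rather than a fixed 2 bytes:
 *
 *      insn = *(unsigned short *)regs->pc;     // hypothetical fetch
 *      regs->pc += instruction_size(insn);     // 2, or 4 on SH-2A
 */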

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
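
/*
 * Illustrative sketch (not part of this header): drivers bracket a
 * timing-critical I/O sequence so the idle loop will not execute
 * "sleep" in the middle of the transfer:
 *
 *      disable_hlt();
 *      ... perform the critical transfer ...
 *      enable_hlt();
 */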

void default_idle(void);
void per_cpu_trap_init(void);

asmlinkage void break_point_trap(void);
asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
                                   unsigned long r6, unsigned long r7,
                                   struct pt_regs __regs);
asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
                                 unsigned long r6, unsigned long r7,
                                 struct pt_regs __regs);

#define arch_align_stack(x) (x)

#endif /* __ASM_SH_SYSTEM_H */