#ifndef __ASM_SH64_SYSTEM_H
#define __ASM_SH64_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/system.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 * Copyright (C) 2004  Richard Curnow
 *
 */

#include <asm/registers.h>
#include <asm/processor.h>
/*
 * switch_to(n) should switch tasks to task nr n, first checking that
 * n isn't the current task, in which case it does nothing.
 */

typedef struct {
	unsigned long seg;
} mm_segment_t;

extern struct task_struct *sh64_switch_to(struct task_struct *prev,
					  struct thread_struct *prev_thread,
					  struct task_struct *next,
					  struct thread_struct *next_thread);
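
/*
 * If the incoming task is not the last to have used the FPU, set
 * SR.FD in its saved SR so that its first FPU access traps and the
 * FPU state can be switched lazily.
 */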
#define switch_to(prev,next,last) \
do { \
	if (last_task_used_math != next) { \
		struct pt_regs *regs = next->thread.uregs; \
		if (regs) regs->sr |= SR_FD; \
	} \
	last = sh64_switch_to(prev, &prev->thread, next, &next->thread); \
} while (0)

#define nop() __asm__ __volatile__ ("nop")

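/*
 * xchg() atomically exchanges *ptr with x and returns the old value.
 * __xchg_called_with_bad_pointer() is deliberately left without a
 * definition so that an xchg() on an unsupported operand size fails
 * at link time rather than silently misbehaving at run time.
 */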
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr), 1))

extern void __xchg_called_with_bad_pointer(void);

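/*
 * Memory barriers: "synco" is the sh64 synchronization instruction,
 * ordering preceding memory accesses against following ones, so
 * rmb() simply aliases the full barrier.
 */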
#define mb() __asm__ __volatile__ ("synco" : : : "memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("synco" : : : "memory")
#define read_barrier_depends() do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#endif /* CONFIG_SMP */

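/*
 * set_mb()/set_rmb() assign a value and order the store against
 * surrounding accesses by routing it through xchg().
 */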
#define set_rmb(var, value) do { (void)xchg(&var, value); } while (0)
#define set_mb(var, value) set_rmb(var, value)

/* Interrupt Control */
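/*
 * Interrupts are masked either via the SR.IMASK field (bits 4-7, the
 * default) or, when HARD_CLI is defined, via the SR.BL bit, which
 * blocks all interrupts and exceptions outright.
 */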
#ifndef HARD_CLI
#define SR_MASK_L 0x000000f0L
#define SR_MASK_LL 0x00000000000000f0LL
#else
#define SR_MASK_L 0x10000000L
#define SR_MASK_LL 0x0000000010000000LL
#endif

static __inline__ void local_irq_enable(void)
{
	/* cli/sti based on SR.BL */
	unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL;

	__asm__ __volatile__("getcon " __SR ", %0\n\t"
			     "and %0, %1, %0\n\t"
			     "putcon %0, " __SR "\n\t"
			     : "=&r" (__dummy0)
			     : "r" (__dummy1));
}

static __inline__ void local_irq_disable(void)
{
	/* cli/sti based on SR.BL */
	unsigned long long __dummy0, __dummy1 = SR_MASK_LL;

	__asm__ __volatile__("getcon " __SR ", %0\n\t"
			     "or %0, %1, %0\n\t"
			     "putcon %0, " __SR "\n\t"
			     : "=&r" (__dummy0)
			     : "r" (__dummy1));
}

#define local_save_flags(x) \
(__extension__ ({ \
	unsigned long long __dummy = SR_MASK_LL; \
	__asm__ __volatile__( \
		"getcon " __SR ", %0\n\t" \
		"and %0, %1, %0" \
		: "=&r" (x) \
		: "r" (__dummy)); \
}))

#define local_irq_save(x) \
(__extension__ ({ \
	unsigned long long __d2 = SR_MASK_LL, __d1; \
	__asm__ __volatile__( \
		"getcon " __SR ", %1\n\t" \
		"or %1, r63, %0\n\t" \
		"or %1, %2, %1\n\t" \
		"putcon %1, " __SR "\n\t" \
		"and %0, %2, %0" \
		: "=&r" (x), "=&r" (__d1) \
		: "r" (__d2)); \
}))

#define local_irq_restore(x) do { \
	if (((x) & SR_MASK_L) == 0)	/* dropping to 0 ? */ \
		local_irq_enable();	/* yes...re-enable */ \
} while (0)

#define irqs_disabled() \
({ \
	unsigned long flags; \
	local_save_flags(flags); \
	(flags != 0); \
})

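/*
 * The exchange helpers fake atomicity by disabling local interrupts
 * around a plain load/store pair.  That is sufficient only on UP; it
 * does not order or exclude accesses from another CPU.
 */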
static inline unsigned long xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);

	return retval;
}

static inline unsigned long xchg_u8(volatile unsigned char *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);

	return retval;
}

static __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	case 1:
		return xchg_u8(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#ifdef CONFIG_SH_ALPHANUMERIC
/* This is only used for debugging. */
extern void print_seg(char *file, int line);
#define PLS() print_seg(__FILE__, __LINE__)
#else  /* CONFIG_SH_ALPHANUMERIC */
#define PLS()
#endif /* CONFIG_SH_ALPHANUMERIC */

#define PL() printk("@ <%s,%s:%d>\n", __FILE__, __FUNCTION__, __LINE__)

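/* No stack randomization on sh64: the stack top is used as-is. */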
#define arch_align_stack(x) (x)

#endif /* __ASM_SH64_SYSTEM_H */