
#ifndef __ASM_SH64_SYSTEM_H
#define __ASM_SH64_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/system.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 * Copyright (C) 2004  Richard Curnow
 */

#include <asm/registers.h>
#include <asm/processor.h>
/*
 * switch_to() switches the CPU to the next task, saving and restoring
 * per-thread state via sh64_switch_to().
 */

typedef struct {
        unsigned long seg;
} mm_segment_t;

extern struct task_struct *sh64_switch_to(struct task_struct *prev,
                                          struct thread_struct *prev_thread,
                                          struct task_struct *next,
                                          struct thread_struct *next_thread);
#define switch_to(prev,next,last)                                         \
do {                                                                      \
        if (last_task_used_math != next) {                                \
                struct pt_regs *regs = next->thread.uregs;                \
                if (regs) regs->sr |= SR_FD;                              \
        }                                                                 \
        last = sh64_switch_to(prev, &prev->thread, next, &next->thread);  \
} while (0)
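
/*
 * Sketch of how this is used (generic scheduler code, not defined here):
 * context_switch() invokes switch_to(prev, next, prev), and "last" ends up
 * holding the task we switched away from once we run again.  Setting SR_FD
 * in the next task's saved SR disables the FPU, so its first FP use traps
 * and the FP state can be restored lazily.
 */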
#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

#define mb()    __asm__ __volatile__ ("synco" : : : "memory")
#define rmb()   mb()
#define wmb()   mb()
#define read_barrier_depends()  do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#endif /* CONFIG_SMP */

#define set_rmb(var, value) do { (void)xchg(&var, value); } while (0)
#define set_mb(var, value) set_rmb(var, value)
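
/*
 * Illustrative barrier pairing (the variables are made up, not part of
 * this header): a producer publishes data before setting a flag, and the
 * consumer orders its reads to match:
 *
 *      CPU 0                           CPU 1
 *      data = compute();               while (!flag)
 *      smp_wmb();                              cpu_relax();
 *      flag = 1;                       smp_rmb();
 *                                      use(data);
 */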
/* Interrupt Control */
#ifndef HARD_CLI
#define SR_MASK_L 0x000000f0L
#define SR_MASK_LL 0x00000000000000f0LL
#else
#define SR_MASK_L 0x10000000L
#define SR_MASK_LL 0x0000000010000000LL
#endif
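
/*
 * Without HARD_CLI the mask covers SR.IMASK (bits 4..7): "disabled" means
 * all interrupt priority levels are masked.  With HARD_CLI it covers
 * SR.BL (bit 28), which blocks interrupts outright.
 */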
static __inline__ void local_irq_enable(void)
{
        /* Clear the SR mask bits (IMASK or BL) to re-enable interrupts. */
        unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL;

        __asm__ __volatile__("getcon " __SR ", %0\n\t"
                             "and %0, %1, %0\n\t"
                             "putcon %0, " __SR "\n\t"
                             : "=&r" (__dummy0)
                             : "r" (__dummy1));
}
static __inline__ void local_irq_disable(void)
{
        /* Set the SR mask bits (IMASK or BL) to disable interrupts. */
        unsigned long long __dummy0, __dummy1 = SR_MASK_LL;

        __asm__ __volatile__("getcon " __SR ", %0\n\t"
                             "or %0, %1, %0\n\t"
                             "putcon %0, " __SR "\n\t"
                             : "=&r" (__dummy0)
                             : "r" (__dummy1));
}
#define local_save_flags(x)                                             \
(__extension__ ({ unsigned long long __dummy = SR_MASK_LL;              \
        __asm__ __volatile__(                                           \
                "getcon " __SR ", %0\n\t"                               \
                "and %0, %1, %0"                                        \
                : "=&r" (x)                                             \
                : "r" (__dummy)); }))
/* On sh64, r63 always reads as zero, so "or %1, r63, %0" copies SR. */
#define local_irq_save(x)                                               \
(__extension__ ({ unsigned long long __d2 = SR_MASK_LL, __d1;           \
        __asm__ __volatile__(                                           \
                "getcon " __SR ", %1\n\t"                               \
                "or %1, r63, %0\n\t"                                    \
                "or %1, %2, %1\n\t"                                     \
                "putcon %1, " __SR "\n\t"                               \
                "and %0, %2, %0"                                        \
                : "=&r" (x), "=&r" (__d1)                               \
                : "r" (__d2)); }))
#define local_irq_restore(x) do {                                       \
        if (((x) & SR_MASK_L) == 0)     /* dropping to 0? */            \
                local_irq_enable();     /* yes... re-enable */          \
} while (0)
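
/*
 * Typical critical section (sketch, caller-side):
 *
 *      unsigned long flags;
 *      local_irq_save(flags);
 *      ... touch data shared with an interrupt handler ...
 *      local_irq_restore(flags);
 */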
#define irqs_disabled()                 \
({                                      \
        unsigned long flags;            \
        local_save_flags(flags);        \
        (flags != 0);                   \
})
/*
 * The exchange is made pseudo-atomic by disabling local interrupts around
 * the load/store pair; on a UP kernel that is sufficient.
 */
static inline unsigned long xchg_u32(volatile int *m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);
        return retval;
}
static inline unsigned long xchg_u8(volatile unsigned char *m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val & 0xff;
        local_irq_restore(flags);
        return retval;
}
static __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        switch (size) {
        case 4:
                return xchg_u32(ptr, x);
        case 1:
                return xchg_u8(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}
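
/*
 * Usage sketch (names invented for illustration): atomically take over a
 * state word and act on the previous value:
 *
 *      static volatile int state;
 *      ...
 *      int old = xchg(&state, NEW_STATE);
 *      if (old == OLD_STATE)
 *              ... we own the transition ...
 *
 * A size other than 1 or 4 falls through to __xchg_called_with_bad_pointer(),
 * which is declared but never defined, so the mistake surfaces as a link
 * error rather than silently miscompiled code.
 */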
/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
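
/*
 * Sketch of a caller (driver code, not part of this header): bracket a
 * timing-critical polled transfer so the idle loop cannot sleep the CPU:
 *
 *      disable_hlt();
 *      ... polled I/O ...
 *      enable_hlt();
 */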
#ifdef CONFIG_SH_ALPHANUMERIC
/* This is only used for debugging. */
extern void print_seg(char *file, int line);
#define PLS() print_seg(__FILE__, __LINE__)
#else /* CONFIG_SH_ALPHANUMERIC */
#define PLS()
#endif /* CONFIG_SH_ALPHANUMERIC */

#define PL() printk("@ <%s,%s:%d>\n", __FILE__, __FUNCTION__, __LINE__)

#define arch_align_stack(x) (x)

#endif /* __ASM_SH64_SYSTEM_H */