#ifndef __ASM_SH64_SYSTEM_H
#define __ASM_SH64_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/system.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 * Copyright (C) 2004  Richard Curnow
 *
 */

#include <linux/config.h>
#include <asm/registers.h>
#include <asm/processor.h>

/*
 * switch_to() should switch tasks to task nr n, first
 */

typedef struct {
        unsigned long seg;
} mm_segment_t;

extern struct task_struct *sh64_switch_to(struct task_struct *prev,
                                          struct thread_struct *prev_thread,
                                          struct task_struct *next,
                                          struct thread_struct *next_thread);
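
/*
 * Note: FPU state is switched lazily.  If the incoming task was not the
 * last one to use the FPU, switch_to() below sets SR.FD (the FPU-disable
 * bit) in that task's saved status register, so its next FPU instruction
 * faults and the exception handler can save/restore FPU context on demand
 * instead of doing it on every context switch.
 */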

#define switch_to(prev,next,last) \
do { \
        if (last_task_used_math != next) { \
                struct pt_regs *regs = next->thread.uregs; \
                if (regs) regs->sr |= SR_FD; \
        } \
        last = sh64_switch_to(prev, &prev->thread, next, &next->thread); \
} while (0)

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr), 1))

extern void __xchg_called_with_bad_pointer(void);
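
/*
 * Illustrative only (not part of the original file): tas() is the
 * classic test-and-set primitive.  A hypothetical caller spins until
 * the returned (previous) value is 0, meaning it won the flag:
 *
 *        static volatile int lock;
 *
 *        while (tas(&lock))
 *                ;
 *        ...critical section...
 *        lock = 0;
 */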

#define mb() __asm__ __volatile__ ("synco" : : : "memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("synco" : : : "memory")
#define read_barrier_depends() do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#endif /* CONFIG_SMP */

#define set_rmb(var, value) do { xchg(&var, value); } while (0)
#define set_mb(var, value) set_rmb(var, value)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
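
/*
 * Note: set_rmb()/set_mb() perform the store through xchg(), which on
 * this port brackets the update with interrupt masking, so the
 * assignment also acts as an ordering point; set_wmb() is a plain
 * store followed by wmb().
 */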

/* Interrupt Control */

#ifndef HARD_CLI
#define SR_MASK_L 0x000000f0L
#define SR_MASK_LL 0x00000000000000f0LL
#else
#define SR_MASK_L 0x10000000L
#define SR_MASK_LL 0x0000000010000000LL
#endif
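
/*
 * Note: the masks above select how interrupts are blocked.  Without
 * HARD_CLI they cover SR.IMASK (bits 4-7), i.e. disabling means raising
 * the interrupt mask level; with HARD_CLI they cover the SR.BL blocking
 * bit (bit 28) instead.
 */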

static __inline__ void local_irq_enable(void)
{
        /* unmask interrupts: clear the SR_MASK_LL bits in SR */
        unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL;

        __asm__ __volatile__("getcon " __SR ", %0\n\t"
                             "and %0, %1, %0\n\t"
                             "putcon %0, " __SR "\n\t"
                             : "=&r" (__dummy0)
                             : "r" (__dummy1));
}

static __inline__ void local_irq_disable(void)
{
        /* mask interrupts: set the SR_MASK_LL bits in SR */
        unsigned long long __dummy0, __dummy1 = SR_MASK_LL;

        __asm__ __volatile__("getcon " __SR ", %0\n\t"
                             "or %0, %1, %0\n\t"
                             "putcon %0, " __SR "\n\t"
                             : "=&r" (__dummy0)
                             : "r" (__dummy1));
}

#define local_save_flags(x) \
(__extension__ ({ unsigned long long __dummy = SR_MASK_LL; \
        __asm__ __volatile__( \
                "getcon " __SR ", %0\n\t" \
                "and %0, %1, %0" \
                : "=&r" (x) \
                : "r" (__dummy)); }))

#define local_irq_save(x) \
(__extension__ ({ unsigned long long __d2 = SR_MASK_LL, __d1; \
        __asm__ __volatile__( \
                "getcon " __SR ", %1\n\t" \
                "or %1, r63, %0\n\t" \
                "or %1, %2, %1\n\t" \
                "putcon %1, " __SR "\n\t" \
                "and %0, %2, %0" \
                : "=&r" (x), "=&r" (__d1) \
                : "r" (__d2)); }))

#define local_irq_restore(x) do { \
        if (((x) & SR_MASK_L) == 0)     /* dropping to 0 ? */ \
                local_irq_enable();     /* yes...re-enable */ \
} while (0)
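
/*
 * Illustrative usage of the save/restore pair (xchg_u32() below is an
 * in-file example): save masks interrupts and records the old state,
 * restore unmasks them only if they were unmasked beforehand:
 *
 *        unsigned long flags;
 *
 *        local_irq_save(flags);
 *        ...critical section...
 *        local_irq_restore(flags);
 */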

#define irqs_disabled() \
({ \
        unsigned long flags; \
        local_save_flags(flags); \
        (flags != 0); \
})

static inline unsigned long xchg_u32(volatile int *m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);
        return retval;
}

static inline unsigned long xchg_u8(volatile unsigned char *m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val & 0xff;
        local_irq_restore(flags);
        return retval;
}

static __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        switch (size) {
        case 4:
                return xchg_u32(ptr, x);
        case 1:
                return xchg_u8(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}
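
/*
 * Note: the exchange is made atomic by masking interrupts, which is
 * sufficient only on a uniprocessor; on SMP another CPU could still
 * interleave.  Unsupported sizes are diverted to the deliberately
 * undefined __xchg_called_with_bad_pointer(), so a bad call fails at
 * link time rather than silently misbehaving.
 */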

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#ifdef CONFIG_SH_ALPHANUMERIC
/* This is only used for debugging. */
extern void print_seg(char *file, int line);
#define PLS() print_seg(__FILE__, __LINE__)
#else  /* CONFIG_SH_ALPHANUMERIC */
#define PLS()
#endif /* CONFIG_SH_ALPHANUMERIC */

#define PL() printk("@ <%s,%s:%d>\n", __FILE__, __FUNCTION__, __LINE__)

#define arch_align_stack(x) (x)

#endif /* __ASM_SH64_SYSTEM_H */