#ifndef __ASM_SH64_SYSTEM_H
#define __ASM_SH64_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/system.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 * Copyright (C) 2004  Richard Curnow
 */

#include <asm/registers.h>
#include <asm/processor.h>
/*
 * switch_to() should switch tasks to task nr n, first
 */

typedef struct {
	unsigned long seg;
} mm_segment_t;

extern struct task_struct *sh64_switch_to(struct task_struct *prev,
					   struct thread_struct *prev_thread,
					   struct task_struct *next,
					   struct thread_struct *next_thread);
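
/*
 * If the incoming task is not the last one to have used the FPU, the macro
 * below sets SR_FD in its saved SR, so that its first FPU instruction will
 * trap and the FPU state can be restored lazily at that point;
 * last_task_used_math tracks whose state is still live in the FPU.
 */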
#define switch_to(prev,next,last) \
do { \
	if (last_task_used_math != next) { \
		struct pt_regs *regs = next->thread.uregs; \
		if (regs) regs->sr |= SR_FD; \
	} \
	last = sh64_switch_to(prev, &prev->thread, next, &next->thread); \
} while (0)
#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);
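
/*
 * "synco" is the SH-5 memory synchronization instruction: it drains
 * outstanding memory accesses, so it serves as a full barrier for both
 * loads and stores below.
 */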
#define mb()	__asm__ __volatile__ ("synco" : : : "memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("synco" : : : "memory")
#define read_barrier_depends()	do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif /* CONFIG_SMP */
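
/*
 * set_mb() stores a value and acts as a full memory barrier: the store is
 * performed through xchg(), so it cannot be reordered against surrounding
 * accesses.
 */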
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
/* Interrupt Control */
#ifndef HARD_CLI
#define SR_MASK_L	0x000000f0L
#define SR_MASK_LL	0x00000000000000f0LL
#else
#define SR_MASK_L	0x10000000L
#define SR_MASK_LL	0x0000000010000000LL
#endif
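
/*
 * Without HARD_CLI these masks cover the SR.IMASK interrupt-mask field
 * (bits 4-7); with HARD_CLI they cover SR.BL (bit 28), which blocks all
 * interrupts and exceptions while set.
 */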
static __inline__ void local_irq_enable(void)
{
	/* cli/sti based on SR.BL */
	unsigned long long __dummy0, __dummy1 = ~SR_MASK_LL;

	__asm__ __volatile__("getcon " __SR ", %0\n\t"
			     "and %0, %1, %0\n\t"
			     "putcon %0, " __SR "\n\t"
			     : "=&r" (__dummy0)
			     : "r" (__dummy1));
}

static __inline__ void local_irq_disable(void)
{
	/* cli/sti based on SR.BL */
	unsigned long long __dummy0, __dummy1 = SR_MASK_LL;

	__asm__ __volatile__("getcon " __SR ", %0\n\t"
			     "or %0, %1, %0\n\t"
			     "putcon %0, " __SR "\n\t"
			     : "=&r" (__dummy0)
			     : "r" (__dummy1));
}
#define local_save_flags(x) \
(__extension__ ({ unsigned long long __dummy = SR_MASK_LL; \
	__asm__ __volatile__( \
		"getcon " __SR ", %0\n\t" \
		"and %0, %1, %0" \
		: "=&r" (x) \
		: "r" (__dummy));}))
#define local_irq_save(x) \
(__extension__ ({ unsigned long long __d2 = SR_MASK_LL, __d1; \
	__asm__ __volatile__( \
		"getcon " __SR ", %1\n\t" \
		"or %1, r63, %0\n\t" \
		"or %1, %2, %1\n\t" \
		"putcon %1, " __SR "\n\t" \
		"and %0, %2, %0" \
		: "=&r" (x), "=&r" (__d1) \
		: "r" (__d2));}))
#define local_irq_restore(x) do { \
	if (((x) & SR_MASK_L) == 0)	/* dropping to 0 ? */ \
		local_irq_enable();	/* yes...re-enable */ \
} while (0)

#define irqs_disabled() \
({ \
	unsigned long flags; \
	local_save_flags(flags); \
	(flags != 0); \
})
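
/*
 * Typical usage of the flag-based interrupt control above (a minimal
 * sketch, not part of the original header):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... code that must not be interrupted locally ...
 *	local_irq_restore(flags);
 *
 * local_irq_restore() only re-enables interrupts if they were enabled
 * when the flags were saved.
 */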
static inline unsigned long xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static inline unsigned long xchg_u8(volatile unsigned char *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}
static __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	case 1:
		return xchg_u8(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
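
/*
 * These exchange primitives serialize only by disabling local interrupts
 * around the read-modify-write, which is sufficient on a uniprocessor but
 * not across CPUs. A minimal usage sketch of xchg() as a test-and-set
 * lock (illustrative, not from the original file):
 *
 *	static volatile int lock;
 *
 *	while (xchg(&lock, 1) != 0)
 *		;			spin until the old value was 0
 *	... critical section ...
 *	lock = 0;
 */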
/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
#ifdef CONFIG_SH_ALPHANUMERIC
/* This is only used for debugging. */
extern void print_seg(char *file, int line);
#define PLS() print_seg(__FILE__, __LINE__)
#else  /* CONFIG_SH_ALPHANUMERIC */
#define PLS()
#endif /* CONFIG_SH_ALPHANUMERIC */

#define PL() printk("@ <%s,%s:%d>\n", __FILE__, __FUNCTION__, __LINE__)

#define arch_align_stack(x) (x)

#endif /* __ASM_SH64_SYSTEM_H */