system.h

#ifndef _H8300_SYSTEM_H
#define _H8300_SYSTEM_H

#include <linux/linkage.h>

struct pt_regs;

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.  This
 * also clears the TS-flag if the task we switched to has used the
 * math co-processor most recently.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1.  Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior if a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1.  This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab:
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 *
 * H8/300 porting 2002/09/04 Yoshinori Sato
 */
asmlinkage void resume(void);

#define switch_to(prev, next, last) {			\
	void *_last;					\
	__asm__ __volatile__(				\
		"mov.l	%1, er0\n\t"			\
		"mov.l	%2, er1\n\t"			\
		"mov.l	%3, er2\n\t"			\
		"jsr	@_resume\n\t"			\
		"mov.l	er2,%0\n\t"			\
		: "=r" (_last)				\
		: "r" (&(prev->thread)),		\
		  "r" (&(next->thread)),		\
		  "g" (prev)				\
		: "cc", "er0", "er1", "er2", "er3");	\
	(last) = _last;					\
}
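
/*
 * A minimal usage sketch (hypothetical scheduler-side caller, not code
 * from this header): after the macro "returns", i.e. once this task has
 * been switched back in, 'last' names the task that ran just before us.
 *
 *	struct task_struct *last;
 *
 *	switch_to(prev, next, last);
 *	// only reached when some other task later switches back to 'prev'
 */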
#define __sti() asm volatile ("andc #0x7f,ccr")
#define __cli() asm volatile ("orc  #0x80,ccr")

#define __save_flags(x) \
	asm volatile ("stc ccr,%w0" : "=r" (x))

#define __restore_flags(x) \
	asm volatile ("ldc %w0,ccr" : : "r" (x))

#define irqs_disabled()			\
({					\
	unsigned char flags;		\
	__save_flags(flags);		\
	((flags & 0x80) == 0x80);	\
})

#define iret() __asm__ __volatile__ ("rte" : : : "memory", "sp", "cc")

/* For spinlocks etc */
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
#define local_irq_save(x)	({ __save_flags(x); local_irq_disable(); })
#define local_irq_restore(x)	__restore_flags(x)
#define local_save_flags(x)	__save_flags(x)
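
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * header): protect a short critical section against interrupt handlers
 * running on the local CPU.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// save CCR, then set the I bit (IRQs off)
 *	shared_count++;			// data also touched from IRQ context
 *	local_irq_restore(flags);	// put CCR back exactly as it was
 */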
/*
 * Force strict CPU ordering.
 * Not really required on H8...
 */
#define nop()	asm volatile ("nop"::)
#define mb()	asm volatile (""   : : : "memory")
#define rmb()	asm volatile (""   : : : "memory")
#define wmb()	asm volatile (""   : : : "memory")
#define set_mb(var, value) do { xchg(&var, value); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do { } while (0)
#endif
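
/*
 * A minimal usage sketch (hypothetical producer code, not part of this
 * header): on this port the barriers reduce to compiler barriers, but
 * the idiom is the usual publish-then-signal ordering.
 *
 *	buf[head] = item;	// write the data first...
 *	wmb();			// ...order it before...
 *	head = next_head;	// ...the index that a reader polls
 */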
#define xchg(ptr, x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

/*
 * There is no atomic exchange instruction, so the swap is made atomic
 * with respect to this CPU by disabling interrupts around it.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("mov.b %2,%0\n\t"
			 "mov.b %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("mov.w %2,%0\n\t"
			 "mov.w %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("mov.l %2,%0\n\t"
			 "mov.l %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	default:
		tmp = 0;
	}

	local_irq_restore(flags);
	return tmp;
}
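
/*
 * A minimal usage sketch (hypothetical caller, not part of this header):
 * xchg() as a simple test-and-set flag.  The swap returns the previous
 * value, so seeing 0 means we were the one who set the flag.
 *
 *	static unsigned char busy;
 *
 *	if (xchg(&busy, 1) == 0) {
 *		// we own the flag; do the work, then release it
 *		xchg(&busy, 0);
 *	}
 */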
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("jmp @@0");			\
})

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
						     (unsigned long)(n),       \
						     sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
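
/*
 * A minimal usage sketch (hypothetical counter update, not part of this
 * header): retry until nothing on this CPU changed the value between the
 * read and the compare-and-swap.  cmpxchg_local() returns the old value.
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg_local(&counter, old, new) != old);
 */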
#define arch_align_stack(x) (x)

extern void die(const char *str, struct pt_regs *fp, unsigned long err);

#endif /* _H8300_SYSTEM_H */