
#ifndef _H8300_SYSTEM_H
#define _H8300_SYSTEM_H

#include <linux/linkage.h>
#include <linux/irqflags.h>

struct pt_regs;
/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing. This
 * also clears the TS-flag if the task we switched to has used the
 * math co-processor most recently.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior when a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself, and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions, as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab:
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 *
 * H8/300 porting 2002/09/04 Yoshinori Sato
 */
asmlinkage void resume(void);

#define switch_to(prev, next, last)                     \
{                                                       \
        void *_last;                                    \
        __asm__ __volatile__(                           \
                "mov.l  %1, er0\n\t"                    \
                "mov.l  %2, er1\n\t"                    \
                "mov.l  %3, er2\n\t"                    \
                "jsr    @_resume\n\t"                   \
                "mov.l  er2, %0\n\t"                    \
                : "=r" (_last)                          \
                : "r" (&(prev->thread)),                \
                  "r" (&(next->thread)),                \
                  "g" (prev)                            \
                : "cc", "er0", "er1", "er2", "er3");    \
        (last) = _last;                                 \
}
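
/*
 * Illustration only, not part of this header: a sketch of how the
 * scheduler would use this macro. pick_next() here is a hypothetical
 * stand-in for the real task-selection logic:
 *
 *      struct task_struct *prev = current;
 *      struct task_struct *next = pick_next();
 *      struct task_struct *last;
 *
 *      switch_to(prev, next, last);
 *
 * Execution continues past switch_to() only once "prev" is scheduled
 * back in; "last" then names the task that ran immediately before us,
 * which resume() hands back in er2.
 */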
#define iret() __asm__ __volatile__ ("rte" : : : "memory", "sp", "cc")

/*
 * Force strict CPU ordering.
 * Not really required on H8...
 */
#define nop()  asm volatile ("nop"::)
#define mb()   asm volatile ("" : : : "memory")
#define rmb()  asm volatile ("" : : : "memory")
#define wmb()  asm volatile ("" : : : "memory")
#define set_mb(var, value) do { xchg(&var, value); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      do { } while (0)
#endif
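
/*
 * Illustration only, not from this header: the classic pairing these
 * barriers support, with hypothetical variables "data" and "flag".
 * A producer publishes data before raising the flag; the consumer
 * reads them in the reverse order:
 *
 *      producer                        consumer
 *      --------                        --------
 *      data = 42;                      while (!flag)
 *      smp_wmb();                              ;
 *      flag = 1;                       smp_rmb();
 *                                      use(data);
 *
 * On this uniprocessor port the smp_*() forms reduce to a plain
 * compiler barrier().
 */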

#define xchg(ptr, x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        unsigned long tmp, flags;

        /*
         * No SMP on the H8/300, so disabling interrupts is enough to
         * make the load/store pair below atomic.
         */
        local_irq_save(flags);

        switch (size) {
        case 1:
                __asm__ __volatile__(
                        "mov.b %2,%0\n\t"       /* tmp = *ptr */
                        "mov.b %1,%2"           /* *ptr = x   */
                        : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
                break;
        case 2:
                __asm__ __volatile__(
                        "mov.w %2,%0\n\t"
                        "mov.w %1,%2"
                        : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
                break;
        case 4:
                __asm__ __volatile__(
                        "mov.l %2,%0\n\t"
                        "mov.l %1,%2"
                        : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
                break;
        default:
                tmp = 0;        /* unsupported size */
        }

        local_irq_restore(flags);
        return tmp;
}
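
/*
 * Illustration only, not part of this header: xchg() is the classic
 * building block for a test-and-set lock. A hypothetical sketch:
 *
 *      static unsigned long lock_word;
 *
 *      while (xchg(&lock_word, 1) != 0)
 *              ;                       spin until we stored the 1
 *      ... critical section ...
 *      xchg(&lock_word, 0);            release
 */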

/* Restart via a memory-indirect jump through the reset vector at 0. */
#define HARD_RESET_NOW() ({             \
        local_irq_disable();            \
        asm("jmp @@0");                 \
})

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always
 * make them available.
 */
#define cmpxchg_local(ptr, o, n)                                        \
        ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr),             \
                        (unsigned long)(o), (unsigned long)(n),         \
                        sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
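
/*
 * Illustration only, not from this header: a lock-free increment of a
 * hypothetical counter using cmpxchg_local(). The loop retries until
 * no one else modified the word between the read and the swap:
 *
 *      static unsigned long counter;
 *      unsigned long old, new;
 *
 *      do {
 *              old = counter;
 *              new = old + 1;
 *      } while (cmpxchg_local(&counter, old, new) != old);
 */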

#define arch_align_stack(x) (x)

extern void die(const char *str, struct pt_regs *fp, unsigned long err);

#endif /* _H8300_SYSTEM_H */