/* MN10300 System definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <asm/cpu-regs.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/irqflags.h>

struct task_struct;
struct thread_struct;

extern asmlinkage
struct task_struct *__switch_to(struct thread_struct *prev,
				struct thread_struct *next,
				struct task_struct *prev_task);

/* context switching is now performed out-of-line in switch_to.S */
#define switch_to(prev, next, last)					\
do {									\
	current->thread.wchan = (u_long) __builtin_return_address(0);	\
	(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev));\
	mb();								\
	current->thread.wchan = 0;					\
} while (0)
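
/*
 * Usage sketch (illustrative only, not part of the original header):
 * switch_to() is expanded by the generic scheduler core, roughly as
 * below.  "prev", "next" and "last" name the outgoing task, the
 * incoming task, and the task that was running when control returns
 * here; the variable names are placeholders.
 *
 *	struct task_struct *prev, *next, *last;
 *
 *	switch_to(prev, next, last);
 *	// execution only resumes here once this task is scheduled back
 *	// in; "last" then holds the task that ran just before us on
 *	// this CPU, as returned by __switch_to()
 */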

#define arch_align_stack(x) (x)

#define nop() asm volatile ("nop")

#endif /* !__ASSEMBLY__ */

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases
 * to be a nop for these.
 */
#define mb()	asm volatile ("": : :"memory")
#define rmb()	mb()
#define wmb()	asm volatile ("": : :"memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value)  do { var = value;  mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define read_barrier_depends()		do {} while (0)
#define smp_read_barrier_depends()	do {} while (0)
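
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the classic producer/consumer pairing of the SMP barriers above.
 * "data", "flag", compute() and use() are placeholder names.
 *
 * Producer:
 *	data = compute();
 *	smp_wmb();		// order the data store before the flag store
 *	flag = 1;
 *
 * Consumer:
 *	while (!flag)
 *		cpu_relax();
 *	smp_rmb();		// order the flag load before the data load
 *	use(data);
 */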

/*****************************************************************************/
/*
 * MN10300 doesn't actually have an exchange instruction
 */
#ifndef __ASSEMBLY__

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

static inline
unsigned long __xchg(volatile unsigned long *m, unsigned long val)
{
	unsigned long retval;
	unsigned long flags;

	/* emulated atomic exchange: disable IRQs so the read-modify-write
	 * cannot be interrupted on the local CPU */
	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

#define xchg(ptr, v)						\
	((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),	\
				     (unsigned long)(v)))
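
/*
 * Usage sketch (illustrative only): a minimal test-and-set style lock
 * built on xchg().  "busy" is a placeholder variable; real code would
 * use the generic spinlock or bitops APIs instead.
 *
 *	static volatile unsigned long busy;
 *
 *	while (xchg(&busy, 1) != 0)
 *		cpu_relax();		// spin until we observe the 0 -> 1 transition
 *	...critical section...
 *	busy = 0;			// release
 *
 * Note that as implemented here xchg() is made atomic by disabling
 * local interrupts, so it is atomic with respect to the local CPU only.
 */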

static inline unsigned long __cmpxchg(volatile unsigned long *m,
				      unsigned long old, unsigned long new)
{
	unsigned long retval;
	unsigned long flags;

	/* emulated compare-and-swap: IRQs are disabled so the
	 * read/compare/write sequence cannot be interrupted locally */
	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);
	return retval;
}

#define cmpxchg(ptr, o, n)					\
	((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
					(unsigned long)(o),	\
					(unsigned long)(n)))
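
/*
 * Usage sketch (illustrative only): a retry-loop increment built on
 * cmpxchg().  "counter" is a placeholder variable.
 *
 *	static unsigned long counter;
 *
 *	unsigned long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * The loop retries until the compare-and-swap observes the value that
 * was read, i.e. no other update slipped in between the read and the
 * swap.
 */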

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ASM_SYSTEM_H */