system.h

/* MN10300 System definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <asm/atomic.h>

#if !defined(CONFIG_LAZY_SAVE_FPU)
struct fpu_state_struct;
extern asmlinkage void fpu_save(struct fpu_state_struct *);
#define switch_fpu(prev, next)						\
do {									\
	if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) {		\
		(prev)->thread.fpu_flags &= ~THREAD_HAS_FPU;		\
		(prev)->thread.uregs->epsw &= ~EPSW_FE;			\
		fpu_save(&(prev)->thread.fpu_state);			\
	}								\
} while (0)
#else
#define switch_fpu(prev, next) do {} while (0)
#endif
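
/*
 * Usage sketch (illustrative, not part of the original header):
 * switch_fpu() is only invoked from switch_to() below.  With eager
 * saving (the !CONFIG_LAZY_SAVE_FPU case above), an outgoing task that
 * owns the FPU has its state written back at switch time:
 *
 *	switch_fpu(prev, next);
 *	// If prev->thread.fpu_flags had THREAD_HAS_FPU set, the macro:
 *	//  - clears THREAD_HAS_FPU,
 *	//  - clears EPSW_FE in prev's saved user EPSW, so prev's next
 *	//    FPU instruction will fault and cause a state reload,
 *	//  - calls fpu_save() to dump the FPU registers into
 *	//    prev->thread.fpu_state.
 *	// With CONFIG_LAZY_SAVE_FPU the macro expands to nothing and
 *	// the save is deferred until some other task touches the FPU.
 */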

struct task_struct;
struct thread_struct;

extern asmlinkage
struct task_struct *__switch_to(struct thread_struct *prev,
				struct thread_struct *next,
				struct task_struct *prev_task);

/* context switching is now performed out-of-line in switch_to.S */
#define switch_to(prev, next, last)					\
do {									\
	switch_fpu(prev, next);						\
	current->thread.wchan = (u_long) __builtin_return_address(0);	\
	(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
	mb();								\
	current->thread.wchan = 0;					\
} while (0)
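
/*
 * Usage sketch (illustrative, not part of the original header): roughly
 * how the scheduler core would invoke switch_to().  context_switch()
 * here is a simplified, hypothetical stand-in for the real scheduler
 * code.
 *
 *	static struct task_struct *context_switch(struct task_struct *prev,
 *						  struct task_struct *next)
 *	{
 *		struct task_struct *last;
 *
 *		// Saves prev's FPU state if it owns the FPU, records a
 *		// wchan for the outgoing context, then enters the
 *		// out-of-line assembly in switch_to.S.  When prev is
 *		// later rescheduled, execution resumes here with 'last'
 *		// naming the task that was switched away from.
 *		switch_to(prev, next, last);
 *		return last;
 *	}
 */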

#define arch_align_stack(x) (x)

#define nop() asm volatile ("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb()
 * ceases to be a no-op for these.
 */
#define mb()	asm volatile ("" : : : "memory")
#define rmb()	mb()
#define wmb()	asm volatile ("" : : : "memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define set_mb(var, value)	do { xchg(&var, value); } while (0)
#else  /* CONFIG_SMP */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define set_mb(var, value)	do { var = value; mb(); } while (0)
#endif /* CONFIG_SMP */

#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#define read_barrier_depends()		do {} while (0)
#define smp_read_barrier_depends()	do {} while (0)
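
/*
 * Usage sketch (illustrative, not part of the original header): the
 * classic smp_wmb()/smp_rmb() pairing these macros support.  'data',
 * 'ready', compute() and use() are hypothetical.
 *
 *	static int data, ready;
 *
 *	void producer(void)
 *	{
 *		data = compute();
 *		smp_wmb();	// publish the data before the flag
 *		ready = 1;
 *	}
 *
 *	void consumer(void)
 *	{
 *		while (!ready)
 *			cpu_relax();
 *		smp_rmb();	// read the flag before reading the data
 *		use(data);
 *	}
 *
 * On !CONFIG_SMP builds the smp_*() variants reduce to barrier(), a
 * pure compiler barrier; mb()/wmb() remain for ordering against
 * devices, as the comment above notes.
 */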

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ASM_SYSTEM_H */