/* include/asm-ppc64/system.h */
  1. #ifndef __PPC64_SYSTEM_H
  2. #define __PPC64_SYSTEM_H
  3. /*
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
  9. #include <linux/config.h>
  10. #include <linux/compiler.h>
  11. #include <asm/page.h>
  12. #include <asm/processor.h>
  13. #include <asm/hw_irq.h>
  14. #include <asm/memory.h>
/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores. Lwsync is fine for
 * rmb(), though.
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
/* Data-dependent loads are naturally ordered on PPC, hence a no-op. */
#define read_barrier_depends() do { } while(0)

/* Assign, then emit the corresponding SMP barrier after the store. */
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
#ifdef CONFIG_SMP
/* SMP: barriers must order accesses as seen by other CPUs.  smp_wmb()
 * can be the cheaper eieio since it only orders cacheable stores to
 * system memory (see the comment above wmb()). */
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
/* UP: only the compiler may reorder, so a compiler barrier suffices. */
#define smp_mb() __asm__ __volatile__("": : :"memory")
#define smp_rmb() __asm__ __volatile__("": : :"memory")
#define smp_wmb() __asm__ __volatile__("": : :"memory")
#define smp_read_barrier_depends() do { } while(0)
#endif /* CONFIG_SMP */
#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER
/* Hook pointers installed by the active kernel debugger (e.g. xmon);
 * NULL when no handler is registered for that event. */
extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

/* Expand to an inline wrapper that calls the hook __<NAME> when it is
 * non-NULL and otherwise returns 0 ("event not handled"). */
#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
if (unlikely(__ ## __NAME)) \
return __ ## __NAME(regs); \
return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#ifdef CONFIG_XMON
extern void xmon_init(void);
#endif

#else
/* No debugger configured: every entry point reports "not handled". */
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
extern int fix_alignment(struct pt_regs *regs);
extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
int sig);
extern void show_regs(struct pt_regs * regs);
extern void low_hash_fault(struct pt_regs *regs, unsigned long address);
extern int die(const char *str, struct pt_regs *regs, long err);
extern int _get_PVR(void);

/* Lazy FP/VMX state management: give up or flush per-task register
 * state, or enable/disable kernel-mode use of the unit. */
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void disable_kernel_altivec(void);
extern void enable_kernel_altivec(void);
extern int emulate_altivec(struct pt_regs *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
/* No AltiVec support configured: nothing to flush. */
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

extern int mem_init_done;	/* set on boot once kmalloc can be called */

/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);

/* Context switch: __switch_to returns the task we switched away from,
 * which switch_to() stores into `last` for the resuming context. */
extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))

struct thread_struct;
/* Low-level register switch between two thread_structs (asm helper). */
extern struct task_struct * _switch(struct thread_struct *prev,
struct thread_struct *next);
/*
 * Return nonzero when the running CPU's PVR version field matches @pv.
 * SPR 0x11F is the Processor Version Register; PVR_VER extracts the
 * version field (defined in asm/processor.h, included above).
 */
static inline int __is_processor(unsigned long pv)
{
unsigned long pvr;
asm("mfspr %0, 0x11F" : "=r" (pvr));
return(PVR_VER(pvr) == pv);
}
/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 */

/*
 * 32-bit atomic exchange via a lwarx/stwcx. reservation loop: retry
 * until the conditional store succeeds.  EIEIO_ON_SMP/ISYNC_ON_SMP
 * (from asm/memory.h) presumably provide acquire/release ordering on
 * SMP builds and expand to nothing on UP — TODO confirm in asm/memory.h.
 */
static __inline__ unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
unsigned long dummy;	/* holds the previous value read by lwarx */

__asm__ __volatile__(
EIEIO_ON_SMP
"1: lwarx %0,0,%3 # __xchg_u32\n\
 stwcx. %2,0,%3\n\
2: bne- 1b"		/* stwcx. lost the reservation: retry */
ISYNC_ON_SMP
: "=&r" (dummy), "=m" (*m)
: "r" (val), "r" (m)
: "cc", "memory");

return (dummy);
}
/*
 * 64-bit atomic exchange: identical to __xchg_u32 but uses the
 * doubleword reservation instructions ldarx/stdcx.
 */
static __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
unsigned long dummy;	/* previous value read by ldarx */

__asm__ __volatile__(
EIEIO_ON_SMP
"1: ldarx %0,0,%3 # __xchg_u64\n\
 stdcx. %2,0,%3\n\
2: bne- 1b"		/* stdcx. lost the reservation: retry */
ISYNC_ON_SMP
: "=&r" (dummy), "=m" (*m)
: "r" (val), "r" (m)
: "cc", "memory");

return (dummy);
}
  169. /*
  170. * This function doesn't exist, so you'll get a linker error
  171. * if something tries to do an invalid xchg().
  172. */
  173. extern void __xchg_called_with_bad_pointer(void);
  174. static __inline__ unsigned long
  175. __xchg(volatile void *ptr, unsigned long x, int size)
  176. {
  177. switch (size) {
  178. case 4:
  179. return __xchg_u32(ptr, x);
  180. case 8:
  181. return __xchg_u64(ptr, x);
  182. }
  183. __xchg_called_with_bad_pointer();
  184. return x;
  185. }
/* Type-generic atomic exchange: stores x into *ptr, returns the old
 * value.  sizeof(*(ptr)) selects the 32- or 64-bit primitive. */
#define xchg(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})

/* Test-and-set: atomically set *ptr to 1, return the previous value. */
#define tas(ptr) (xchg((ptr),1))
/* Advertise that this architecture provides a real cmpxchg(). */
#define __HAVE_ARCH_CMPXCHG 1

/*
 * 32-bit compare-and-exchange: if *p == old, store new; either way
 * return the value that was in *p.  Built on a lwarx/stwcx.
 * reservation loop that retries when the conditional store fails.
 */
static __inline__ unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
unsigned int prev;	/* value observed at *p by lwarx */

__asm__ __volatile__ (
EIEIO_ON_SMP
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
 cmpw 0,%0,%3\n\
 bne- 2f\n\
 stwcx. %4,0,%2\n\
 bne- 1b"		/* lost the reservation: retry */
ISYNC_ON_SMP
"\n\
2:"			/* comparison failed: return observed value */
: "=&r" (prev), "=m" (*p)
: "r" (p), "r" (old), "r" (new), "m" (*p)
: "cc", "memory");

return prev;
}
/*
 * 64-bit compare-and-exchange: same protocol as __cmpxchg_u32 but with
 * the doubleword instructions ldarx/cmpd/stdcx.
 */
static __inline__ unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
unsigned long prev;	/* value observed at *p by ldarx */

__asm__ __volatile__ (
EIEIO_ON_SMP
"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
 cmpd 0,%0,%3\n\
 bne- 2f\n\
 stdcx. %4,0,%2\n\
 bne- 1b"		/* lost the reservation: retry */
ISYNC_ON_SMP
"\n\
2:"			/* comparison failed: return observed value */
: "=&r" (prev), "=m" (*p)
: "r" (p), "r" (old), "r" (new), "m" (*p)
: "cc", "memory");

return prev;
}
  231. /* This function doesn't exist, so you'll get a linker error
  232. if something tries to do an invalid cmpxchg(). */
  233. extern void __cmpxchg_called_with_bad_pointer(void);
  234. static __inline__ unsigned long
  235. __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
  236. {
  237. switch (size) {
  238. case 4:
  239. return __cmpxchg_u32(ptr, old, new);
  240. case 8:
  241. return __cmpxchg_u64(ptr, old, new);
  242. }
  243. __cmpxchg_called_with_bad_pointer();
  244. return old;
  245. }
/* Type-generic compare-and-exchange: if *ptr == o, store n; returns
 * the value that was in *ptr (callers compare it against o to tell
 * whether the swap happened). */
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN 0

/* No stack randomization on ppc64: use the stack top unchanged. */
#define arch_align_stack(x) (x)
  262. #endif /* __KERNEL__ */
  263. #endif