/* $Id: processor.h,v 1.83 2002/02/10 06:04:33 davem Exp $
 * include/asm-sparc64/processor.h
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __ASM_SPARC64_PROCESSOR_H
#define __ASM_SPARC64_PROCESSOR_H

/*
 * Sparc64 implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("rd %%pc, %0" : "=r" (pc)); pc; })
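/*
 * Illustrative use (an editorial example, not part of the original
 * header): grab the current kernel PC for a debugging printout.
 *
 *	void *here = current_text_addr();
 *	printk(KERN_DEBUG "executing near %p\n", here);
 */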
#include <linux/config.h>
#include <asm/asi.h>
#include <asm/a.out.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/segment.h>
#include <asm/page.h>

/* The sparc has no problems with write protection. */
#define wp_works_ok 1
#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
/*
 * The user lives in his very own context, and cannot reference us. Note
 * that TASK_SIZE is a misnomer: it really gives the maximum user virtual
 * address that the kernel will allocate out.
 */
#define VA_BITS		44
#ifndef __ASSEMBLY__
#define VPTE_SIZE	(1UL << (VA_BITS - PAGE_SHIFT + 3))
#else
#define VPTE_SIZE	(1 << (VA_BITS - PAGE_SHIFT + 3))
#endif
#define TASK_SIZE	((unsigned long)-VPTE_SIZE)
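/*
 * Worked example (editorial, assuming sparc64's 8KB pages, i.e.
 * PAGE_SHIFT == 13): VPTE_SIZE = 1UL << (44 - 13 + 3) = 1UL << 34,
 * so TASK_SIZE = (unsigned long)-(1UL << 34) = 0xfffffffc00000000,
 * the negated size wrapped around the top of the 64-bit space.
 */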
/*
 * The vpte base must be able to hold the entire vpte, half
 * of which lives above, and half below, the base. And it
 * is placed as close to the highest address range as possible.
 */
#define VPTE_BASE_SPITFIRE	(-(VPTE_SIZE/2))
#if 1
#define VPTE_BASE_CHEETAH	VPTE_BASE_SPITFIRE
#else
#define VPTE_BASE_CHEETAH	0xffe0000000000000
#endif
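/*
 * Worked example (editorial, continuing the PAGE_SHIFT == 13 numbers
 * above): VPTE_BASE_SPITFIRE = -(1UL << 33) = 0xfffffffe00000000, so
 * the vpte runs from 0xfffffffc00000000 up to the very top of the
 * address space, with half of its 16GB on each side of the base.
 */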
#ifndef __ASSEMBLY__

typedef struct {
	unsigned char seg;
} mm_segment_t;

/* The Sparc processor specific thread struct. */
/* XXX This should die, everything can go into thread_info now. */
struct thread_struct {
#ifdef CONFIG_DEBUG_SPINLOCK
	/* How many spinlocks are held by this thread.
	 * Used with spinlock debugging to catch tasks
	 * sleeping illegally with locks held.
	 */
	int smp_lock_count;
	unsigned int smp_lock_pc;
#else
	int dummy; /* f'in gcc bug... */
#endif
};

#endif /* !(__ASSEMBLY__) */
#ifndef CONFIG_DEBUG_SPINLOCK
#define INIT_THREAD  {			\
	0,				\
}
#else /* CONFIG_DEBUG_SPINLOCK */
#define INIT_THREAD  {			\
	/* smp_lock_count, smp_lock_pc, */ \
	0, 0,				\
}
#endif /* !(CONFIG_DEBUG_SPINLOCK) */
#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Return saved PC of a blocked thread. */
struct task_struct;
extern unsigned long thread_saved_pc(struct task_struct *);

/* On uniprocessor, even processes running in RMO see TSO semantics. */
#ifdef CONFIG_SMP
#define TSTATE_INITIAL_MM	TSTATE_TSO
#else
#define TSTATE_INITIAL_MM	TSTATE_RMO
#endif
/* Do necessary setup to start up a newly executed thread. */
#define start_thread(regs, pc, sp) \
do { \
	regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (ASI_PNF << 24); \
	regs->tpc = ((pc & (~3)) - 4); \
	regs->tnpc = regs->tpc + 4; \
	regs->y = 0; \
	set_thread_wstate(1 << 3); \
	if (current_thread_info()->utraps) { \
		if (*(current_thread_info()->utraps) < 2) \
			kfree(current_thread_info()->utraps); \
		else \
			(*(current_thread_info()->utraps))--; \
		current_thread_info()->utraps = NULL; \
	} \
	__asm__ __volatile__( \
	"stx	%%g0, [%0 + %2 + 0x00]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x08]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x10]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x18]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x20]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x28]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x30]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x38]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x40]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x48]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x50]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x58]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x60]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x68]\n\t" \
	"stx	%1,   [%0 + %2 + 0x70]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x78]\n\t" \
	"wrpr	%%g0, (1 << 3), %%wstate\n\t" \
	: \
	: "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \
	  "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
} while (0)
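/*
 * Editorial note on the asm block above (an annotation, not original
 * kernel commentary): %2 is offsetof(struct pt_regs, u_regs), computed
 * via the null-pointer cast, so the sixteen stx instructions walk
 * u_regs[0..15] in 8-byte steps.  Every slot is cleared except offset
 * 0x70 (u_regs[14], i.e. UREG_FP), which receives the new user stack
 * pointer, adjusted down by a register window save area plus the
 * 64-bit STACK_BIAS.  The final wrpr mirrors set_thread_wstate(1 << 3),
 * selecting the spill/fill handling for a 64-bit user stack
 * (start_thread32 below uses 2 << 3 instead).
 */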
#define start_thread32(regs, pc, sp) \
do { \
	pc &= 0x00000000ffffffffUL; \
	sp &= 0x00000000ffffffffUL; \
\
	regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM); \
	regs->tpc = ((pc & (~3)) - 4); \
	regs->tnpc = regs->tpc + 4; \
	regs->y = 0; \
	set_thread_wstate(2 << 3); \
	if (current_thread_info()->utraps) { \
		if (*(current_thread_info()->utraps) < 2) \
			kfree(current_thread_info()->utraps); \
		else \
			(*(current_thread_info()->utraps))--; \
		current_thread_info()->utraps = NULL; \
	} \
	__asm__ __volatile__( \
	"stx	%%g0, [%0 + %2 + 0x00]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x08]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x10]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x18]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x20]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x28]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x30]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x38]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x40]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x48]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x50]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x58]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x60]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x68]\n\t" \
	"stx	%1,   [%0 + %2 + 0x70]\n\t" \
	"stx	%%g0, [%0 + %2 + 0x78]\n\t" \
	"wrpr	%%g0, (2 << 3), %%wstate\n\t" \
	: \
	: "r" (regs), "r" (sp - sizeof(struct reg_window32)), \
	  "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
} while (0)
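/*
 * Editorial note (not original kernel commentary): start_thread32
 * differs from start_thread in three visible ways: pc and sp are
 * truncated to 32 bits and TSTATE_AM makes the CPU mask all user
 * addresses to 32 bits; the window state is 2 << 3 instead of 1 << 3,
 * selecting the 32-bit spill/fill handlers; and the initial stack
 * pointer is adjusted by sizeof(struct reg_window32) with no
 * STACK_BIAS, since 32-bit tasks use unbiased stacks.
 */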
/* Free all resources held by a thread. */
#define release_thread(tsk)		do { } while (0)

/* Prepare to copy thread state - unlazy all lazy state. */
#define prepare_to_copy(tsk)		do { } while (0)

extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern unsigned long get_wchan(struct task_struct *task);

#define KSTK_EIP(tsk)  ((tsk)->thread_info->kregs->tpc)
#define KSTK_ESP(tsk)  ((tsk)->thread_info->kregs->u_regs[UREG_FP])

#define cpu_relax()	barrier()
/* Prefetch support.  This is tuned for UltraSPARC-III and later.
 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
 * a shallower prefetch queue than later chips.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void prefetch(const void *x)
{
	/* We do not use the read prefetch mnemonic because that
	 * prefetches into the prefetch-cache which only is accessible
	 * by floating point operations in UltraSPARC-III and later.
	 * By contrast, "#one_write" prefetches into the L2 cache
	 * in shared state.
	 */
	__asm__ __volatile__("prefetch	[%0], #one_write"
			     : /* no outputs */
			     : "r" (x));
}
static inline void prefetchw(const void *x)
{
	/* The optimal prefetch to use for writes is
	 * "#n_writes".  This brings the cacheline into the
	 * L2 cache in "owned" state.
	 */
	__asm__ __volatile__("prefetch	[%0], #n_writes"
			     : /* no outputs */
			     : "r" (x));
}
#define spin_lock_prefetch(x)	prefetchw(x)
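/*
 * Illustrative use (an editorial example with a hypothetical node
 * type, not part of the original header): when walking a list whose
 * nodes will all be written, request ownership of the next node
 * while updating the current one.
 *
 *	for (p = head; p; p = p->next) {
 *		prefetchw(p->next);
 *		p->count++;
 *	}
 */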
#endif /* !(__ASSEMBLY__) */

#endif /* !(__ASM_SPARC64_PROCESSOR_H) */