/* smp.h: Sparc specific SMP stuff.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_SMP_H
#define _SPARC_SMP_H

#include <linux/threads.h>
#include <asm/head.h>
#include <asm/btfixup.h>

#ifndef __ASSEMBLY__

#include <linux/cpumask.h>

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_SMP

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/asi.h>
#include <asm/atomic.h>

/*
 * Private routines/data
 */

extern unsigned char boot_cpu_id;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
extern struct linux_prom_registers smp_penguin_ctable;

typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
                          unsigned long, unsigned long);

void cpu_panic(void);
extern void smp4m_irq_rotate(int cpu);

/*
 * General functions that each host system must provide.
 */

void sun4m_init_smp(void);
void sun4d_init_smp(void);

void smp_callin(void);
void smp_boot_cpus(void);
void smp_store_cpu_info(int);

struct seq_file;
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);

BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
BTFIXUPDEF_BLACKBOX(load_current)

#define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)
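
/* Broadcast helpers: xc0()..xc4() fire the cross call on every online cpu,
 * passing zero to four unsigned long arguments to func.
 */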
static inline void xc0(smpfunc_t func) { smp_cross_call(func, cpu_online_map, 0, 0, 0, 0); }
static inline void xc1(smpfunc_t func, unsigned long arg1)
{ smp_cross_call(func, cpu_online_map, arg1, 0, 0, 0); }
static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
{ smp_cross_call(func, cpu_online_map, arg1, arg2, 0, 0); }
static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                       unsigned long arg3)
{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, 0); }
static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                       unsigned long arg3, unsigned long arg4)
{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, arg4); }
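
/* Run func(info) on all online cpus via a broadcast cross call.
 * Note that the "wait" argument is not used by this implementation.
 */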
static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
{
        xc1((smpfunc_t)func, (unsigned long)info);
        return 0;
}
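
/* Run func(info) on a single cpu via a cross call targeted at cpuid only;
 * as above, "wait" is ignored.
 */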
static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
                                           void *info, int wait)
{
        smp_cross_call((smpfunc_t)func, cpumask_of_cpu(cpuid),
                       (unsigned long) info, 0, 0, 0);
        return 0;
}

static inline int cpu_logical_map(int cpu)
{
        return cpu;
}
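
/* sun4m: the cpu id lives in bits 13:12 of the trap base register (%tbr). */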
static inline int hard_smp4m_processor_id(void)
{
        int cpuid;

        __asm__ __volatile__("rd %%tbr, %0\n\t"
                             "srl %0, 12, %0\n\t"
                             "and %0, 3, %0\n\t" :
                             "=&r" (cpuid));
        return cpuid;
}
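
/* sun4d: read the cpu id from the Viking per-cpu scratch register
 * (ASI_M_VIKING_TMP1), where it is stashed for us during cpu bringup.
 */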
static inline int hard_smp4d_processor_id(void)
{
        int cpuid;

        __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
                             "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
        return cpuid;
}
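
/* LEON: the processor index sits in the top four bits of %asr17. */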
static inline int hard_smpleon_processor_id(void)
{
        int cpuid;

        __asm__ __volatile__("rd %%asr17,%0\n\t"
                             "srl %0,28,%0" :
                             "=&r" (cpuid));
        return cpuid;
}
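
/* The generic hard_smp_processor_id() is a btfixup blackbox: the marker
 * sethi below gets patched at boot with the machine-specific sequence
 * (sun4m or sun4d, see the commented-out variants in the body).  Modules
 * are not boot-time patched, so they go through the fixed-up
 * ___hard_smp_processor_id helper instead.
 */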
#ifndef MODULE
static inline int hard_smp_processor_id(void)
{
        int cpuid;

        /* Black box - sun4m
                __asm__ __volatile__("rd %%tbr, %0\n\t"
                                     "srl %0, 12, %0\n\t"
                                     "and %0, 3, %0\n\t" :
                                     "=&r" (cpuid));
                   - sun4d
                __asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
                                     "nop; nop" :
                                     "=&r" (cpuid));
           See btfixup.h and btfixupprep.c to understand how a blackbox works.
         */
        __asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
                             "sethi %%hi(boot_cpu_id), %0\n\t"
                             "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
                             "=&r" (cpuid));
        return cpuid;
}
#else
static inline int hard_smp_processor_id(void)
{
        int cpuid;

        __asm__ __volatile__("mov %%o7, %%g1\n\t"
                             "call ___f___hard_smp_processor_id\n\t"
                             " nop\n\t"
                             "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
        return cpuid;
}
#endif

#define raw_smp_processor_id()          (current_thread_info()->cpu)

#define prof_multiplier(__cpu)          cpu_data(__cpu).multiplier
#define prof_counter(__cpu)             cpu_data(__cpu).counter

void smp_setup_cpu_possible_map(void);

#endif /* !(__ASSEMBLY__) */

/* Sparc specific messages. */
#define MSG_CROSS_CALL          0x0005  /* run func on cpus */

/* Empirical PROM processor mailbox constants.  If the per-cpu mailbox
 * contains something other than one of these then the ipi is from
 * Linux's active_kernel_processor.  This facility exists so that
 * the boot monitor can capture all the other cpus when one catches
 * a watchdog reset or the user enters the monitor using L1-A keys.
 */
#define MBOX_STOPCPU            0xFB
#define MBOX_IDLECPU            0xFC
#define MBOX_IDLECPU2           0xFD
#define MBOX_STOPCPU2           0xFE

#else /* SMP */

#define hard_smp_processor_id()         0

#define smp_setup_cpu_possible_map() do { } while (0)

#endif /* !(SMP) */
#endif /* !(_SPARC_SMP_H) */