smp_32.h

/* smp.h: Sparc specific SMP stuff.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_SMP_H
#define _SPARC_SMP_H

#include <linux/threads.h>
#include <asm/head.h>
#include <asm/btfixup.h>

#ifndef __ASSEMBLY__

#include <linux/cpumask.h>

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_SMP

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/asi.h>
#include <asm/atomic.h>

/*
 *	Private routines/data
 */

extern unsigned char boot_cpu_id;
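
/* A cross-call handler takes the (up to) five unsigned long arguments
 * that are passed through smp_cross_call()/xcN() below.
 */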
typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
			  unsigned long, unsigned long);

/*
 *	General functions that each host system must provide.
 */

void sun4m_init_smp(void);
void sun4d_init_smp(void);

void smp_callin(void);
void smp_boot_cpus(void);
void smp_store_cpu_info(int);

struct seq_file;
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);

BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
BTFIXUPDEF_BLACKBOX(load_current)

#define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)
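
/* xc0()..xc4() issue a cross call of 'func' with cpu_online_map as the
 * target mask and zero to four arguments; unused slots are passed as 0.
 */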
static inline void xc0(smpfunc_t func) { smp_cross_call(func, cpu_online_map, 0, 0, 0, 0); }
static inline void xc1(smpfunc_t func, unsigned long arg1)
{ smp_cross_call(func, cpu_online_map, arg1, 0, 0, 0); }
static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
{ smp_cross_call(func, cpu_online_map, arg1, arg2, 0, 0); }
static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		       unsigned long arg3)
{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, 0); }
static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		       unsigned long arg3, unsigned long arg4)
{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, arg4); }
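
/* Minimal sparc32 versions of the generic cross-call API: both helpers
 * ignore 'wait' and always return 0.  Illustrative use only (do_tick is
 * a hypothetical handler):
 *
 *	static void do_tick(void *info) { ... }
 *
 *	smp_call_function(do_tick, NULL, 0);
 */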
static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	xc1((smpfunc_t)func, (unsigned long)info);
	return 0;
}

static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
					   void *info, int wait)
{
	smp_cross_call((smpfunc_t)func, cpumask_of_cpu(cpuid),
		       (unsigned long) info, 0, 0, 0);
	return 0;
}

static inline int cpu_logical_map(int cpu)
{
	return cpu;
}
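
/* Hardware cpu id readers for the two SMP platforms: sun4m extracts the
 * id from bits 13:12 of %tbr, sun4d loads it through the
 * ASI_M_VIKING_TMP1 alternate space.
 */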
static inline int hard_smp4m_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("rd %%tbr, %0\n\t"
			     "srl %0, 12, %0\n\t"
			     "and %0, 3, %0\n\t" :
			     "=&r" (cpuid));
	return cpuid;
}

static inline int hard_smp4d_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
	return cpuid;
}

#ifndef MODULE
static inline int hard_smp_processor_id(void)
{
	int cpuid;

	/* Black box - sun4m
		__asm__ __volatile__("rd %%tbr, %0\n\t"
				     "srl %0, 12, %0\n\t"
				     "and %0, 3, %0\n\t" :
				     "=&r" (cpuid));
	             - sun4d
		__asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
				     "nop; nop" :
				     "=&r" (cpuid));
	   See btfixup.h and btfixupprep.c to understand how a blackbox works.
	 */
	__asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
			     "sethi %%hi(boot_cpu_id), %0\n\t"
			     "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
			     "=&r" (cpuid));
	return cpuid;
}
#else
static inline int hard_smp_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("mov %%o7, %%g1\n\t"
			     "call ___f___hard_smp_processor_id\n\t"
			     " nop\n\t"
			     "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
	return cpuid;
}
#endif
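
/* The scheduler-visible cpu number comes from thread_info, not from a
 * hardware register read.
 */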
#define raw_smp_processor_id()		(current_thread_info()->cpu)

#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
#define prof_counter(__cpu)		cpu_data(__cpu).counter

void smp_setup_cpu_possible_map(void);

#endif /* !(__ASSEMBLY__) */

/* Sparc specific messages. */
#define MSG_CROSS_CALL		0x0005	/* run func on cpus */

/* Empirical PROM processor mailbox constants.  If the per-cpu mailbox
 * contains something other than one of these then the ipi is from
 * Linux's active_kernel_processor.  This facility exists so that
 * the boot monitor can capture all the other cpus when one catches
 * a watchdog reset or the user enters the monitor using L1-A keys.
 */
#define MBOX_STOPCPU		0xFB
#define MBOX_IDLECPU		0xFC
#define MBOX_IDLECPU2		0xFD
#define MBOX_STOPCPU2		0xFE

#else /* SMP */

#define hard_smp_processor_id()		0

#define smp_setup_cpu_possible_map() do { } while (0)

#endif /* !(SMP) */
#endif /* !(_SPARC_SMP_H) */