/* smp.h: Sparc specific SMP stuff.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_SMP_H
#define _SPARC_SMP_H

#include <linux/threads.h>
#include <asm/head.h>
#include <asm/btfixup.h>

#ifndef __ASSEMBLY__

#include <linux/cpumask.h>

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_SMP

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/asi.h>
#include <asm/atomic.h>

/*
 * Private routines/data
 */

extern unsigned char boot_cpu_id;
extern cpumask_t phys_cpu_present_map;
#define cpu_possible_map phys_cpu_present_map

typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
                          unsigned long, unsigned long);

/*
 * General functions that each host system must provide.
 */

void sun4m_init_smp(void);
void sun4d_init_smp(void);

void smp_callin(void);
void smp_boot_cpus(void);
void smp_store_cpu_info(int);

struct seq_file;
void smp_bogo(struct seq_file *);
void smp_info(struct seq_file *);

BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
BTFIXUPDEF_BLACKBOX(load_current)

#define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)

static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
static inline void xc1(smpfunc_t func, unsigned long arg1)
{ smp_cross_call(func, arg1, 0, 0, 0, 0); }
static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
{ smp_cross_call(func, arg1, arg2, 0, 0, 0); }
static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                       unsigned long arg3)
{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                       unsigned long arg3, unsigned long arg4)
{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                       unsigned long arg3, unsigned long arg4, unsigned long arg5)
{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }

static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
{
        xc1((smpfunc_t)func, (unsigned long)info);
        return 0;
}
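/*
 * Usage sketch (illustrative, not part of the original header): a cross
 * call runs the given handler on the other CPUs with up to five
 * unsigned long arguments.  The sparc32 mm code issues broadcasts
 * roughly like this, where local_flush_tlb_all() stands for any
 * per-cpu handler:
 *
 *      void smp_flush_tlb_all(void)
 *      {
 *              xc0((smpfunc_t) local_flush_tlb_all);
 *              local_flush_tlb_all();
 *      }
 *
 * smp_call_function() above is just the one-argument case: it hands the
 * callback and its info pointer to xc1() and always reports success.
 */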
static inline int cpu_logical_map(int cpu)
{
        return cpu;
}

static inline int hard_smp4m_processor_id(void)
{
        int cpuid;

        __asm__ __volatile__("rd %%tbr, %0\n\t"
                             "srl %0, 12, %0\n\t"
                             "and %0, 3, %0\n\t" :
                             "=&r" (cpuid));
        return cpuid;
}

static inline int hard_smp4d_processor_id(void)
{
        int cpuid;

        __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
                             "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
        return cpuid;
}

#ifndef MODULE
static inline int hard_smp_processor_id(void)
{
        int cpuid;

        /* Black box - sun4m
                __asm__ __volatile__("rd %%tbr, %0\n\t"
                                     "srl %0, 12, %0\n\t"
                                     "and %0, 3, %0\n\t" :
                                     "=&r" (cpuid));
                     - sun4d
                __asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
                                     "nop; nop" :
                                     "=&r" (cpuid));
           See btfixup.h and btfixupprep.c to understand how a blackbox works.
         */
        __asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
                             "sethi %%hi(boot_cpu_id), %0\n\t"
                             "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
                             "=&r" (cpuid));
        return cpuid;
}
#else
static inline int hard_smp_processor_id(void)
{
        int cpuid;

        __asm__ __volatile__("mov %%o7, %%g1\n\t"
                             "call ___f___hard_smp_processor_id\n\t"
                             " nop\n\t"
                             "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
        return cpuid;
}
#endif
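/*
 * Sketch of how the blackbox gets wired up (names recalled from the
 * sun4m SMP setup code, shown for illustration only): at boot each
 * platform registers its own patch routine and cross-call
 * implementation, and btfixup then patches the instruction slot marked
 * by the ___b_hard_smp_processor_id sethi above with that machine's
 * real CPU-id read; until then the fallback simply loads boot_cpu_id.
 *
 *      void __init sun4m_init_smp(void)
 *      {
 *              BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
 *              BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
 *              BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
 *              BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id,
 *                              BTFIXUPCALL_NORM);
 *      }
 */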
#define raw_smp_processor_id()          (current_thread_info()->cpu)

#define prof_multiplier(__cpu)          cpu_data(__cpu).multiplier
#define prof_counter(__cpu)             cpu_data(__cpu).counter

void smp_setup_cpu_possible_map(void);

#endif /* !(__ASSEMBLY__) */

/* Sparc specific messages. */
#define MSG_CROSS_CALL          0x0005  /* run func on cpus */

/* Empirical PROM processor mailbox constants.  If the per-cpu mailbox
 * contains something other than one of these then the ipi is from
 * Linux's active_kernel_processor.  This facility exists so that
 * the boot monitor can capture all the other cpus when one catches
 * a watchdog reset or the user enters the monitor using L1-A keys.
 */
#define MBOX_STOPCPU            0xFB
#define MBOX_IDLECPU            0xFC
#define MBOX_IDLECPU2           0xFD
#define MBOX_STOPCPU2           0xFE
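/*
 * Hypothetical helper (illustration only, not in the kernel sources)
 * making the rule in the comment above explicit: a mailbox holding one
 * of the MBOX_* values is a boot-monitor request, while anything else
 * means the IPI came from Linux's active_kernel_processor.
 *
 *      static int mbox_from_boot_monitor(unsigned char mbox)
 *      {
 *              return mbox == MBOX_STOPCPU  || mbox == MBOX_IDLECPU ||
 *                     mbox == MBOX_IDLECPU2 || mbox == MBOX_STOPCPU2;
 *      }
 */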
#else /* SMP */

#define hard_smp_processor_id()         0

#define smp_setup_cpu_possible_map() do { } while (0)

#endif /* !(SMP) */

#define NO_PROC_ID              0xFF

#endif /* !(_SPARC_SMP_H) */