/* arch/sparc/include/asm/smp_32.h */
  1. /* smp.h: Sparc specific SMP stuff.
  2. *
  3. * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  4. */
  5. #ifndef _SPARC_SMP_H
  6. #define _SPARC_SMP_H
  7. #include <linux/threads.h>
  8. #include <asm/head.h>
  9. #include <asm/btfixup.h>
  10. #ifndef __ASSEMBLY__
  11. #include <linux/cpumask.h>
  12. #endif /* __ASSEMBLY__ */
  13. #ifdef CONFIG_SMP
  14. #ifndef __ASSEMBLY__
  15. #include <asm/ptrace.h>
  16. #include <asm/asi.h>
  17. #include <asm/atomic.h>
/*
 * Private routines/data
 */

/* Logical id of the CPU that booted the kernel. */
extern unsigned char boot_cpu_id;

/* Mask of processors found at probe time; doubles as the possible map. */
extern cpumask_t phys_cpu_present_map;
#define cpu_possible_map phys_cpu_present_map

/*
 * Signature of a handler dispatched to other CPUs through
 * smp_cross_call(): up to four unsigned long arguments, no return value.
 */
typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
			  unsigned long, unsigned long);
/*
 * General functions that each host system must provide.
 */
void sun4m_init_smp(void);		/* sun4m platform SMP bring-up */
void sun4d_init_smp(void);		/* sun4d platform SMP bring-up */
void smp_callin(void);			/* entry point for a freshly started secondary CPU */
void smp_boot_cpus(void);		/* start all secondary processors */
void smp_store_cpu_info(int);		/* record per-cpu data for the given cpu id */

struct seq_file;
void smp_bogo(struct seq_file *);	/* emit per-cpu BogoMIPS (for /proc/cpuinfo) */
void smp_info(struct seq_file *);	/* emit per-cpu status (for /proc/cpuinfo) */
/*
 * Boot-time-fixup (btfixup) entry points: the actual implementations are
 * selected and patched in at boot depending on sun4m vs sun4d hardware.
 * See btfixup.h for the BTFIXUPDEF_*/BTFIXUP_CALL machinery.
 */
BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
/* Blackboxes: inline insn sequences rewritten by btfixupprep at boot. */
BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
BTFIXUPDEF_BLACKBOX(load_current)

#define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)
  42. static inline void xc0(smpfunc_t func) { smp_cross_call(func, cpu_online_map, 0, 0, 0, 0); }
  43. static inline void xc1(smpfunc_t func, unsigned long arg1)
  44. { smp_cross_call(func, cpu_online_map, arg1, 0, 0, 0); }
  45. static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
  46. { smp_cross_call(func, cpu_online_map, arg1, arg2, 0, 0); }
  47. static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
  48. unsigned long arg3)
  49. { smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, 0); }
  50. static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
  51. unsigned long arg3, unsigned long arg4)
  52. { smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, arg4); }
  53. static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
  54. {
  55. xc1((smpfunc_t)func, (unsigned long)info);
  56. return 0;
  57. }
  58. static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
  59. void *info, int wait)
  60. {
  61. smp_cross_call((smpfunc_t)func, cpumask_of_cpu(cpuid),
  62. (unsigned long) info, 0, 0, 0);
  63. return 0;
  64. }
  65. static inline int cpu_logical_map(int cpu)
  66. {
  67. return cpu;
  68. }
/*
 * sun4m: read the hardware CPU id.  The shift-by-12 and mask-with-3
 * extract bits 13:12 of the %tbr (trap base) register, so ids 0-3 are
 * representable.
 */
static inline int hard_smp4m_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("rd %%tbr, %0\n\t"
			     "srl %0, 12, %0\n\t"
			     "and %0, 3, %0\n\t" :
			     "=&r" (cpuid));
	return cpuid;
}
/*
 * sun4d: read the hardware CPU id with an alternate-space load from
 * address 0 (%g0) in ASI_M_VIKING_TMP1 -- presumably the boot code
 * stashes the id in that Viking scratch register; see the platform
 * bring-up code to confirm.
 */
static inline int hard_smp4d_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
	return cpuid;
}
#ifndef MODULE
/*
 * In-kernel variant: a btfixup "blackbox".  The sethi of
 * ___b_hard_smp_processor_id marks the insn sequence for btfixupprep,
 * which rewrites it at boot into the sun4m or sun4d id-read shown in
 * the comment below.  Until patched, the code falls through to loading
 * boot_cpu_id (the second sethi overwrites the marker's result).
 */
static inline int hard_smp_processor_id(void)
{
	int cpuid;

	/* Black box - sun4m
		__asm__ __volatile__("rd %%tbr, %0\n\t"
				     "srl %0, 12, %0\n\t"
				     "and %0, 3, %0\n\t" :
				     "=&r" (cpuid));
			 - sun4d
		__asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
				     "nop; nop" :
				     "=&r" (cpuid));
	   See btfixup.h and btfixupprep.c to understand how a blackbox works.
	 */
	__asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
			     "sethi %%hi(boot_cpu_id), %0\n\t"
			     "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
			     "=&r" (cpuid));
	return cpuid;
}
#else
/*
 * Module variant: calls the btfixup-patched out-of-line helper instead
 * of embedding a blackbox -- NOTE(review): presumably because module
 * text is not processed by btfixupprep; confirm against btfixup.h.
 * The helper returns the id in %g2; %o7 is saved in %g1 around the call.
 */
static inline int hard_smp_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("mov %%o7, %%g1\n\t"
			     "call ___f___hard_smp_processor_id\n\t"
			     " nop\n\t"
			     "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
	return cpuid;
}
#endif
/* Current CPU number, as cached in the running thread's thread_info. */
#define raw_smp_processor_id() (current_thread_info()->cpu)

/* Per-cpu profiling tick state, kept in cpu_data. */
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter

/* Populate cpu_possible_map from the firmware-probed CPUs. */
void smp_setup_cpu_possible_map(void);
  121. #endif /* !(__ASSEMBLY__) */
/* Sparc specific messages. */
#define MSG_CROSS_CALL         0x0005       /* run func on cpus */

/* Empirical PROM processor mailbox constants.  If the per-cpu mailbox
 * contains something other than one of these then the ipi is from
 * Linux's active_kernel_processor.  This facility exists so that
 * the boot monitor can capture all the other cpus when one catches
 * a watchdog reset or the user enters the monitor using L1-A keys.
 */
#define MBOX_STOPCPU           0xFB
#define MBOX_IDLECPU           0xFC
#define MBOX_IDLECPU2          0xFD
#define MBOX_STOPCPU2          0xFE
#else /* SMP */

/* Uniprocessor build: the only CPU is always id 0 and there is no map
 * to set up.
 */
#define hard_smp_processor_id()		0
#define smp_setup_cpu_possible_map() do { } while (0)

#endif /* !(SMP) */

/* Sentinel cpu id meaning "no processor" (ids are 8-bit, see boot_cpu_id). */
#define NO_PROC_ID            0xFF

#endif /* !(_SPARC_SMP_H) */