/*
 * include/asm-s390/smp.h
 *
 * S390 version
 *   Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *   Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *              Martin Schwidefsky (schwidefsky@de.ibm.com)
 *              Heiko Carstens (heiko.carstens@de.ibm.com)
 */
  10. #ifndef __ASM_SMP_H
  11. #define __ASM_SMP_H
  12. #include <linux/threads.h>
  13. #include <linux/cpumask.h>
  14. #include <linux/bitops.h>
  15. #if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
  16. #include <asm/lowcore.h>
  17. #include <asm/sigp.h>
  18. #include <asm/ptrace.h>
/*
 * s390 specific smp.c headers
 */
  22. typedef struct
  23. {
  24. int intresting;
  25. sigp_ccode ccode;
  26. __u32 status;
  27. __u16 cpu;
  28. } sigp_info;
  29. extern void machine_restart_smp(char *);
  30. extern void machine_halt_smp(void);
  31. extern void machine_power_off_smp(void);
  32. extern void smp_setup_cpu_possible_map(void);
  33. extern int smp_call_function_on(void (*func) (void *info), void *info,
  34. int nonatomic, int wait, int cpu);
  35. #define NO_PROC_ID 0xFF /* No processor magic marker */
/*
 * This magic constant controls our willingness to transfer
 * a process across CPUs.  Such a transfer incurs misses on the L1
 * cache, and on a P6 or P5 with multiple L2 caches L2 hits.  My
 * gut feeling is this will vary by board in value.  For a board
 * with separate L2 cache it probably depends also on the RSS, and
 * for a board with shared L2 cache it ought to decay fast as other
 * processes are run.
 */
  45. #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
  46. #define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
  47. static inline __u16 hard_smp_processor_id(void)
  48. {
  49. __u16 cpu_address;
  50. asm volatile("stap %0" : "=m" (cpu_address));
  51. return cpu_address;
  52. }
  53. /*
  54. * returns 1 if cpu is in stopped/check stopped state or not operational
  55. * returns 0 otherwise
  56. */
  57. static inline int
  58. smp_cpu_not_running(int cpu)
  59. {
  60. __u32 status;
  61. switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
  62. case sigp_order_code_accepted:
  63. case sigp_status_stored:
  64. /* Check for stopped and check stop state */
  65. if (status & 0x50)
  66. return 1;
  67. break;
  68. case sigp_not_operational:
  69. return 1;
  70. default:
  71. break;
  72. }
  73. return 0;
  74. }
  75. #define cpu_logical_map(cpu) (cpu)
  76. extern int __cpu_disable (void);
  77. extern void __cpu_die (unsigned int cpu);
  78. extern void cpu_die (void) __attribute__ ((noreturn));
  79. extern int __cpu_up (unsigned int cpu);
  80. #endif
  81. #ifndef CONFIG_SMP
  82. static inline int
  83. smp_call_function_on(void (*func) (void *info), void *info,
  84. int nonatomic, int wait, int cpu)
  85. {
  86. func(info);
  87. return 0;
  88. }
  89. static inline void smp_send_stop(void)
  90. {
  91. /* Disable all interrupts/machine checks */
  92. __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
  93. }
  94. #define hard_smp_processor_id() 0
  95. #define smp_cpu_not_running(cpu) 1
  96. #define smp_setup_cpu_possible_map() do { } while (0)
  97. #endif
  98. extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
  99. #endif