#ifndef _ASM_M32R_SMP_H
#define _ASM_M32R_SMP_H

/* $Id$ */

#include <linux/config.h>

#ifdef CONFIG_SMP
#ifndef __ASSEMBLY__

#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <asm/m32r.h>

#define PHYSID_ARRAY_SIZE 1

struct physid_mask
{
        unsigned long mask[PHYSID_ARRAY_SIZE];
};

typedef struct physid_mask physid_mask_t;

#define physid_set(physid, map)           set_bit(physid, (map).mask)
#define physid_clear(physid, map)         clear_bit(physid, (map).mask)
#define physid_isset(physid, map)         test_bit(physid, (map).mask)
#define physid_test_and_set(physid, map)  test_and_set_bit(physid, (map).mask)

#define physids_and(dst, src1, src2)   bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
#define physids_or(dst, src1, src2)    bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
#define physids_clear(map)             bitmap_zero((map).mask, MAX_APICS)
#define physids_complement(dst, src)   bitmap_complement((dst).mask, (src).mask, MAX_APICS)
#define physids_empty(map)             bitmap_empty((map).mask, MAX_APICS)
#define physids_equal(map1, map2)      bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
#define physids_weight(map)            bitmap_weight((map).mask, MAX_APICS)
#define physids_shift_right(d, s, n)   bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
#define physids_shift_left(d, s, n)    bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
#define physids_coerce(map)            ((map).mask[0])

#define physids_promote(physids) \
({ \
        physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
        __physid_mask.mask[0] = physids; \
        __physid_mask; \
})

#define physid_mask_of_physid(physid) \
({ \
        physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
        physid_set(physid, __physid_mask); \
        __physid_mask; \
})

#define PHYSID_MASK_ALL  { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }

extern physid_mask_t phys_cpu_present_map;
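
/*
 * Illustrative sketch, not part of the original header: how the physid
 * accessors above are typically combined.  The helper name below is
 * hypothetical and exists only to demonstrate the macros.
 */
static __inline__ void __example_mark_physid_present(int physid,
                                                     physid_mask_t *map)
{
        /* Build a mask with only 'physid' set, then merge it into *map. */
        physid_mask_t one = physid_mask_of_physid(physid);

        physids_or(*map, *map, one);
}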

/*
 * Some lowlevel functions might want to know about
 * the real CPU ID <-> CPU # mapping.
 */
extern volatile int cpu_2_physid[NR_CPUS];
#define cpu_to_physid(cpu_id) cpu_2_physid[cpu_id]

#define raw_smp_processor_id() (current_thread_info()->cpu)

extern cpumask_t cpu_callout_map;
#define cpu_possible_map cpu_callout_map

static __inline__ int hard_smp_processor_id(void)
{
        return (int)*(volatile long *)M32R_CPUID_PORTL;
}

static __inline__ int cpu_logical_map(int cpu)
{
        return cpu;
}

static __inline__ int cpu_number_map(int cpu)
{
        return cpu;
}

static __inline__ unsigned int num_booting_cpus(void)
{
        return cpus_weight(cpu_callout_map);
}
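
/*
 * Illustrative sketch, not part of the original header: the logical CPU
 * number comes from the current thread_info, while the physical ID is read
 * from the CPUID port; cpu_2_physid[] records the mapping between the two.
 * The macro name below is hypothetical and only demonstrates that relation.
 */
#define example_cpu_ids_consistent() \
        (cpu_to_physid(raw_smp_processor_id()) == hard_smp_processor_id())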

extern void smp_send_timer(void);
extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);

#endif  /* not __ASSEMBLY__ */

#define NO_PROC_ID          (0xff)  /* No processor magic marker */

#define PROC_CHANGE_PENALTY (15)    /* Schedule penalty */

/*
 * M32R-mp IPI
 */
#define RESCHEDULE_IPI       (M32R_IRQ_IPI0-M32R_IRQ_IPI0)
#define INVALIDATE_TLB_IPI   (M32R_IRQ_IPI1-M32R_IRQ_IPI0)
#define CALL_FUNCTION_IPI    (M32R_IRQ_IPI2-M32R_IRQ_IPI0)
#define LOCAL_TIMER_IPI      (M32R_IRQ_IPI3-M32R_IRQ_IPI0)
#define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0)
#define CPU_BOOT_IPI         (M32R_IRQ_IPI5-M32R_IRQ_IPI0)

#define IPI_SHIFT (0)
#define NR_IPIS   (8)
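
/*
 * Illustrative sketch, not part of the original header: each *_IPI constant
 * above is the offset of its interrupt line from M32R_IRQ_IPI0, so an IPI
 * number maps back to a hardware IRQ as shown below.  The macro name is
 * hypothetical.
 */
#define EXAMPLE_IPI_TO_IRQ(ipi_num) (M32R_IRQ_IPI0 + (ipi_num))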

#endif  /* CONFIG_SMP */

#endif  /* _ASM_M32R_SMP_H */