/* cpuset.h */
  1. #ifndef _LINUX_CPUSET_H
  2. #define _LINUX_CPUSET_H
  3. /*
  4. * cpuset interface
  5. *
  6. * Copyright (C) 2003 BULL SA
  7. * Copyright (C) 2004-2006 Silicon Graphics, Inc.
  8. *
  9. */
  10. #include <linux/sched.h>
  11. #include <linux/cpumask.h>
  12. #include <linux/nodemask.h>
  13. #include <linux/cgroup.h>
  14. #ifdef CONFIG_CPUSETS
  15. extern int number_of_cpusets; /* How many cpusets are defined in system? */
  16. extern int cpuset_init_early(void);
  17. extern int cpuset_init(void);
  18. extern void cpuset_init_smp(void);
  19. extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
  20. extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);
  21. extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
  22. #define cpuset_current_mems_allowed (current->mems_allowed)
  23. void cpuset_init_current_mems_allowed(void);
  24. void cpuset_update_task_memory_state(void);
  25. int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
  26. extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
  27. extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
  28. static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
  29. {
  30. return number_of_cpusets <= 1 ||
  31. __cpuset_zone_allowed_softwall(z, gfp_mask);
  32. }
  33. static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
  34. {
  35. return number_of_cpusets <= 1 ||
  36. __cpuset_zone_allowed_hardwall(z, gfp_mask);
  37. }
  38. extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
  39. const struct task_struct *tsk2);
/*
 * Record a memory-pressure event for the current task's cpuset, but only
 * when the global enable flag (cpuset_memory_pressure_enabled) is set;
 * otherwise this expands to nothing.  The do { } while (0) wrapper makes
 * the macro safe to use as a single statement (e.g. in an unbraced if).
 */
#define cpuset_memory_pressure_bump() \
do { \
	if (cpuset_memory_pressure_enabled) \
		__cpuset_memory_pressure_bump(); \
} while (0)
  45. extern int cpuset_memory_pressure_enabled;
  46. extern void __cpuset_memory_pressure_bump(void);
  47. extern const struct file_operations proc_cpuset_operations;
  48. struct seq_file;
  49. extern void cpuset_task_status_allowed(struct seq_file *m,
  50. struct task_struct *task);
  51. extern void cpuset_lock(void);
  52. extern void cpuset_unlock(void);
  53. extern int cpuset_mem_spread_node(void);
  54. static inline int cpuset_do_page_mem_spread(void)
  55. {
  56. return current->flags & PF_SPREAD_PAGE;
  57. }
  58. static inline int cpuset_do_slab_mem_spread(void)
  59. {
  60. return current->flags & PF_SPREAD_SLAB;
  61. }
  62. extern int current_cpuset_is_being_rebound(void);
  63. extern void rebuild_sched_domains(void);
  64. #else /* !CONFIG_CPUSETS */
/* No-op init stubs: without CONFIG_CPUSETS there is nothing to set up. */
static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
/*
 * Without cpusets a task is not CPU-constrained, so both the plain and
 * the _locked variant report the full cpu_possible_map.
 */
static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
{
	*mask = cpu_possible_map;
}
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
							cpumask_t *mask)
{
	*mask = cpu_possible_map;
}
/* Without cpusets every possible memory node is allowed. */
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}
/* With no cpusets, the allowed mems are all nodes in the N_HIGH_MEMORY state. */
#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}

/* Any nodemask is acceptable when no cpuset constraints exist. */
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}
/* Zone checks trivially pass when cpuset mem restriction is compiled out. */
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}
static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}
/* Every pair of tasks trivially shares allowed mems without cpusets. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						const struct task_struct *tsk2)
{
	return 1;
}

/* Memory-pressure accounting is compiled out; nothing to bump. */
static inline void cpuset_memory_pressure_bump(void) {}
/* Nothing cpuset-related to show in /proc status output. */
static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

/* No cpuset lock exists to take or release. */
static inline void cpuset_lock(void) {}
static inline void cpuset_unlock(void) {}
/* No spreading policy without cpusets: always pick node 0 ... */
static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

/* ... and never request page-cache or slab spreading. */
static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}
static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}
/* No cpuset can ever be mid-rebind when cpusets are compiled out. */
static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

/*
 * Delegate straight to partition_sched_domains() with a single
 * partition and no cpumask/attr arrays (NULL, NULL) — presumably the
 * scheduler's default-domain fallback; confirm against kernel/sched.c.
 */
static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}
  128. #endif /* !CONFIG_CPUSETS */
  129. #endif /* _LINUX_CPUSET_H */