#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in the system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
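
/*
 * Example (illustrative sketch): the scheduler falls back to the task's
 * cpuset when the task is left with no usable CPU, roughly as
 * select_fallback_rq() in kernel/sched.c does; "dest_cpu" is an assumed
 * variable name here:
 *
 *	if (!cpumask_intersects(&p->cpus_allowed, cpu_active_mask))
 *		dest_cpu = cpuset_cpus_allowed_fallback(p);
 */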

extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
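
/*
 * Example (illustrative sketch, not the real allocator fast path): a
 * zonelist scan such as the one in mm/page_alloc.c would gate each
 * candidate zone on the cpuset check and skip zones outside the current
 * cpuset; "zonelist" and "high_zoneidx" are assumed to be in scope:
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 */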

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
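
/*
 * Example (illustrative sketch): the page allocator bumps the per-cpuset
 * memory_pressure meter only once an allocation has to enter direct
 * reclaim, so the meter ticks only under real memory pressure.  A
 * minimal sketch of such a slow path:
 *
 *	cpuset_memory_pressure_bump();
 *	progress = try_to_free_pages(zonelist, order, gfp_mask, NULL);
 */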

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * Reading current's mems_allowed and mempolicy in the fast path must be
 * protected by get_mems_allowed().
 */
static inline void get_mems_allowed(void)
{
	current->mems_allowed_change_disable++;

	/*
	 * Ensure that mems_allowed and mempolicy are read only after the
	 * update of ->mems_allowed_change_disable.
	 *
	 * When the write-side task finds ->mems_allowed_change_disable is
	 * not 0, it knows the read-side task is reading mems_allowed or
	 * mempolicy, so it will clear the old bits lazily.
	 */
	smp_mb();
}

static inline void put_mems_allowed(void)
{
	/*
	 * Ensure that mems_allowed and mempolicy have been read before
	 * ->mems_allowed_change_disable is decremented.
	 *
	 * That way the write-side task knows the read-side task is still
	 * reading mems_allowed or mempolicy, and does not clear the old
	 * bits in the nodemask.
	 */
	smp_mb();
	--ACCESS_ONCE(current->mems_allowed_change_disable);
}
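
/*
 * Example (illustrative sketch in the spirit of __page_cache_alloc() in
 * mm/filemap.c): a fast-path reader brackets every use of
 * current->mems_allowed with the pair above.  cpuset_mem_spread_node()
 * reads mems_allowed, so it must sit inside the critical section;
 * "gfp" is an assumed parameter name:
 *
 *	struct page *page;
 *
 *	get_mems_allowed();
 *	if (cpuset_do_page_mem_spread()) {
 *		int nid = cpuset_mem_spread_node();
 *		page = alloc_pages_exact_node(nid, gfp, 0);
 *	} else {
 *		page = alloc_pages(gfp, 0);
 *	}
 *	put_mems_allowed();
 */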

static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	current->mems_allowed = nodemask;
	task_unlock(current);
}
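
/*
 * Example (illustrative): a kernel thread that should never be confined
 * by a cpuset can pin its nodemask explicitly; kthreadd does roughly
 * this at boot:
 *
 *	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 */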

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
	return cpumask_any(cpu_active_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline void get_mems_allowed(void)
{
}

static inline void put_mems_allowed(void)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */