#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
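
/*
 * Query which CPUs and which memory nodes the cpuset hierarchy
 * currently permits for a given task.
 */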
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
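
/*
 * The "softwall" check may also accept nodes borrowed from the nearest
 * hardwalled ancestor cpuset for allocations without __GFP_HARDWALL
 * set; the "hardwall" check only accepts nodes in the task's own
 * mems_allowed.  Both wrappers short-circuit to "allowed" while at
 * most one cpuset exists, so the unconstrained case never takes the
 * out-of-line slow path.
 */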
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);
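
/*
 * Bump the memory-pressure meter of the current task's cpuset; the
 * global enable flag keeps this to a single test in the common case
 * where the feature is switched off.
 */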
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
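
/*
 * Memory spreading: when a cpuset's spread options are enabled, the
 * corresponding PF_SPREAD_* task flags are set and page cache or slab
 * allocations are spread evenly over the allowed nodes instead of
 * preferring the local one.
 */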
extern int cpuset_mem_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);
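
/*
 * Replace the set of memory nodes current is allowed to allocate from.
 */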
static inline void set_mems_allowed(nodemask_t nodemask)
{
	current->mems_allowed = nodemask;
}

#else /* !CONFIG_CPUSETS */
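
/*
 * With CONFIG_CPUSETS disabled, the stubs below behave as if a single
 * all-encompassing cpuset existed: every CPU and memory node is
 * allowed, and the bookkeeping hooks compile away.
 */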
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */