cpuset.h

#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init_early(void);
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_locked(struct task_struct *p,
				       struct cpumask *mask);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);

/*
 * The inline wrappers below short-circuit to "allowed" when at most one
 * cpuset exists, so the common (unpartitioned) case never calls into
 * kernel/cpuset.c.
 */
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_zone_allowed_softwall(z, gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_zone_allowed_hardwall(z, gfp_mask);
}
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);

extern void cpuset_lock(void);
extern void cpuset_unlock(void);

extern int cpuset_mem_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);
#else /* !CONFIG_CPUSETS */

/*
 * With cpusets configured out, these stubs let callers compile without
 * #ifdef guards: all CPUs and memory nodes are reported as allowed.
 */
static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
					      struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline void cpuset_lock(void) {}
static inline void cpuset_unlock(void) {}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */
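
Below is a minimal, hypothetical usage sketch; it is not part of the header and not taken from the kernel source. It only illustrates how a caller might consult the two zone checks declared above: the function name example_zone_ok() and its hardwall parameter are made up for illustration. One design point worth noting is that on a system with at most one cpuset, both inline wrappers answer "allowed" without calling into kernel/cpuset.c, so the common unpartitioned case stays cheap, and with CONFIG_CPUSETS disabled the stubs make the same code compile unchanged.

#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Decide whether zone @z may satisfy an allocation with flags @gfp_mask. */
static int example_zone_ok(struct zone *z, gfp_t gfp_mask, int hardwall)
{
	if (hardwall)
		/* Stricter test: confine the allocation to the task's own cpuset. */
		return cpuset_zone_allowed_hardwall(z, gfp_mask);

	/* More permissive test: kernel allocations may escape the cpuset walls. */
	return cpuset_zone_allowed_softwall(z, gfp_mask);
}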