/* linux/cpuset.h */
  1. #ifndef _LINUX_CPUSET_H
  2. #define _LINUX_CPUSET_H
  3. /*
  4. * cpuset interface
  5. *
  6. * Copyright (C) 2003 BULL SA
  7. * Copyright (C) 2004-2006 Silicon Graphics, Inc.
  8. *
  9. */
  10. #include <linux/sched.h>
  11. #include <linux/cpumask.h>
  12. #include <linux/nodemask.h>
  13. #include <linux/cgroup.h>
#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

/* Boot-time and SMP-bringup initialization hooks. */
extern int cpuset_init_early(void);
extern int cpuset_init(void);
extern void cpuset_init_smp(void);

/* Fill *mask with the CPUs the task's cpuset allows it to run on. */
extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);

/* Return the set of memory nodes the task's cpuset allows. */
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);

/* With cpusets enabled, the current task's allowed nodes live in task_struct. */
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);

/* Nonzero if *nodemask intersects the caller's allowed memory nodes. */
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

/* Slow-path checks; see the inline wrappers below that skip them. */
extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
  28. static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
  29. {
  30. return number_of_cpusets <= 1 ||
  31. __cpuset_zone_allowed_softwall(z, gfp_mask);
  32. }
  33. static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
  34. {
  35. return number_of_cpusets <= 1 ||
  36. __cpuset_zone_allowed_hardwall(z, gfp_mask);
  37. }
/* Nonzero if the two tasks' allowed memory nodes overlap. */
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
		const struct task_struct *tsk2);

/*
 * Bump the cpuset memory-pressure counter, but only when the feature has
 * been enabled; the flag test keeps the disabled case to a single load.
 */
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;

/* Emit the task's cpuset-allowed CPUs/memory into a /proc status seq_file. */
extern void cpuset_task_status_allowed(struct seq_file *m,
		struct task_struct *task);

extern void cpuset_lock(void);
extern void cpuset_unlock(void);

/* Pick the next node to spread page/slab allocations over. */
extern int cpuset_mem_spread_node(void);
/*
 * Nonzero if the current task should spread its page-cache allocations
 * across its allowed memory nodes (PF_SPREAD_PAGE set by its cpuset).
 * Returns the raw flag bit, not a normalized boolean.
 */
static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}
/*
 * Nonzero if the current task should spread its slab allocations across
 * its allowed memory nodes (PF_SPREAD_SLAB set by its cpuset).
 * Returns the raw flag bit, not a normalized boolean.
 */
static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}
/* React to memory nodes coming online/offline. */
extern void cpuset_track_online_nodes(void);

/* Nonzero while the current task's cpuset mems are being rebound. */
extern int current_cpuset_is_being_rebound(void);

/* Rebuild scheduler domains from the current cpuset configuration. */
extern void rebuild_sched_domains(void);
  65. #else /* !CONFIG_CPUSETS */
/*
 * !CONFIG_CPUSETS stubs: with cpusets compiled out there are no placement
 * restrictions, so these collapse to no-ops or "everything allowed".
 */
static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

/* No cpuset restriction: every possible CPU is allowed. */
static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
{
	*mask = cpu_possible_map;
}

static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
		cpumask_t *mask)
{
	*mask = cpu_possible_map;
}

/* No cpuset restriction: every possible memory node is allowed. */
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}
/* Without cpusets, fall back to the global set of nodes with high memory. */
#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}

/* Any nodemask is acceptable when cpusets are compiled out. */
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

/* Allocation is never cpuset-restricted in this configuration. */
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

/* All tasks trivially share the same (unrestricted) memory nodes. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
		const struct task_struct *tsk2)
{
	return 1;
}
/* Remaining !CONFIG_CPUSETS stubs: no pressure accounting, no locking,
 * no memory spreading, no rebind tracking. */
static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
		struct task_struct *task)
{
}

static inline void cpuset_lock(void) {}
static inline void cpuset_unlock(void) {}

/* With no spreading policy, always allocate from node 0's default path. */
static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline void cpuset_track_online_nodes(void) {}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

/* No cpusets: rebuild as a single sched-domain partition spanning all CPUs. */
static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}
  130. #endif /* !CONFIG_CPUSETS */
  131. #endif /* _LINUX_CPUSET_H */