#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX MPOL_INTERLEAVE
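
/*
 * Illustrative sketch (not part of the original header): from userspace
 * these mode values reach the kernel through the set_mempolicy(2) system
 * call, declared in libnuma's <numaif.h>. Interleaving a process's future
 * allocations across nodes 0 and 1 might look like:
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
 *		perror("set_mempolicy");
 */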

/* Flags for get_mem_policy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
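
/*
 * Illustrative sketch (not from this header): combining both flags asks
 * get_mempolicy(2) for the node that currently backs a given address,
 * rather than for a policy:
 *
 *	int node;
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("%p is on node %d\n", addr, node);
 */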

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
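
/*
 * Illustrative sketch (not from this header): mbind(2) applies a policy to
 * an address range; adding MPOL_MF_MOVE also migrates pages owned by this
 * process that violate the new policy:
 *
 *	unsigned long nodemask = 1UL << 0;
 *	if (mbind(buf, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
 *		  MPOL_MF_STRICT | MPOL_MF_MOVE))
 *		perror("mbind");
 */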

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;
struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;			/* See MPOL_* above */
	union {
		struct zonelist *zonelist;	/* bind */
		short preferred_node;		/* preferred */
		nodemask_t nodes;		/* interleave */
		/* undefined for default */
	} v;
	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */
#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
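
/*
 * Illustrative sketch (an assumption, modeled on dup_mmap()-style code):
 * a VMA that outlives its origin takes its own copy of the policy and
 * releases it again with mpol_free() when the VMA goes away:
 *
 *	new->vm_policy = mpol_copy(vma_policy(old));
 *	...
 *	mpol_free(vma_policy(new));
 */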

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, int policy,
				nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
				unsigned long idx);
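
/*
 * Illustrative sketch (an assumption, modeled on how a tmpfs-like user
 * might drive this API): the object backing a shared segment embeds one
 * shared_policy, set up once and consulted per page index at allocation:
 *
 *	mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, NULL);
 *	...
 *	pol = mpol_shared_policy_lookup(&info->policy, idx);
 */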

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
extern void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))

#ifdef CONFIG_CPUSET
#define current_cpuset_is_being_rebound() \
		(cpuset_being_rebound == current->cpuset)
#else
#define current_cpuset_is_being_rebound() 0
#endif
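
/*
 * Illustrative sketch (an assumption, modeled on how cpuset code might
 * drive a rebind when a task's set of allowed memory nodes changes):
 *
 *	set_cpuset_being_rebound(cs);
 *	mpol_rebind_task(tsk, &new_mems);
 *	mpol_rebind_mm(tsk->mm, &new_mems);
 *	set_cpuset_being_rebound(NULL);
 */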

extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr);
extern unsigned slab_node(struct mempolicy *policy);

extern int policy_zone;

static inline void check_highest_zone(int k)
{
	if (k > policy_zone)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

extern void *cpuset_being_rebound;	/* Trigger mpol_copy vma rebind */

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
					int policy, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_policy(struct mempolicy *pol,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

#define set_cpuset_being_rebound(x) do {} while (0)

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
					unsigned long addr)
{
	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
}

static inline int do_migrate_pages(struct mm_struct *mm,
					const nodemask_t *from_nodes,
					const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif