#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX MPOL_INTERLEAVE

/* Flags for get_mem_policy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
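
/*
 * Illustrative userspace sketch (not part of this header): the MPOL_*
 * modes and MPOL_MF_* flags above form the ABI of the set_mempolicy()
 * and mbind() system calls.  Assuming the libnuma <numaif.h> wrappers
 * are available, and with addr/len standing in for a real mapping, a
 * caller might look like:
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = 0x3;	// node mask: nodes 0 and 1
 *	// interleave this task's future allocations across nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *	// bind an existing range, moving nonconforming pages
 *	mbind(addr, len, MPOL_BIND, &nodes, sizeof(nodes) * 8,
 *	      MPOL_MF_STRICT | MPOL_MF_MOVE);
 */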

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_clone() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;	/* See MPOL_* above */
	union {
		struct zonelist *zonelist;	/* bind */
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave */
		/* undefined for default */
	} v;
	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
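
/*
 * Illustrative sketch (hypothetical caller, not part of this header) of
 * the object lifecycle these helpers implement: take a reference to
 * share a policy, duplicate it for a private copy, and drop references
 * when done.
 *
 *	mpol_get(pol);			// share: bump refcnt
 *	new = mpol_copy(pol);		// private duplicate
 *	mpol_free(pol);			// drop our reference
 *	mpol_free(new);			// free the duplicate
 */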

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, int policy,
				nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
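
/*
 * Illustrative sketch, assuming a shmem-style object embeds a
 * struct shared_policy (inode_info and pgoff are hypothetical names):
 *
 *	mpol_shared_policy_init(&inode_info->policy, MPOL_DEFAULT, NULL);
 *	...
 *	// at fault time, look up by page index into the object,
 *	// matching the pages-not-bytes indexing described above
 *	struct mempolicy *pol =
 *		mpol_shared_policy_lookup(&inode_info->policy, pgoff);
 *	...
 *	mpol_free_shared_policy(&inode_info->policy);	// on destruction
 */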

extern void numa_default_policy(void);
extern void numa_policy_init(void);

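/*
 * Rebind helpers: remap a policy's node mask when the set of allowed
 * nodes changes, e.g. when cpusets move a task to different memory nodes.
 */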
extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
extern void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))

#ifdef CONFIG_CPUSET
#define current_cpuset_is_being_rebound() \
			(cpuset_being_rebound == current->cpuset)
#else
#define current_cpuset_is_being_rebound() 0
#endif

extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr);
extern unsigned slab_node(struct mempolicy *policy);

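/*
 * policy_zone appears to track the highest zone that memory policies
 * apply to, so that scarce lower zones (e.g. ZONE_DMA) are left alone;
 * check_highest_zone() raises the watermark as higher zones are seen.
 */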
extern int policy_zone;

static inline void check_highest_zone(int k)
{
	if (k > policy_zone)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

extern void *cpuset_being_rebound;	/* Trigger mpol_copy vma rebind */

#else

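/*
 * !CONFIG_NUMA: the policy API collapses to no-op stubs so that callers
 * need no #ifdefs of their own.
 */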
struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
					int policy, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_policy(struct mempolicy *pol,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

#define set_cpuset_being_rebound(x) do {} while (0)

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr)
{
	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif