mempolicy.h

#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3
#define MPOL_MAX	MPOL_INTERLEAVE

/* Flags for get_mem_policy */
#define MPOL_F_NODE		(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR		(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED	(1<<2)	/* return allowed memories */

/* Flags for mbind */
#define MPOL_MF_STRICT		(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE		(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL	(1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL	(1<<3)	/* Internal flags start here */

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;
struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;				/* See MPOL_* above */
	union {
		struct zonelist *zonelist;	/* bind */
		short preferred_node;		/* preferred */
		nodemask_t nodes;		/* interleave */
		/* undefined for default */
	} v;
	nodemask_t cpuset_mems_allowed;		/* mempolicy relative to these nodes */
};
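
/*
 * Illustrative sketch (not part of this header's API): how the 'v' union is
 * read for each mode.  example_policy_node() is a hypothetical helper; the
 * interleave case simply takes the first allowed node instead of the real
 * round-robin logic in mm/mempolicy.c, and MPOL_BIND callers would use
 * pol->v.zonelist rather than a single node.
 *
 *	static int example_policy_node(struct mempolicy *pol)
 *	{
 *		switch (pol->policy) {
 *		case MPOL_PREFERRED:
 *			return pol->v.preferred_node;
 *		case MPOL_INTERLEAVE:
 *			return first_node(pol->v.nodes);
 *		default:
 *			return -1;
 *		}
 *	}
 */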
/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
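
/*
 * Usage sketch (illustrative only): duplicating a VMA's policy when a mapping
 * is copied, roughly in the style of dup_mmap(), then releasing the reference
 * with mpol_free() when the copy goes away.  copy_policy_example() is a
 * hypothetical helper, and __mpol_copy() returning an ERR_PTR on allocation
 * failure is an assumption about mm/mempolicy.c.
 *
 *	static int copy_policy_example(struct vm_area_struct *dst,
 *				       struct vm_area_struct *src)
 *	{
 *		struct mempolicy *pol = mpol_copy(vma_policy(src));
 *
 *		if (IS_ERR(pol))
 *			return PTR_ERR(pol);
 *		vma_set_policy(dst, pol);
 *		return 0;
 *	}
 *
 * Callers that only need a temporary reference to an existing policy use
 * mpol_get() and mpol_free() instead of copying the object.
 */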
extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */
#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};
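
/*
 * Lifecycle sketch (illustrative only), loosely following the tmpfs usage:
 * 'info' is a hypothetical per-object structure embedding a struct
 * shared_policy, and the MPOL_DEFAULT/NULL init arguments are placeholders.
 *
 * setup, typically when the shared object is created:
 *
 *	mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, NULL);
 *
 * per-page allocation; a NULL result means no policy was installed for this
 * page index, so the task or default policy applies:
 *
 *	pol = mpol_shared_policy_lookup(&info->policy, idx);
 *
 * teardown:
 *
 *	mpol_free_shared_policy(&info->policy);
 *
 * mbind() on a mapping of the object would install ranges through
 * mpol_set_shared_policy().
 */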
void mpol_shared_policy_init(struct shared_policy *info, int policy,
				nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol);
extern unsigned slab_node(struct mempolicy *policy);
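
/*
 * Usage sketch (illustrative only): a hugetlb-style allocator asking for a
 * policy-aware zonelist and dropping the extra policy reference afterwards.
 * GFP_HIGHUSER is a placeholder gfp mask; the real call site lives in
 * mm/hugetlb.c.
 *
 *	struct mempolicy *mpol;
 *	struct zonelist *zl = huge_zonelist(vma, addr, GFP_HIGHUSER, &mpol);
 *
 *	(allocate a page from the zones in 'zl', then:)
 *
 *	mpol_free(mpol);
 */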
extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
		const nodemask_t *from_nodes, const nodemask_t *to_nodes,
		int flags);
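
/*
 * Usage sketch (illustrative only): migrating all of an mm's pages from one
 * node set to another, in the style of a cpuset memory rebind.  The node
 * numbers are placeholders, and the assumption that do_migrate_pages() takes
 * mmap_sem itself reflects its implementation in mm/mempolicy.c.
 *
 *	nodemask_t from = nodemask_of_node(0);
 *	nodemask_t to = nodemask_of_node(1);
 *
 *	do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */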

#else /* !CONFIG_NUMA */

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
					int policy, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
{
	return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
}

static inline int do_migrate_pages(struct mm_struct *mm,
		const nodemask_t *from_nodes, const nodemask_t *to_nodes,
		int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif /* _LINUX_MEMPOLICY_H */