mempolicy.h

#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT    0
#define MPOL_PREFERRED  1
#define MPOL_BIND       2
#define MPOL_INTERLEAVE 3

#define MPOL_MAX MPOL_INTERLEAVE

/* Flags for get_mem_policy */
#define MPOL_F_NODE     (1<<0)  /* return next IL mode instead of node mask */
#define MPOL_F_ADDR     (1<<1)  /* look up vma using address */

/* Flags for mbind */
#define MPOL_MF_STRICT   (1<<0) /* Verify existing pages in the mapping */
#define MPOL_MF_MOVE     (1<<1) /* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */
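
/*
 * Illustrative sketch (not part of this header): the MPOL_* and
 * MPOL_MF_* constants above are what userspace passes to the mbind()
 * system call.  Assuming a wrapper such as the one in libnuma's
 * <numaif.h>, binding a mapping to node 0 might look like:
 *
 *      unsigned long nodemask = 1UL << 0;      // allow node 0 only
 *
 *      if (mbind(addr, len, MPOL_BIND, &nodemask,
 *                sizeof(nodemask) * 8, MPOL_MF_STRICT) < 0)
 *              perror("mbind");
 *
 * With MPOL_MF_STRICT the call reports an error if pages already
 * present in the range violate the requested policy.
 */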

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
        atomic_t refcnt;
        short policy;   /* See MPOL_* above */
        union {
                struct zonelist *zonelist;      /* bind */
                short            preferred_node; /* preferred */
                nodemask_t       nodes;          /* interleave */
                /* undefined for default */
        } v;
};
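
/*
 * Illustrative sketch (not part of this header): only the union member
 * matching ->policy is meaningful, so a consumer of a mempolicy has to
 * dispatch on it.  The next_interleave_node() helper below is
 * hypothetical; the point is the pairing of policy and union member:
 *
 *      switch (pol->policy) {
 *      case MPOL_BIND:
 *              zl = pol->v.zonelist;           // allocate only from these zones
 *              break;
 *      case MPOL_PREFERRED:
 *              nid = pol->v.preferred_node;    // try this node first
 *              break;
 *      case MPOL_INTERLEAVE:
 *              nid = next_interleave_node(&pol->v.nodes);  // hypothetical helper
 *              break;
 *      default:
 *              break;                          // MPOL_DEFAULT: v is undefined
 *      }
 */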

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
        if (pol)
                __mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_copy(pol);
        return pol;
}
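
/*
 * Illustrative sketch (not part of this header): a typical lifetime for
 * a VMA policy.  The old_vma/new_vma names are assumptions; the point
 * is that copies come from mpol_copy() and every owner ends with
 * mpol_free():
 *
 *      new_vma->vm_policy = mpol_copy(old_vma->vm_policy);    // e.g. at fork
 *      ...
 *      mpol_free(new_vma->vm_policy);          // when the VMA goes away
 *
 * Both helpers accept NULL (the MPOL_DEFAULT fast path), so callers do
 * not need to special-case unset policies.
 */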

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return 1;
        return __mpol_equal(a, b);
}

#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */
#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */
struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        spinlock_t lock;
};

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
        info->root = RB_ROOT;
        spin_lock_init(&info->lock);
}

int mpol_set_shared_policy(struct shared_policy *info,
                           struct vm_area_struct *vma,
                           struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
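
/*
 * Illustrative sketch (not part of this header): how a shared memory
 * object (e.g. a tmpfs inode) might use the shared policy tree.  The
 * surrounding object and the pgoff variable are assumptions; the key
 * detail is that lookups are by page index, as noted above:
 *
 *      struct shared_policy sp;
 *
 *      mpol_shared_policy_init(&sp);
 *      mpol_set_shared_policy(&sp, vma, new_pol);      // record policy for vma's range
 *      pol = mpol_shared_policy_lookup(&sp, pgoff);    // at fault time, by page index
 *      ...
 *      mpol_free_shared_policy(&sp);                   // when the object is destroyed
 */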

struct mempolicy *get_vma_policy(struct task_struct *task,
                                 struct vm_area_struct *vma, unsigned long addr);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                      unsigned long addr);
extern int policy_zone;
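
/*
 * policy_zone is intended to record the highest zone present in the
 * system; memory policies are only applied to allocations from that
 * zone, and check_highest_zone() is expected to be called while the
 * zonelists are being built so the value stays current.
 */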
static inline void check_highest_zone(int k)
{
        if (k > policy_zone)
                policy_zone = k;
}

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return 1;
}
#define vma_mpol_equal(a,b) 1
#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
        return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
                                         struct vm_area_struct *vma,
                                         struct mempolicy *new)
{
        return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void numa_policy_rebind(const nodemask_t *old,
                                      const nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                             unsigned long addr)
{
        return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
}

static inline void check_highest_zone(int k)
{
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif