#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 *  cgroup interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/kref.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>

#ifdef CONFIG_CGROUPS

struct cgroupfs_root;
struct cgroup_subsys;
struct inode;

extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_init_smp(void);
extern void cgroup_lock(void);
extern void cgroup_unlock(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_fork_callbacks(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
extern int cgroupstats_build(struct cgroupstats *stats,
				struct dentry *dentry);

extern struct file_operations proc_cgroup_operations;

/* Define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT
};
#undef SUBSYS
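
/*
 * Illustrative sketch (not part of the original header): with a
 * linux/cgroup_subsys.h that contains, say, SUBSYS(cpuset) and
 * SUBSYS(debug), the x-macro above would expand roughly to:
 *
 *	enum cgroup_subsys_id {
 *		cpuset_subsys_id,
 *		debug_subsys_id,
 *		CGROUP_SUBSYS_COUNT
 *	};
 */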

/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
	/* The cgroup that this subsystem is attached to. Useful
	 * for subsystems that want to know about the cgroup
	 * hierarchy structure */
	struct cgroup *cgroup;

	/* State maintained by the cgroup system to allow
	 * subsystems to be "busy". Should be accessed via css_get()
	 * and css_put() */
	atomic_t refcnt;

	unsigned long flags;
};

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_ROOT, /* This CSS is the root of the subsystem */
};

/*
 * Call css_get() to hold a reference on the cgroup.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	/* We don't need to reference count the root state */
	if (!test_bit(CSS_ROOT, &css->flags))
		atomic_inc(&css->refcnt);
}

/*
 * css_put() should be called to release a reference taken by
 * css_get()
 */
extern void __css_put(struct cgroup_subsys_state *css);
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!test_bit(CSS_ROOT, &css->flags))
		__css_put(css);
}
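
/*
 * Illustrative sketch (not part of the original header): a subsystem that
 * caches a css pointer beyond the scope of a single callback would pin it
 * with css_get() and drop it with css_put() once done. The foo_* names are
 * hypothetical.
 *
 *	static struct cgroup_subsys_state *foo_cached_css;
 *
 *	static void foo_cache_css(struct cgroup_subsys_state *css)
 *	{
 *		css_get(css);
 *		foo_cached_css = css;
 *	}
 *
 *	static void foo_drop_css(void)
 *	{
 *		css_put(foo_cached_css);
 *		foo_cached_css = NULL;
 *	}
 */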

/* bits in struct cgroup flags field */
enum {
	/* Control Group is dead */
	CGRP_REMOVED,
	/* Control Group has previously had a child cgroup or a task,
	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
	CGRP_RELEASABLE,
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
};

struct cgroup {
	unsigned long flags;		/* "unsigned long" so bitops work */

	/* count users of this cgroup. >0 means busy, but doesn't
	 * necessarily indicate the number of tasks in the
	 * cgroup */
	atomic_t count;

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */

	struct cgroup *parent;		/* my parent */
	struct dentry *dentry;		/* cgroup fs entry */

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroupfs_root *root;
	struct cgroup *top_cgroup;

	/*
	 * List of cg_cgroup_links pointing at css_sets with
	 * tasks in this cgroup. Protected by css_set_lock
	 */
	struct list_head css_sets;

	/*
	 * Linked list running through all cgroups that can
	 * potentially be reaped by the release agent. Protected by
	 * release_list_lock
	 */
	struct list_head release_list;
};

/* A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire
 * cgroup set for a task.
 */
struct css_set {
	/* Reference count */
	struct kref ref;

	/*
	 * List running through all cgroup groups in the same hash
	 * slot. Protected by css_set_lock
	 */
	struct hlist_node hlist;

	/*
	 * List running through all tasks using this cgroup
	 * group. Protected by css_set_lock
	 */
	struct list_head tasks;

	/*
	 * List of cg_cgroup_link objects on link chains from
	 * cgroups referenced from this css_set. Protected by
	 * css_set_lock
	 */
	struct list_head cg_links;

	/*
	 * Set of subsystem states, one for each subsystem. This array
	 * is immutable after creation apart from the init_css_set
	 * during subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
};

/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files
 */
struct cgroup_map_cb {
	int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
	void *state;
};

/* struct cftype:
 *
 * The files in the cgroup filesystem mostly have simple read/write
 * handling that common helper functions take care of. Nevertheless, some
 * cases (such as reading the tasks file) are special, so this structure
 * is defined for every kind of file.
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

#define MAX_CFTYPE_NAME 64
struct cftype {
	/* By convention, the name should begin with the name of the
	 * subsystem, followed by a period */
	char name[MAX_CFTYPE_NAME];
	int private;

	/*
	 * If non-zero, defines the maximum length of string that can
	 * be passed to write_string; defaults to 64
	 */
	size_t max_write_len;

	int (*open)(struct inode *inode, struct file *file);
	ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos);
	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read()
	 */
	u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_map() is used for defining a map of key/value
	 * pairs. It should call cb->fill(cb, key, value) for each
	 * entry. The key/value pairs (and their ordering) should not
	 * change between reboots.
	 */
	int (*read_map)(struct cgroup *cont, struct cftype *cft,
			struct cgroup_map_cb *cb);
	/*
	 * read_seq_string() is used for outputting a simple sequence
	 * using seqfile.
	 */
	int (*read_seq_string)(struct cgroup *cont, struct cftype *cft,
			       struct seq_file *m);

	ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
			 struct file *file,
			 const char __user *buf, size_t nbytes, loff_t *ppos);
	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);
	/*
	 * write_string() is passed a nul-terminated kernelspace
	 * buffer of maximum length determined by max_write_len.
	 * Returns 0 or -ve error code.
	 */
	int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
			    const char *buffer);
	/*
	 * trigger() callback can be used to get some kick from the
	 * userspace, when the actual string written is not important
	 * at all. The private field can be used to determine the
	 * kick type for multiplexing.
	 */
	int (*trigger)(struct cgroup *cgrp, unsigned int event);

	int (*release)(struct inode *inode, struct file *file);
};
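
/*
 * Illustrative sketch (not part of the original header): a control file
 * backed by the read_u64/write_u64 shortcuts. The foo_* names and the
 * foo_limit variable are hypothetical; file-name prefixing conventions
 * are glossed over here.
 *
 *	static u64 foo_limit;
 *
 *	static u64 foo_limit_read(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		return foo_limit;
 *	}
 *
 *	static int foo_limit_write(struct cgroup *cgrp, struct cftype *cft,
 *				   u64 val)
 *	{
 *		foo_limit = val;
 *		return 0;
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "limit",
 *			.read_u64 = foo_limit_read,
 *			.write_u64 = foo_limit_write,
 *		},
 *	};
 */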

struct cgroup_scanner {
	struct cgroup *cg;
	int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
	void (*process_task)(struct task_struct *p,
			     struct cgroup_scanner *scan);
	struct ptr_heap *heap;
};

/* Add a new file to the given cgroup directory. Should only be
 * called by subsystems from within a populate() method */
int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
		    const struct cftype *cft);

/* Add a set of new files to the given cgroup directory. Should
 * only be called by subsystems from within a populate() method */
int cgroup_add_files(struct cgroup *cgrp,
		     struct cgroup_subsys *subsys,
		     const struct cftype cft[],
		     int count);
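
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * subsystem's populate() method registering the foo_files[] array from the
 * cftype sketch above.
 *
 *	static int foo_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 *	{
 *		return cgroup_add_files(cgrp, ss, foo_files,
 *					ARRAY_SIZE(foo_files));
 *	}
 */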

int cgroup_is_removed(const struct cgroup *cgrp);
int cgroup_lock_live_group(struct cgroup *cgrp);
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int cgroup_task_count(const struct cgroup *cgrp);

/* Return true if the cgroup is a descendant of the current cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp);
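
/*
 * Illustrative sketch (not part of the original header): generating the
 * hierarchy-relative path of a cgroup with cgroup_path(); the buffer size
 * here is arbitrary, and cgroup_path() returns 0 on success.
 *
 *	char path[256];
 *
 *	if (!cgroup_path(cgrp, path, sizeof(path)))
 *		printk(KERN_INFO "cgroup path: %s\n", path);
 */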

/* Control Group subsystem type. See Documentation/cgroups.txt for details */
struct cgroup_subsys {
	struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
					      struct cgroup *cgrp);
	void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
	int (*can_attach)(struct cgroup_subsys *ss,
			  struct cgroup *cgrp, struct task_struct *tsk);
	void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
		       struct cgroup *old_cgrp, struct task_struct *tsk);
	void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
	void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
	int (*populate)(struct cgroup_subsys *ss,
			struct cgroup *cgrp);
	void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
	void (*bind)(struct cgroup_subsys *ss, struct cgroup *root);
	/*
	 * This routine is called with the task_lock of mm->owner held
	 */
	void (*mm_owner_changed)(struct cgroup_subsys *ss,
				 struct cgroup *old,
				 struct cgroup *new);
	int subsys_id;
	int active;
	int disabled;
	int early_init;
#define MAX_CGROUP_TYPE_NAMELEN 32
	const char *name;

	/* Protected by RCU */
	struct cgroupfs_root *root;

	struct list_head sibling;

	void *private;
};
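
/*
 * Illustrative sketch (not part of the original header): the rough shape of
 * a hypothetical "foo" subsystem definition. foo_create, foo_destroy and
 * foo_populate are assumed to exist, and foo_subsys_id would come from
 * adding SUBSYS(foo) to linux/cgroup_subsys.h.
 *
 *	struct cgroup_subsys foo_subsys = {
 *		.name = "foo",
 *		.create = foo_create,
 *		.destroy = foo_destroy,
 *		.populate = foo_populate,
 *		.subsys_id = foo_subsys_id,
 *	};
 */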

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

static inline struct cgroup_subsys_state *cgroup_subsys_state(
	struct cgroup *cgrp, int subsys_id)
{
	return cgrp->subsys[subsys_id];
}

static inline struct cgroup_subsys_state *task_subsys_state(
	struct task_struct *task, int subsys_id)
{
	return rcu_dereference(task->cgroups->subsys[subsys_id]);
}

static inline struct cgroup* task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_subsys_state(task, subsys_id)->cgroup;
}
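
/*
 * Illustrative sketch (not part of the original header): task_subsys_state()
 * uses rcu_dereference(), so reading another task's cgroup state is normally
 * done under rcu_read_lock() (or with the relevant task_lock/cgroup_lock
 * held). foo_subsys_id is the hypothetical id from the sketches above.
 *
 *	struct cgroup *cgrp;
 *
 *	rcu_read_lock();
 *	cgrp = task_cgroup(tsk, foo_subsys_id);
 *	rcu_read_unlock();
 */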

int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss);

/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
	struct list_head *cg_link;
	struct list_head *task;
};

/* To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a cgroup.
 * - cgroup_scan_tasks() holds the css_set_lock when calling the test_task()
 *   callback, but not while calling the process_task() callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
				     struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
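
/*
 * Illustrative sketch (not part of the original header): visiting each
 * member task of a cgroup with the iterator API described above;
 * do_something() is a hypothetical per-task handler.
 *
 *	struct cgroup_iter it;
 *	struct task_struct *tsk;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((tsk = cgroup_iter_next(cgrp, &it)))
 *		do_something(tsk);
 *	cgroup_iter_end(cgrp, &it);
 */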

int cgroup_attach_task(struct cgroup *, struct task_struct *);

#else /* !CONFIG_CGROUPS */

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_smp(void) {}
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_fork_callbacks(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}
static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}

static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry)
{
	return -EINVAL;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_MM_OWNER
extern void
cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new);
#else /* !CONFIG_MM_OWNER */
static inline void
cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
{
}
#endif /* CONFIG_MM_OWNER */

#endif /* _LINUX_CGROUP_H */