#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
#include <linux/rwsem.h>

#ifdef CONFIG_CGROUPS

struct cgroupfs_root;
struct cgroup_subsys;
struct inode;
struct cgroup;

extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_lock(void);
extern bool cgroup_lock_live_group(struct cgroup *cgrp);
extern void cgroup_unlock(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_fork_callbacks(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
extern int cgroupstats_build(struct cgroupstats *stats,
                             struct dentry *dentry);

extern struct file_operations proc_cgroup_operations;

/* Define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
        CGROUP_SUBSYS_COUNT
};
#undef SUBSYS
/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
        /* The cgroup that this subsystem is attached to. Useful
         * for subsystems that want to know about the cgroup
         * hierarchy structure */
        struct cgroup *cgroup;

        /* State maintained by the cgroup system to allow subsystems
         * to be "busy". Should be accessed via css_get(),
         * css_tryget() and css_put(). */
        atomic_t refcnt;

        unsigned long flags;
};

/* bits in struct cgroup_subsys_state flags field */
enum {
        CSS_ROOT,    /* This CSS is the root of the subsystem */
        CSS_REMOVED, /* This CSS is dead */
};
/*
 * Call css_get() to hold a reference on the css; it can be used
 * for a reference obtained via:
 * - an existing ref-counted reference to the css
 * - task->cgroups for a locked task
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
        /* We don't need to reference count the root state */
        if (!test_bit(CSS_ROOT, &css->flags))
                atomic_inc(&css->refcnt);
}

static inline bool css_is_removed(struct cgroup_subsys_state *css)
{
        return test_bit(CSS_REMOVED, &css->flags);
}

/*
 * Call css_tryget() to take a reference on a css if your existing
 * (known-valid) reference isn't already ref-counted. Returns false if
 * the css has been destroyed.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
        if (test_bit(CSS_ROOT, &css->flags))
                return true;
        while (!atomic_inc_not_zero(&css->refcnt)) {
                if (test_bit(CSS_REMOVED, &css->flags))
                        return false;
                cpu_relax();
        }
        return true;
}

/*
 * css_put() should be called to release a reference taken by
 * css_get() or css_tryget()
 */
extern void __css_put(struct cgroup_subsys_state *css);
static inline void css_put(struct cgroup_subsys_state *css)
{
        if (!test_bit(CSS_ROOT, &css->flags))
                __css_put(css);
}
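
/*
 * Illustrative usage sketch (comment only, not part of the interface):
 * pinning a css that was found via a non-counted reference, then
 * releasing it. my_work() is a hypothetical helper.
 *
 *      if (css_tryget(css)) {
 *              my_work(css);   -- css is guaranteed to stay alive here
 *              css_put(css);   -- release the reference taken above
 *      }
 *
 * css_tryget() returns false once the css has been destroyed, so callers
 * must be prepared to handle that case.
 */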
/* bits in struct cgroup flags field */
enum {
        /* Control Group is dead */
        CGRP_REMOVED,
        /* Control Group has previously had a child cgroup or a task,
         * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
        CGRP_RELEASABLE,
        /* Control Group requires release notifications to userspace */
        CGRP_NOTIFY_ON_RELEASE,
};

struct cgroup {
        unsigned long flags;    /* "unsigned long" so bitops work */

        /* count users of this cgroup. >0 means busy, but doesn't
         * necessarily indicate the number of tasks in the
         * cgroup */
        atomic_t count;

        /*
         * We link our 'sibling' struct into our parent's 'children'.
         * Our children link their 'sibling' into our 'children'.
         */
        struct list_head sibling;       /* my parent's children */
        struct list_head children;      /* my children */

        struct cgroup *parent;          /* my parent */
        struct dentry *dentry;          /* cgroup fs entry, RCU protected */

        /* Private pointers for each registered subsystem */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

        struct cgroupfs_root *root;
        struct cgroup *top_cgroup;

        /*
         * List of cg_cgroup_links pointing at css_sets with
         * tasks in this cgroup. Protected by css_set_lock
         */
        struct list_head css_sets;

        /*
         * Linked list running through all cgroups that can
         * potentially be reaped by the release agent. Protected by
         * release_list_lock
         */
        struct list_head release_list;

        /* pids_mutex protects the fields below */
        struct rw_semaphore pids_mutex;

        /* Array of process ids in the cgroup */
        pid_t *tasks_pids;

        /* How many files are using the current tasks_pids array */
        int pids_use_count;

        /* Length of the current tasks_pids array */
        int pids_length;

        /* For RCU-protected deletion */
        struct rcu_head rcu_head;
};
/* A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire
 * cgroup set for a task.
 */
struct css_set {
        /* Reference count */
        atomic_t refcount;

        /*
         * List running through all cgroup groups in the same hash
         * slot. Protected by css_set_lock
         */
        struct hlist_node hlist;

        /*
         * List running through all tasks using this cgroup
         * group. Protected by css_set_lock
         */
        struct list_head tasks;

        /*
         * List of cg_cgroup_link objects on link chains from
         * cgroups referenced from this css_set. Protected by
         * css_set_lock
         */
        struct list_head cg_links;

        /*
         * Set of subsystem states, one for each subsystem. This array
         * is immutable after creation apart from the init_css_set
         * during subsystem registration (at boot time).
         */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
};
/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files
 */
struct cgroup_map_cb {
        int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
        void *state;
};
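
/*
 * Illustrative sketch (comment only): a read_map() handler reports its
 * key/value pairs through cb->fill(). The statistic names and the
 * my_total()/my_failcnt() accessors below are hypothetical.
 *
 *      static int my_read_map(struct cgroup *cgrp, struct cftype *cft,
 *                             struct cgroup_map_cb *cb)
 *      {
 *              cb->fill(cb, "total", my_total(cgrp));
 *              cb->fill(cb, "failcnt", my_failcnt(cgrp));
 *              return 0;
 *      }
 */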
/* struct cftype:
 *
 * The files in the cgroup filesystem mostly have very simple read/write
 * handling; common functions take care of them. Nevertheless some cases
 * (e.g. reading the tasks file) are special, so this structure is defined
 * for every kind of file.
 *
 * When reading/writing to a file:
 * - the cgroup to use is file->f_dentry->d_parent->d_fsdata
 * - the 'cftype' of the file is file->f_dentry->d_fsdata
 */
#define MAX_CFTYPE_NAME 64
struct cftype {
        /* By convention, the name should begin with the name of the
         * subsystem, followed by a period */
        char name[MAX_CFTYPE_NAME];
        int private;

        /*
         * If non-zero, defines the maximum length of string that can
         * be passed to write_string; defaults to 64
         */
        size_t max_write_len;

        int (*open)(struct inode *inode, struct file *file);
        ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
                        struct file *file,
                        char __user *buf, size_t nbytes, loff_t *ppos);
        /*
         * read_u64() is a shortcut for the common case of returning a
         * single integer. Use it in place of read()
         */
        u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
        /*
         * read_s64() is a signed version of read_u64()
         */
        s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
        /*
         * read_map() is used for defining a map of key/value
         * pairs. It should call cb->fill(cb, key, value) for each
         * entry. The key/value pairs (and their ordering) should not
         * change between reboots.
         */
        int (*read_map)(struct cgroup *cont, struct cftype *cft,
                        struct cgroup_map_cb *cb);
        /*
         * read_seq_string() is used for outputting a simple sequence
         * using seqfile.
         */
        int (*read_seq_string)(struct cgroup *cont, struct cftype *cft,
                               struct seq_file *m);

        ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
                         struct file *file,
                         const char __user *buf, size_t nbytes, loff_t *ppos);
        /*
         * write_u64() is a shortcut for the common case of accepting
         * a single integer (as parsed by simple_strtoull) from
         * userspace. Use in place of write(); return 0 or error.
         */
        int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
        /*
         * write_s64() is a signed version of write_u64()
         */
        int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);
        /*
         * write_string() is passed a nul-terminated kernelspace
         * buffer of maximum length determined by max_write_len.
         * Returns 0 or -ve error code.
         */
        int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
                            const char *buffer);
        /*
         * trigger() callback can be used to get some kick from the
         * userspace, when the actual string written is not important
         * at all. The private field can be used to determine the
         * kick type for multiplexing.
         */
        int (*trigger)(struct cgroup *cgrp, unsigned int event);

        int (*release)(struct inode *inode, struct file *file);
};
struct cgroup_scanner {
        struct cgroup *cg;
        int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
        void (*process_task)(struct task_struct *p,
                             struct cgroup_scanner *scan);
        struct ptr_heap *heap;
};

/* Add a new file to the given cgroup directory. Should only be
 * called by subsystems from within a populate() method */
int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
                    const struct cftype *cft);

/* Add a set of new files to the given cgroup directory. Should
 * only be called by subsystems from within a populate() method */
int cgroup_add_files(struct cgroup *cgrp,
                     struct cgroup_subsys *subsys,
                     const struct cftype cft[],
                     int count);
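
/*
 * Illustrative sketch (comment only): a subsystem typically declares an
 * array of cftypes and registers it from its populate() method. The
 * "my_subsys" file name and the my_state() limit accessors below are
 * hypothetical.
 *
 *      static u64 my_limit_read(struct cgroup *cgrp, struct cftype *cft)
 *      {
 *              return my_state(cgrp)->limit;
 *      }
 *
 *      static int my_limit_write(struct cgroup *cgrp, struct cftype *cft,
 *                                u64 val)
 *      {
 *              my_state(cgrp)->limit = val;
 *              return 0;
 *      }
 *
 *      static struct cftype my_files[] = {
 *              {
 *                      .name = "my_subsys.limit",
 *                      .read_u64 = my_limit_read,
 *                      .write_u64 = my_limit_write,
 *              },
 *      };
 *
 *      static int my_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 *      {
 *              return cgroup_add_files(cgrp, ss, my_files,
 *                                      ARRAY_SIZE(my_files));
 *      }
 */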
int cgroup_is_removed(const struct cgroup *cgrp);

int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);

int cgroup_task_count(const struct cgroup *cgrp);

/* Return true if the cgroup is a descendant of the current cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp);
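
/*
 * Illustrative sketch (comment only), assuming cgroup_path() follows the
 * usual 0-on-success return convention:
 *
 *      char name[64];
 *
 *      if (!cgroup_path(cgrp, name, sizeof(name)))
 *              printk(KERN_DEBUG "cgroup %s\n", name);
 */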
/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details
 */
struct cgroup_subsys {
        struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
                                              struct cgroup *cgrp);
        void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
        void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
        int (*can_attach)(struct cgroup_subsys *ss,
                          struct cgroup *cgrp, struct task_struct *tsk);
        void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
                       struct cgroup *old_cgrp, struct task_struct *tsk);
        void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
        void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
        int (*populate)(struct cgroup_subsys *ss,
                        struct cgroup *cgrp);
        void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
        void (*bind)(struct cgroup_subsys *ss, struct cgroup *root);

        int subsys_id;
        int active;
        int disabled;
        int early_init;
#define MAX_CGROUP_TYPE_NAMELEN 32
        const char *name;

        /*
         * Protects sibling/children links of cgroups in this
         * hierarchy, plus protects which hierarchy (or none) the
         * subsystem is a part of (i.e. root/sibling). To avoid
         * potential deadlocks, the following operations should not be
         * undertaken while holding any hierarchy_mutex:
         *
         * - allocating memory
         * - initiating hotplug events
         */
        struct mutex hierarchy_mutex;
        struct lock_class_key subsys_key;

        /*
         * Link to parent, and list entry in parent's children.
         * Protected by this->hierarchy_mutex and cgroup_lock()
         */
        struct cgroupfs_root *root;
        struct list_head sibling;
};
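
/*
 * Illustrative sketch (comment only) of a minimal subsystem definition;
 * see Documentation/cgroups/cgroups.txt for the authoritative contract.
 * The "my_subsys" names are hypothetical, and my_create() is assumed to
 * return an ERR_PTR() value on failure.
 *
 *      struct cgroup_subsys my_subsys = {
 *              .name = "my_subsys",
 *              .subsys_id = my_subsys_subsys_id,
 *              .create = my_create,
 *              .destroy = my_destroy,
 *              .populate = my_populate,
 *      };
 */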
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

static inline struct cgroup_subsys_state *cgroup_subsys_state(
        struct cgroup *cgrp, int subsys_id)
{
        return cgrp->subsys[subsys_id];
}

static inline struct cgroup_subsys_state *task_subsys_state(
        struct task_struct *task, int subsys_id)
{
        return rcu_dereference(task->cgroups->subsys[subsys_id]);
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
{
        return task_subsys_state(task, subsys_id)->cgroup;
}
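
/*
 * Illustrative sketch (comment only): task_subsys_state() uses
 * rcu_dereference(), so lookups through a task should be done under
 * rcu_read_lock() (or with the task otherwise locked). my_subsys_id is
 * a hypothetical subsystem id.
 *
 *      struct cgroup *cgrp;
 *
 *      rcu_read_lock();
 *      cgrp = task_cgroup(task, my_subsys_id);
 *      ... use cgrp, or take a reference before dropping the lock ...
 *      rcu_read_unlock();
 */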
int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss,
                 char *nodename);

/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
        struct list_head *cg_link;
        struct list_head *task;
};

/* To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a cgroup.
 * - cgroup_scan_tasks() holds the css_set_lock when calling the test_task()
 *   callback, but not while calling the process_task() callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
                                     struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
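
/*
 * Illustrative sketch (comment only) of the start/next/end protocol
 * described above; visit() is a hypothetical per-task helper.
 *
 *      struct cgroup_iter it;
 *      struct task_struct *tsk;
 *
 *      cgroup_iter_start(cgrp, &it);
 *      while ((tsk = cgroup_iter_next(cgrp, &it)) != NULL)
 *              visit(tsk);
 *      cgroup_iter_end(cgrp, &it);
 */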
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);

#else /* !CONFIG_CGROUPS */

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_fork_callbacks(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}

static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}

static inline int cgroupstats_build(struct cgroupstats *stats,
                                    struct dentry *dentry)
{
        return -EINVAL;
}

#endif /* !CONFIG_CGROUPS */

#endif /* _LINUX_CGROUP_H */