cgroup_freezer.c

/*
 * cgroup_freezer.c - control group freezer subsystem
 *
 * Copyright IBM Corporation, 2007
 *
 * Author : Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/module.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>

enum freezer_state {
        STATE_RUNNING = 0,
        STATE_FREEZING,
        STATE_FROZEN,
};

struct freezer {
        struct cgroup_subsys_state css;
        enum freezer_state state;
        spinlock_t lock; /* protects _writes_ to state */
};

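/*
 * Accessors that recover the enclosing struct freezer from the embedded
 * cgroup_subsys_state, starting either from a cgroup or from a task.
 */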
static inline struct freezer *cgroup_freezer(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, freezer_subsys_id),
                            struct freezer, css);
}

static inline struct freezer *task_freezer(struct task_struct *task)
{
        return container_of(task_subsys_state(task, freezer_subsys_id),
                            struct freezer, css);
}

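/*
 * cgroup_frozen() reports whether @task currently belongs to a cgroup that
 * has completed the transition to FROZEN.  task_lock() keeps the task from
 * changing cgroups while its freezer state is sampled.
 */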
int cgroup_frozen(struct task_struct *task)
{
        struct freezer *freezer;
        enum freezer_state state;

        task_lock(task);
        freezer = task_freezer(task);
        state = freezer->state;
        task_unlock(task);

        return state == STATE_FROZEN;
}

/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 */
static const char *freezer_state_strs[] = {
        "RUNNING",
        "FREEZING",
        "FROZEN",
};

/*
 * State diagram
 * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parentheses are state labels. The rest are edge labels.
 *
 * (RUNNING) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
 *    ^ ^                    |                     |
 *    | \______RUNNING_______/                     |
 *    \___________________RUNNING__________________/
 */

struct cgroup_subsys freezer_subsys;

/* Locks taken and their ordering
 * ------------------------------
 * css_set_lock
 * cgroup_mutex (AKA cgroup_lock)
 * task->alloc_lock (AKA task_lock)
 * freezer->lock
 * task->sighand->siglock
 *
 * cgroup code forces css_set_lock to be taken before task->alloc_lock
 *
 * freezer_create(), freezer_destroy():
 *    cgroup_mutex [ by cgroup core ]
 *
 * can_attach():
 *    cgroup_mutex
 *
 * cgroup_frozen():
 *    task->alloc_lock (to get task's cgroup)
 *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 *    task->alloc_lock (to get task's cgroup)
 *    freezer->lock
 *    sighand->siglock (if the cgroup is freezing)
 *
 * freezer_read():
 *    cgroup_mutex
 *    freezer->lock
 *    read_lock css_set_lock (cgroup iterator start)
 *
 * freezer_write() (freeze):
 *    cgroup_mutex
 *    freezer->lock
 *    read_lock css_set_lock (cgroup iterator start)
 *    sighand->siglock
 *
 * freezer_write() (unfreeze):
 *    cgroup_mutex
 *    freezer->lock
 *    read_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock (to prevent races with freeze_task())
 *    sighand->siglock
 */

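/*
 * freezer_create() and freezer_destroy() are the subsystem's create and
 * destroy callbacks; both run under cgroup_mutex, taken by the cgroup core
 * (see the lock table above).  A newly created cgroup starts out RUNNING.
 */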
static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
                                                  struct cgroup *cgroup)
{
        struct freezer *freezer;

        freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
        if (!freezer)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&freezer->lock);
        freezer->state = STATE_RUNNING;
        return &freezer->css;
}

static void freezer_destroy(struct cgroup_subsys *ss,
                            struct cgroup *cgroup)
{
        kfree(cgroup_freezer(cgroup));
}

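/*
 * Attaching a task to a cgroup that is already FROZEN is refused: it would
 * put an unfrozen task into a group that userspace already observes as
 * fully frozen.
 */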
static int freezer_can_attach(struct cgroup_subsys *ss,
                              struct cgroup *new_cgroup,
                              struct task_struct *task)
{
        struct freezer *freezer;
        int retval = 0;

        /*
         * The call to cgroup_lock() in the freezer.state write method prevents
         * a write to that file racing against an attach, and hence the
         * can_attach() result will remain valid until the attach completes.
         */
        freezer = cgroup_freezer(new_cgroup);
        if (freezer->state == STATE_FROZEN)
                retval = -EBUSY;

        return retval;
}

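/*
 * Called from the fork path for every new task (without cgroup_mutex, see
 * the lock table above).  A child born into a cgroup that is still FREEZING
 * is flagged for freezing as well, so it cannot escape the freeze already
 * in progress.
 */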
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
        struct freezer *freezer;

        task_lock(task);
        freezer = task_freezer(task);
        task_unlock(task);

        BUG_ON(freezer->state == STATE_FROZEN);

        spin_lock_irq(&freezer->lock);
        /* Locking avoids race with FREEZING -> RUNNING transitions. */
        if (freezer->state == STATE_FREEZING)
                freeze_task(task, true);
        spin_unlock_irq(&freezer->lock);
}

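/*
 * Count how many of the cgroup's tasks are frozen, or stopped/traced with
 * the freeze flag set (those will enter the refrigerator as soon as they
 * next run).  When every task qualifies, complete the FREEZING -> FROZEN
 * transition.
 */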
/*
 * caller must hold freezer->lock
 */
static void check_if_frozen(struct cgroup *cgroup,
                            struct freezer *freezer)
{
        struct cgroup_iter it;
        struct task_struct *task;
        unsigned int nfrozen = 0, ntotal = 0;

        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                ntotal++;
                /*
                 * Task is frozen or will freeze immediately when next it gets
                 * woken
                 */
                if (frozen(task) ||
                    (task_is_stopped_or_traced(task) && freezing(task)))
                        nfrozen++;
        }

        /*
         * Transition to FROZEN when no new tasks can be added ensures
         * that we never exist in the FROZEN state while there are unfrozen
         * tasks.
         */
        if (nfrozen == ntotal)
                freezer->state = STATE_FROZEN;

        cgroup_iter_end(cgroup, &it);
}

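/*
 * Read side of the freezer.state file.  A cgroup left in FREEZING by an
 * earlier, partially successful write is re-checked here, so the lazy
 * FREEZING -> FROZEN promotion described below can also happen on read.
 */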
static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
                        struct seq_file *m)
{
        struct freezer *freezer;
        enum freezer_state state;

        if (!cgroup_lock_live_group(cgroup))
                return -ENODEV;

        freezer = cgroup_freezer(cgroup);
        spin_lock_irq(&freezer->lock);
        state = freezer->state;
        if (state == STATE_FREEZING) {
                /* We change from FREEZING to FROZEN lazily if the cgroup was
                 * only partially frozen when we exited write. */
                check_if_frozen(cgroup, freezer);
                state = freezer->state;
        }
        spin_unlock_irq(&freezer->lock);
        cgroup_unlock();

        seq_puts(m, freezer_state_strs[state]);
        seq_putc(m, '\n');
        return 0;
}

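/*
 * Move the cgroup into FREEZING and ask the freezer core to freeze each of
 * its tasks.  Returns -EBUSY when some tasks cannot be frozen right away;
 * userspace is expected to repeat the freezer.state write.  Called with
 * freezer->lock held.
 */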
static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
        struct cgroup_iter it;
        struct task_struct *task;
        unsigned int num_cant_freeze_now = 0;

        freezer->state = STATE_FREEZING;
        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                if (!freeze_task(task, true))
                        continue;
                if (task_is_stopped_or_traced(task) && freezing(task))
                        /*
                         * The freeze flag is set so these tasks will
                         * immediately go into the fridge upon waking.
                         */
                        continue;
                if (!freezing(task) && !freezer_should_skip(task))
                        num_cant_freeze_now++;
        }
        cgroup_iter_end(cgroup, &it);

        return num_cant_freeze_now ? -EBUSY : 0;
}

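/*
 * Thaw every task in the cgroup and return it to RUNNING.  Called with
 * freezer->lock held for both the FREEZING -> RUNNING and the
 * FROZEN -> RUNNING transitions.
 */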
static int unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
        struct cgroup_iter it;
        struct task_struct *task;

        cgroup_iter_start(cgroup, &it);
        while ((task = cgroup_iter_next(cgroup, &it))) {
                int do_wake;

                task_lock(task);
                do_wake = __thaw_process(task);
                task_unlock(task);
                if (do_wake)
                        wake_up_process(task);
        }
        cgroup_iter_end(cgroup, &it);

        freezer->state = STATE_RUNNING;
        return 0;
}

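/*
 * Drive the transitions from the state diagram above.  check_if_frozen()
 * may first complete a pending FREEZING -> FROZEN promotion; the switch
 * then either starts freezing the group or thaws it, with freezer->lock
 * held throughout.
 */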
static int freezer_change_state(struct cgroup *cgroup,
                                enum freezer_state goal_state)
{
        struct freezer *freezer;
        int retval = 0;

        freezer = cgroup_freezer(cgroup);
        spin_lock_irq(&freezer->lock);
        check_if_frozen(cgroup, freezer); /* may update freezer->state */
        if (goal_state == freezer->state)
                goto out;

        switch (freezer->state) {
        case STATE_RUNNING:
                retval = try_to_freeze_cgroup(cgroup, freezer);
                break;
        case STATE_FREEZING:
                if (goal_state == STATE_FROZEN) {
                        /* Userspace is retrying after
                         * "/bin/echo FROZEN > freezer.state" returned -EBUSY */
                        retval = try_to_freeze_cgroup(cgroup, freezer);
                        break;
                }
                /* state == FREEZING and goal_state == RUNNING, so unfreeze */
        case STATE_FROZEN:
                retval = unfreeze_cgroup(cgroup, freezer);
                break;
        default:
                break;
        }
out:
        spin_unlock_irq(&freezer->lock);
        return retval;
}

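/*
 * Write handler for the freezer.state file.  Only "RUNNING" and "FROZEN"
 * are accepted from userspace; "FREEZING" is a transient state that can
 * only be read back, never requested.
 */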
static int freezer_write(struct cgroup *cgroup,
                         struct cftype *cft,
                         const char *buffer)
{
        int retval;
        enum freezer_state goal_state;

        if (strcmp(buffer, freezer_state_strs[STATE_RUNNING]) == 0)
                goal_state = STATE_RUNNING;
        else if (strcmp(buffer, freezer_state_strs[STATE_FROZEN]) == 0)
                goal_state = STATE_FROZEN;
        else
                return -EIO;

        if (!cgroup_lock_live_group(cgroup))
                return -ENODEV;
        retval = freezer_change_state(cgroup, goal_state);
        cgroup_unlock();
        return retval;
}

static struct cftype files[] = {
        {
                .name = "state",
                .read_seq_string = freezer_read,
                .write_string = freezer_write,
        },
};

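/*
 * Register the single control file.  The cgroup core prefixes the name with
 * the subsystem, so it appears as freezer.state in each cgroup directory.
 */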
static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
}

struct cgroup_subsys freezer_subsys = {
        .name           = "freezer",
        .create         = freezer_create,
        .destroy        = freezer_destroy,
        .populate       = freezer_populate,
        .subsys_id      = freezer_subsys_id,
        .can_attach     = freezer_can_attach,
        .attach         = NULL,
        .fork           = freezer_fork,
        .exit           = NULL,
};
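
/*
 * Illustrative userspace usage (not part of the original file; the mount
 * point and the "jobs" group name are arbitrary examples):
 *
 *    mount -t cgroup -o freezer freezer /cgroups
 *    mkdir /cgroups/jobs
 *    echo $some_pid > /cgroups/jobs/tasks
 *    echo FROZEN > /cgroups/jobs/freezer.state     (repeat on -EBUSY)
 *    cat /cgroups/jobs/freezer.state
 *    echo RUNNING > /cgroups/jobs/freezer.state
 */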