device_cgroup.c

/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

#define DEV_BLOCK 1
#define DEV_CHAR  2
#define DEV_ALL   4  /* this represents all devices */

static DEFINE_MUTEX(devcgroup_mutex);

enum devcg_behavior {
	DEVCG_DEFAULT_NONE,
	DEVCG_DEFAULT_ALLOW,
	DEVCG_DEFAULT_DENY,
};

/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */
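/*
 * A lockless reader of the exception list (as in devcgroup_seq_read()
 * below) follows the usual RCU pattern:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(ex, &devcgroup->exceptions, list)
 *		... use ex ...
 *	rcu_read_unlock();
 *
 * while writers hold devcgroup_mutex and manipulate the list with the
 * _rcu list helpers.
 */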
struct dev_exception_item {
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
	struct rcu_head rcu;
};

struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head exceptions;
	enum devcg_behavior behavior;
};

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct dev_cgroup, css) : NULL;
}

static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
	return css_to_devcgroup(cgroup_css(cgroup, devices_subsys_id));
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_css(task, devices_subsys_id));
}

struct cgroup_subsys devices_subsys;

static int devcgroup_can_attach(struct cgroup_subsys_state *new_css,
				struct cgroup_taskset *set)
{
	struct task_struct *task = cgroup_taskset_first(set);

	if (current != task && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *excopy, *walk;

	lockdep_assert_held(&devcgroup_mutex);

	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
	}

	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
	return 0;
}
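/*
 * Duplicate entries are merged rather than duplicated: if an exception
 * for the same type and major:minor already exists, only the access
 * bits are ORed in.  For example, adding "b 8:0 r" and then "b 8:0 w"
 * leaves a single exception equivalent to "b 8:0 rw".
 */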
/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}
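/*
 * Removal is the mirror image of the merge above: only the requested
 * access bits are cleared, and the entry is freed once no bits remain.
 * For example, removing "b 8:0 w" from an existing "b 8:0 rw" exception
 * leaves "b 8:0 r"; removing "b 8:0 r" afterwards drops the entry.
 */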
static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	lockdep_assert_held(&devcgroup_mutex);

	__dev_exception_clean(dev_cgroup);
}

static inline bool is_devcg_online(const struct dev_cgroup *devcg)
{
	return (devcg->behavior != DEVCG_DEFAULT_NONE);
}

/**
 * devcgroup_online - initializes devcgroup's behavior and exceptions based on
 *		      parent's
 * @css: css getting online
 * returns 0 in case of success, error code otherwise
 */
static int devcgroup_online(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
	struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css_parent(css));
	int ret = 0;

	mutex_lock(&devcgroup_mutex);
	if (parent_dev_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		if (!ret)
			dev_cgroup->behavior = parent_dev_cgroup->behavior;
	}
	mutex_unlock(&devcgroup_mutex);

	return ret;
}

static void devcgroup_offline(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	mutex_lock(&devcgroup_mutex);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
	mutex_unlock(&devcgroup_mutex);
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *
devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;

	return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	__dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}

#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13
#define ACCLEN 4

static void set_access(char *acc, short access)
{
	int idx = 0;

	memset(acc, 0, ACCLEN);
	if (access & ACC_READ)
		acc[idx++] = 'r';
	if (access & ACC_WRITE)
		acc[idx++] = 'w';
	if (access & ACC_MKNOD)
		acc[idx++] = 'm';
}

static char type_to_char(short type)
{
	if (type == DEV_ALL)
		return 'a';
	if (type == DEV_CHAR)
		return 'c';
	if (type == DEV_BLOCK)
		return 'b';
	return 'X';
}

static void set_majmin(char *str, unsigned m)
{
	if (m == ~0)
		strcpy(str, "*");
	else
		sprintf(str, "%u", m);
}
static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
			      struct seq_file *m)
{
	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
	struct dev_exception_item *ex;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	/*
	 * To preserve compatibility:
	 * - Only show the "all devices" entry when the default policy is
	 *   to allow
	 * - List the exceptions in case the default policy is to deny
	 * This way, the file remains a "whitelist of devices"
	 */
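	/*
	 * The resulting devices.list output therefore looks like either
	 *
	 *	a *:* rwm
	 *
	 * for an allow-by-default group, or one line per exception, e.g.
	 *
	 *	c 1:3 rwm
	 *	b 8:* r
	 *
	 * for a deny-by-default group (example entries only).
	 */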
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		set_access(acc, ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
			   maj, min, acc);
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
				   maj, min, acc);
		}
	}
	rcu_read_unlock();

	return 0;
}

/**
 * may_access - verifies if a new exception is part of what is allowed
 *		by a dev cgroup based on the default policy +
 *		exceptions. This is used to make sure a child cgroup
 *		won't have more privileges than its parent or to
 *		verify if a certain access is allowed.
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 * @behavior: behavior of the exception
 */
static bool may_access(struct dev_cgroup *dev_cgroup,
		       struct dev_exception_item *refex,
		       enum devcg_behavior behavior)
{
	struct dev_exception_item *ex;
	bool match = false;

	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&devcgroup_mutex),
			   "device_cgroup::may_access() called without proper synchronization");

	list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
		if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
			continue;
		if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
			continue;
		if (ex->major != ~0 && ex->major != refex->major)
			continue;
		if (ex->minor != ~0 && ex->minor != refex->minor)
			continue;
		if (refex->access & (~ex->access))
			continue;
		match = true;
		break;
	}

	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		if (behavior == DEVCG_DEFAULT_ALLOW) {
			/* the exception will deny access to certain devices */
			return true;
		} else {
			/* the exception will allow access to certain devices */
			if (match)
				/*
				 * a new exception allowing access shouldn't
				 * match a parent's exception
				 */
				return false;
			return true;
		}
	} else {
		/* only behavior == DEVCG_DEFAULT_DENY allowed here */
		if (match)
			/* parent has an exception that matches the proposed */
			return true;
		else
			return false;
	}

	return false;
}
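/*
 * Example (illustrative values): for a cgroup whose behavior is
 * DEVCG_DEFAULT_DENY and whose only exception is "c 1:3 rw", checking a
 * read-only access to char device 1:3 matches the exception and is
 * allowed, while a mknod request for the same device is rejected,
 * because ACC_MKNOD is not among the exception's access bits.
 */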
/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device
 */
static int parent_has_perm(struct dev_cgroup *childcg,
			   struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));

	if (!parent)
		return 1;
	return may_access(parent, ex, childcg->behavior);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
	if (!parent)
		return 1;
	return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/**
 * revalidate_active_exceptions - walks through the active exception list and
 *				  revalidates the exceptions based on parent's
 *				  behavior and exceptions. The exceptions that
 *				  are no longer valid will be removed.
 *				  Called with devcgroup_mutex held.
 * @devcg: cgroup which exceptions will be checked
 *
 * This is one of the three key functions for hierarchy implementation.
 * This function is responsible for re-evaluating all the cgroup's active
 * exceptions due to a parent's exception change.
 * Refer to Documentation/cgroups/devices.txt for more details.
 */
static void revalidate_active_exceptions(struct dev_cgroup *devcg)
{
	struct dev_exception_item *ex;
	struct list_head *this, *tmp;

	list_for_each_safe(this, tmp, &devcg->exceptions) {
		ex = container_of(this, struct dev_exception_item, list);
		if (!parent_has_perm(devcg, ex))
			dev_exception_rm(devcg, ex);
	}
}

/**
 * propagate_exception - propagates a new exception to the children
 * @devcg_root: device cgroup that added a new exception
 * @ex: new exception to be propagated
 *
 * returns: 0 in case of success, != 0 in case of error
 */
static int propagate_exception(struct dev_cgroup *devcg_root,
			       struct dev_exception_item *ex)
{
	struct cgroup *root = devcg_root->css.cgroup, *pos;
	int rc = 0;

	rcu_read_lock();

	cgroup_for_each_descendant_pre(pos, root) {
		struct dev_cgroup *devcg = cgroup_to_devcgroup(pos);

		/*
		 * Because devcgroup_mutex is held, no devcg will become
		 * online or offline during the tree walk (see on/offline
		 * methods), and online ones are safe to access outside RCU
		 * read lock without bumping refcnt.
		 */
		if (!is_devcg_online(devcg))
			continue;

		rcu_read_unlock();

		/*
		 * in case both the root's and devcg's behavior are allow, a
		 * new restriction means adding to the exception list
		 */
		if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
			rc = dev_exception_add(devcg, ex);
			if (rc)
				/*
				 * The RCU read lock was dropped above, so
				 * return directly instead of breaking out
				 * into the final rcu_read_unlock().
				 */
				return rc;
		} else {
			/*
			 * in the other possible cases:
			 * root's behavior: allow, devcg's: deny
			 * root's behavior: deny, devcg's: deny
			 * the exception will be removed
			 */
			dev_exception_rm(devcg, ex);
		}
		revalidate_active_exceptions(devcg);

		rcu_read_lock();
	}

	rcu_read_unlock();
	return rc;
}
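/*
 * For example, if both the root group and a child are allow-by-default
 * and a new rule is written to the root's devices.deny file, the walk
 * above appends that rule to the child's exception list and then
 * revalidates the child's own exceptions against the now stricter
 * parent.
 */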
static inline bool has_children(struct dev_cgroup *devcgroup)
{
	struct cgroup *cgrp = devcgroup->css.cgroup;

	return !list_empty(&cgrp->children);
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
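/*
 * Rules written to devices.allow and devices.deny have the form
 * "<type> <major>:<minor> <access>", where type is 'b' or 'c' (or a bare
 * 'a' to switch the default behavior), major/minor may be '*' wildcards,
 * and access is any combination of 'r', 'w' and 'm'.  For example (paths
 * assume the conventional /sys/fs/cgroup/devices mount point and an
 * example group "foo"):
 *
 *	echo 'c 1:3 rw' > /sys/fs/cgroup/devices/foo/devices.allow
 *	echo 'b *:* m'  > /sys/fs/cgroup/devices/foo/devices.deny
 */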
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, const char *buffer)
{
	const char *b;
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc = 0;
	struct dev_exception_item ex;
	struct dev_cgroup *parent = css_to_devcgroup(css_parent(&devcgroup->css));

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(&ex, 0, sizeof(ex));
	b = buffer;

	switch (*b) {
	case 'a':
		switch (filetype) {
		case DEVCG_ALLOW:
			if (has_children(devcgroup))
				return -EINVAL;

			if (!may_allow_all(parent))
				return -EPERM;
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			if (!parent)
				break;

			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc)
				return rc;
			break;
		case DEVCG_DENY:
			if (has_children(devcgroup))
				return -EINVAL;

			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case 'b':
		ex.type = DEV_BLOCK;
		break;
	case 'c':
		ex.type = DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	if (!isspace(*b))
		return -EINVAL;
	b++;
	if (*b == '*') {
		ex.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		ex.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			ex.access |= ACC_READ;
			break;
		case 'w':
			ex.access |= ACC_WRITE;
			break;
		case 'm':
			ex.access |= ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
			return -EINVAL;
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
		if (!parent_has_perm(devcgroup, &ex))
			return -EPERM;
		/*
		 * If the default policy is to allow by default, try to remove
		 * a matching exception instead.  And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
			dev_exception_rm(devcgroup, &ex);
			return 0;
		}
		rc = dev_exception_add(devcgroup, &ex);
		break;
	case DEVCG_DENY:
		/*
		 * If the default policy is to deny by default, try to remove
		 * a matching exception instead.  And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
			dev_exception_rm(devcgroup, &ex);
		else
			rc = dev_exception_add(devcgroup, &ex);

		if (rc)
			break;
		/* we only propagate new restrictions */
		rc = propagate_exception(devcgroup, &ex);
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}
static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
				  const char *buffer)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
					 cft->private, buffer);
	mutex_unlock(&devcgroup_mutex);
	return retval;
}

static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write_string = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write_string = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.read_seq_string = devcgroup_seq_read,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};

struct cgroup_subsys devices_subsys = {
	.name = "devices",
	.can_attach = devcgroup_can_attach,
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
	.css_online = devcgroup_online,
	.css_offline = devcgroup_offline,
	.subsys_id = devices_subsys_id,
	.base_cftypes = dev_cgroup_files,
};
/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
 *
 * The check is made against the device cgroup of the calling task.
 *
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
					short access)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_exception_item ex;
	int rc;

	memset(&ex, 0, sizeof(ex));
	ex.type = type;
	ex.major = major;
	ex.minor = minor;
	ex.access = access;

	rcu_read_lock();
	dev_cgroup = task_devcgroup(current);
	rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
	rcu_read_unlock();

	if (!rc)
		return -EPERM;

	return 0;
}
int __devcgroup_inode_permission(struct inode *inode, int mask)
{
	short type, access = 0;

	if (S_ISBLK(inode->i_mode))
		type = DEV_BLOCK;
	if (S_ISCHR(inode->i_mode))
		type = DEV_CHAR;
	if (mask & MAY_WRITE)
		access |= ACC_WRITE;
	if (mask & MAY_READ)
		access |= ACC_READ;

	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
					    access);
}
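/*
 * For instance, a process opening /dev/null (char device 1:3) for
 * writing reaches this path with type == DEV_CHAR, major 1, minor 3 and
 * access == ACC_WRITE; the request is then checked against the caller's
 * device cgroup by __devcgroup_check_permission().
 */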
int devcgroup_inode_mknod(int mode, dev_t dev)
{
	short type;

	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	if (S_ISBLK(mode))
		type = DEV_BLOCK;
	else
		type = DEV_CHAR;

	return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
					    ACC_MKNOD);
}