device_cgroup.c

/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

#define DEV_BLOCK 1
#define DEV_CHAR  2
#define DEV_ALL   4  /* this represents all devices */

static DEFINE_MUTEX(devcgroup_mutex);

enum devcg_behavior {
        DEVCG_DEFAULT_NONE,
        DEVCG_DEFAULT_ALLOW,
        DEVCG_DEFAULT_DENY,
};

/*
 * exception list locking rules:
 *	hold devcgroup_mutex for update/read.
 *	hold rcu_read_lock() for read.
 */
struct dev_exception_item {
        u32 major, minor;
        short type;
        short access;
        struct list_head list;
        struct rcu_head rcu;
};

struct dev_cgroup {
        struct cgroup_subsys_state css;
        struct list_head exceptions;
        enum devcg_behavior behavior;
};

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
        return container_of(s, struct dev_cgroup, css);
}

static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
        return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
        return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
}

struct cgroup_subsys devices_subsys;

static int devcgroup_can_attach(struct cgroup *new_cgrp,
                                struct cgroup_taskset *set)
{
        struct task_struct *task = cgroup_taskset_first(set);

        if (current != task && !capable(CAP_SYS_ADMIN))
                return -EPERM;
        return 0;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
        struct dev_exception_item *ex, *tmp, *new;

        lockdep_assert_held(&devcgroup_mutex);

        list_for_each_entry(ex, orig, list) {
                new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
                if (!new)
                        goto free_and_exit;
                list_add_tail(&new->list, dest);
        }

        return 0;

free_and_exit:
        list_for_each_entry_safe(ex, tmp, dest, list) {
                list_del(&ex->list);
                kfree(ex);
        }
        return -ENOMEM;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
                             struct dev_exception_item *ex)
{
        struct dev_exception_item *excopy, *walk;

        lockdep_assert_held(&devcgroup_mutex);

        excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
        if (!excopy)
                return -ENOMEM;

        list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
                if (walk->type != ex->type)
                        continue;
                if (walk->major != ex->major)
                        continue;
                if (walk->minor != ex->minor)
                        continue;

                walk->access |= ex->access;
                kfree(excopy);
                excopy = NULL;
        }

        if (excopy != NULL)
                list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
        return 0;
}
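
/*
 * Example (illustrative only): because dev_exception_add() merges access bits
 * when an exception with the same type/major/minor already exists, writing
 * the rules "c 1:3 r" and then "c 1:3 w" to the same cgroup leaves a single
 * exception equivalent to "c 1:3 rw" rather than two list entries.
 */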

/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
                             struct dev_exception_item *ex)
{
        struct dev_exception_item *walk, *tmp;

        lockdep_assert_held(&devcgroup_mutex);

        list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
                if (walk->type != ex->type)
                        continue;
                if (walk->major != ex->major)
                        continue;
                if (walk->minor != ex->minor)
                        continue;

                walk->access &= ~ex->access;
                if (!walk->access) {
                        list_del_rcu(&walk->list);
                        kfree_rcu(walk, rcu);
                }
        }
}

static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
        struct dev_exception_item *ex, *tmp;

        list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
                list_del_rcu(&ex->list);
                kfree_rcu(ex, rcu);
        }
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
        lockdep_assert_held(&devcgroup_mutex);

        __dev_exception_clean(dev_cgroup);
}

static inline bool is_devcg_online(const struct dev_cgroup *devcg)
{
        return (devcg->behavior != DEVCG_DEFAULT_NONE);
}

/**
 * devcgroup_online - initializes devcgroup's behavior and exceptions based on
 *		      parent's
 * @cgroup: cgroup getting online
 * returns 0 in case of success, error code otherwise
 */
static int devcgroup_online(struct cgroup *cgroup)
{
        struct dev_cgroup *dev_cgroup, *parent_dev_cgroup = NULL;
        int ret = 0;

        mutex_lock(&devcgroup_mutex);
        dev_cgroup = cgroup_to_devcgroup(cgroup);
        if (cgroup->parent)
                parent_dev_cgroup = cgroup_to_devcgroup(cgroup->parent);
        if (parent_dev_cgroup == NULL)
                dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
        else {
                ret = dev_exceptions_copy(&dev_cgroup->exceptions,
                                          &parent_dev_cgroup->exceptions);
                if (!ret)
                        dev_cgroup->behavior = parent_dev_cgroup->behavior;
        }
        mutex_unlock(&devcgroup_mutex);

        return ret;
}

static void devcgroup_offline(struct cgroup *cgroup)
{
        struct dev_cgroup *dev_cgroup = cgroup_to_devcgroup(cgroup);

        mutex_lock(&devcgroup_mutex);
        dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
        mutex_unlock(&devcgroup_mutex);
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
{
        struct dev_cgroup *dev_cgroup;

        dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
        if (!dev_cgroup)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&dev_cgroup->exceptions);
        dev_cgroup->behavior = DEVCG_DEFAULT_NONE;

        return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup *cgroup)
{
        struct dev_cgroup *dev_cgroup;

        dev_cgroup = cgroup_to_devcgroup(cgroup);
        __dev_exception_clean(dev_cgroup);
        kfree(dev_cgroup);
}

#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13
#define ACCLEN 4

static void set_access(char *acc, short access)
{
        int idx = 0;

        memset(acc, 0, ACCLEN);
        if (access & ACC_READ)
                acc[idx++] = 'r';
        if (access & ACC_WRITE)
                acc[idx++] = 'w';
        if (access & ACC_MKNOD)
                acc[idx++] = 'm';
}

static char type_to_char(short type)
{
        if (type == DEV_ALL)
                return 'a';
        if (type == DEV_CHAR)
                return 'c';
        if (type == DEV_BLOCK)
                return 'b';
        return 'X';
}

static void set_majmin(char *str, unsigned m)
{
        if (m == ~0)
                strcpy(str, "*");
        else
                sprintf(str, "%u", m);
}

static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
                              struct seq_file *m)
{
        struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
        struct dev_exception_item *ex;
        char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

        rcu_read_lock();
        /*
         * To preserve the compatibility:
         * - Only show the "all devices" when the default policy is to allow
         * - List the exceptions in case the default policy is to deny
         * This way, the file remains as a "whitelist of devices"
         */
        if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                set_access(acc, ACC_MASK);
                set_majmin(maj, ~0);
                set_majmin(min, ~0);
                seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
                           maj, min, acc);
        } else {
                list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
                        set_access(acc, ex->access);
                        set_majmin(maj, ex->major);
                        set_majmin(min, ex->minor);
                        seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
                                   maj, min, acc);
                }
        }
        rcu_read_unlock();

        return 0;
}
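
/*
 * Example (illustrative only) of the "devices.list" output produced above.
 * With default-allow behavior a single "all devices" line is shown:
 *
 *	a *:* rwm
 *
 * With default-deny behavior the exceptions themselves are listed one per
 * line; e.g. a cgroup allowed read and mknod on the 1:3 char device shows:
 *
 *	c 1:3 rm
 */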

/**
 * may_access - verifies if a new exception is part of what is allowed
 *		by a dev cgroup based on the default policy +
 *		exceptions. This is used to make sure a child cgroup
 *		won't have more privileges than its parent or to
 *		verify if a certain access is allowed.
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 * @behavior: behavior of the exception
 */
static bool may_access(struct dev_cgroup *dev_cgroup,
                       struct dev_exception_item *refex,
                       enum devcg_behavior behavior)
{
        struct dev_exception_item *ex;
        bool match = false;

        rcu_lockdep_assert(rcu_read_lock_held() ||
                           lockdep_is_held(&devcgroup_mutex),
                           "device_cgroup::may_access() called without proper synchronization");

        list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
                if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
                        continue;
                if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
                        continue;
                if (ex->major != ~0 && ex->major != refex->major)
                        continue;
                if (ex->minor != ~0 && ex->minor != refex->minor)
                        continue;
                if (refex->access & (~ex->access))
                        continue;
                match = true;
                break;
        }

        if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                if (behavior == DEVCG_DEFAULT_ALLOW) {
                        /* the exception will deny access to certain devices */
                        return true;
                } else {
                        /* the exception will allow access to certain devices */
                        if (match)
                                /*
                                 * a new exception allowing access shouldn't
                                 * match a parent's exception
                                 */
                                return false;
                        return true;
                }
        } else {
                /* only behavior == DEVCG_DEFAULT_DENY allowed here */
                if (match)
                        /* parent has an exception that matches the proposed */
                        return true;
                else
                        return false;
        }

        return false;
}
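
/*
 * Example (illustrative only) of the check above in the parent/child case:
 * if the parent cgroup has default-deny behavior and the single exception
 * "b 8:* rw", a child asking to allow "b 8:0 r" passes (the requested bits
 * are a subset of a matching exception), while "b 8:0 rwm" fails because
 * the mknod bit is not covered by any parent exception.
 */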

/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device
 */
static int parent_has_perm(struct dev_cgroup *childcg,
                           struct dev_exception_item *ex)
{
        struct cgroup *pcg = childcg->css.cgroup->parent;
        struct dev_cgroup *parent;

        if (!pcg)
                return 1;
        parent = cgroup_to_devcgroup(pcg);
        return may_access(parent, ex, childcg->behavior);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
        if (!parent)
                return 1;
        return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/**
 * revalidate_active_exceptions - walks through the active exception list and
 *				   revalidates the exceptions based on parent's
 *				   behavior and exceptions. The exceptions that
 *				   are no longer valid will be removed.
 *				   Called with devcgroup_mutex held.
 * @devcg: cgroup which exceptions will be checked
 *
 * This is one of the three key functions for hierarchy implementation.
 * This function is responsible for re-evaluating all the cgroup's active
 * exceptions due to a parent's exception change.
 * Refer to Documentation/cgroups/devices.txt for more details.
 */
static void revalidate_active_exceptions(struct dev_cgroup *devcg)
{
        struct dev_exception_item *ex;
        struct list_head *this, *tmp;

        list_for_each_safe(this, tmp, &devcg->exceptions) {
                ex = container_of(this, struct dev_exception_item, list);
                if (!parent_has_perm(devcg, ex))
                        dev_exception_rm(devcg, ex);
        }
}

/**
 * propagate_exception - propagates a new exception to the children
 * @devcg_root: device cgroup that added a new exception
 * @ex: new exception to be propagated
 *
 * returns: 0 in case of success, != 0 in case of error
 */
static int propagate_exception(struct dev_cgroup *devcg_root,
                               struct dev_exception_item *ex)
{
        struct cgroup *root = devcg_root->css.cgroup, *pos;
        int rc = 0;

        rcu_read_lock();

        cgroup_for_each_descendant_pre(pos, root) {
                struct dev_cgroup *devcg = cgroup_to_devcgroup(pos);

                /*
                 * Because devcgroup_mutex is held, no devcg will become
                 * online or offline during the tree walk (see on/offline
                 * methods), and online ones are safe to access outside RCU
                 * read lock without bumping refcnt.
                 */
                if (!is_devcg_online(devcg))
                        continue;

                rcu_read_unlock();

                /*
                 * in case both root's and devcg's behavior are allow, a new
                 * restriction means adding to the exception list
                 */
                if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
                    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
                        rc = dev_exception_add(devcg, ex);
                        if (rc)
                                break;
                } else {
                        /*
                         * in the other possible cases:
                         * root's behavior: allow, devcg's: deny
                         * root's behavior: deny, devcg's: deny
                         * the exception will be removed
                         */
                        dev_exception_rm(devcg, ex);
                }
                revalidate_active_exceptions(devcg);

                rcu_read_lock();
        }

        rcu_read_unlock();
        return rc;
}
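
/*
 * Example (illustrative only) of the propagation above: when a default-allow
 * cgroup gets the restriction "c 1:3 rwm" written to its devices.deny file,
 * propagate_exception() adds that exception to every online default-allow
 * descendant and removes any matching exception from default-deny
 * descendants (revoking that access), after which each descendant's
 * remaining exceptions are revalidated against its parent.
 */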

static inline bool has_children(struct dev_cgroup *devcgroup)
{
        struct cgroup *cgrp = devcgroup->css.cgroup;

        return !list_empty(&cgrp->children);
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
                                   int filetype, const char *buffer)
{
        const char *b;
        char temp[12];		/* 11 + 1 characters needed for a u32 */
        int count, rc = 0;
        struct dev_exception_item ex;
        struct cgroup *p = devcgroup->css.cgroup;
        struct dev_cgroup *parent = NULL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (p->parent)
                parent = cgroup_to_devcgroup(p->parent);

        memset(&ex, 0, sizeof(ex));
        b = buffer;

        switch (*b) {
        case 'a':
                switch (filetype) {
                case DEVCG_ALLOW:
                        if (has_children(devcgroup))
                                return -EINVAL;

                        if (!may_allow_all(parent))
                                return -EPERM;
                        dev_exception_clean(devcgroup);
                        devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
                        if (!parent)
                                break;

                        rc = dev_exceptions_copy(&devcgroup->exceptions,
                                                 &parent->exceptions);
                        if (rc)
                                return rc;
                        break;
                case DEVCG_DENY:
                        if (has_children(devcgroup))
                                return -EINVAL;

                        dev_exception_clean(devcgroup);
                        devcgroup->behavior = DEVCG_DEFAULT_DENY;
                        break;
                default:
                        return -EINVAL;
                }
                return 0;
        case 'b':
                ex.type = DEV_BLOCK;
                break;
        case 'c':
                ex.type = DEV_CHAR;
                break;
        default:
                return -EINVAL;
        }
        b++;
        if (!isspace(*b))
                return -EINVAL;
        b++;
        if (*b == '*') {
                ex.major = ~0;
                b++;
        } else if (isdigit(*b)) {
                memset(temp, 0, sizeof(temp));
                for (count = 0; count < sizeof(temp) - 1; count++) {
                        temp[count] = *b;
                        b++;
                        if (!isdigit(*b))
                                break;
                }
                rc = kstrtou32(temp, 10, &ex.major);
                if (rc)
                        return -EINVAL;
        } else {
                return -EINVAL;
        }
        if (*b != ':')
                return -EINVAL;
        b++;

        /* read minor */
        if (*b == '*') {
                ex.minor = ~0;
                b++;
        } else if (isdigit(*b)) {
                memset(temp, 0, sizeof(temp));
                for (count = 0; count < sizeof(temp) - 1; count++) {
                        temp[count] = *b;
                        b++;
                        if (!isdigit(*b))
                                break;
                }
                rc = kstrtou32(temp, 10, &ex.minor);
                if (rc)
                        return -EINVAL;
        } else {
                return -EINVAL;
        }
        if (!isspace(*b))
                return -EINVAL;
        for (b++, count = 0; count < 3; count++, b++) {
                switch (*b) {
                case 'r':
                        ex.access |= ACC_READ;
                        break;
                case 'w':
                        ex.access |= ACC_WRITE;
                        break;
                case 'm':
                        ex.access |= ACC_MKNOD;
                        break;
                case '\n':
                case '\0':
                        count = 3;
                        break;
                default:
                        return -EINVAL;
                }
        }

        switch (filetype) {
        case DEVCG_ALLOW:
                if (!parent_has_perm(devcgroup, &ex))
                        return -EPERM;
                /*
                 * If the default policy is to allow by default, try to remove
                 * a matching exception instead.  And be silent about it: we
                 * don't want to break compatibility
                 */
                if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                        dev_exception_rm(devcgroup, &ex);
                        return 0;
                }
                rc = dev_exception_add(devcgroup, &ex);
                break;
        case DEVCG_DENY:
                /*
                 * If the default policy is to deny by default, try to remove
                 * a matching exception instead.  And be silent about it: we
                 * don't want to break compatibility
                 */
                if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
                        dev_exception_rm(devcgroup, &ex);
                else
                        rc = dev_exception_add(devcgroup, &ex);

                if (rc)
                        break;
                /* we only propagate new restrictions */
                rc = propagate_exception(devcgroup, &ex);
                break;
        default:
                rc = -EINVAL;
        }
        return rc;
}
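
/*
 * Example (illustrative only) of how the parser above is driven from user
 * space.  The mount point and the "sandbox" cgroup name are assumptions for
 * the example, not anything this file depends on:
 *
 *	# switch to a deny-all policy, then whitelist /dev/null (char 1:3)
 *	echo a > /sys/fs/cgroup/devices/sandbox/devices.deny
 *	echo "c 1:3 rwm" > /sys/fs/cgroup/devices/sandbox/devices.allow
 *
 * Each write lands in devcgroup_access_write() below and is parsed here as
 * "<type> <major>:<minor> <access>", with '*' accepted for major/minor.
 */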

static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
                                  const char *buffer)
{
        int retval;

        mutex_lock(&devcgroup_mutex);
        retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
                                         cft->private, buffer);
        mutex_unlock(&devcgroup_mutex);
        return retval;
}

static struct cftype dev_cgroup_files[] = {
        {
                .name = "allow",
                .write_string  = devcgroup_access_write,
                .private = DEVCG_ALLOW,
        },
        {
                .name = "deny",
                .write_string = devcgroup_access_write,
                .private = DEVCG_DENY,
        },
        {
                .name = "list",
                .read_seq_string = devcgroup_seq_read,
                .private = DEVCG_LIST,
        },
        { }	/* terminate */
};

struct cgroup_subsys devices_subsys = {
        .name = "devices",
        .can_attach = devcgroup_can_attach,
        .css_alloc = devcgroup_css_alloc,
        .css_free = devcgroup_css_free,
        .css_online = devcgroup_online,
        .css_offline = devcgroup_offline,
        .subsys_id = devices_subsys_id,
        .base_cftypes = dev_cgroup_files,
};

/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
 *
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
                                        short access)
{
        struct dev_cgroup *dev_cgroup;
        struct dev_exception_item ex;
        int rc;

        memset(&ex, 0, sizeof(ex));
        ex.type = type;
        ex.major = major;
        ex.minor = minor;
        ex.access = access;

        rcu_read_lock();
        dev_cgroup = task_devcgroup(current);
        rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
        rcu_read_unlock();

        if (!rc)
                return -EPERM;

        return 0;
}

int __devcgroup_inode_permission(struct inode *inode, int mask)
{
        short type, access = 0;

        if (S_ISBLK(inode->i_mode))
                type = DEV_BLOCK;
        if (S_ISCHR(inode->i_mode))
                type = DEV_CHAR;
        if (mask & MAY_WRITE)
                access |= ACC_WRITE;
        if (mask & MAY_READ)
                access |= ACC_READ;

        return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
                        access);
}

int devcgroup_inode_mknod(int mode, dev_t dev)
{
        short type;

        if (!S_ISBLK(mode) && !S_ISCHR(mode))
                return 0;

        if (S_ISBLK(mode))
                type = DEV_BLOCK;
        else
                type = DEV_CHAR;

        return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
                        ACC_MKNOD);
}
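
/*
 * Example (illustrative only) of the end-to-end effect of the two hooks above
 * for a task in a cgroup with default-deny behavior and the single exception
 * "c 1:3 mr" (/dev/null is the 1:3 character device):
 *
 *	mknod("null", S_IFCHR | 0666, makedev(1, 3))  -> allowed (ACC_MKNOD)
 *	open("/dev/null", O_RDONLY)                   -> allowed (ACC_READ)
 *	open("/dev/null", O_WRONLY)                   -> -EPERM (no ACC_WRITE)
 *
 * The permission checks reach __devcgroup_inode_permission() and
 * devcgroup_inode_mknod() via the VFS; the exact call sites are outside
 * this file.
 */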