/*
 * Implementation of the kernel access vector cache (AVC).
 *
 * Authors:	Stephen Smalley, <sds@epoch.ncsc.mil>
 *		James Morris <jmorris@redhat.com>
 *
 * Update:	KaiGai, Kohei <kaigai@ak.jp.nec.com>
 *		Replaced the avc_lock spinlock by RCU.
 *
 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License version 2,
 *	as published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <net/sock.h>
#include <linux/un.h>
#include <net/af_unix.h>
#include <linux/ip.h>
#include <linux/audit.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include "avc.h"
#include "avc_ss.h"
#include "classmap.h"
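
/*
 * AVC_CACHE_SLOTS must remain a power of two: avc_hash() selects a slot
 * by masking with (AVC_CACHE_SLOTS - 1).  avc_reclaim_node() frees at
 * most AVC_CACHE_RECLAIM entries per invocation.
 */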
#define AVC_CACHE_SLOTS			512
#define AVC_DEF_CACHE_THRESHOLD		512
#define AVC_CACHE_RECLAIM		16

#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
#define avc_cache_stats_incr(field)	this_cpu_inc(avc_cache_stats.field)
#else
#define avc_cache_stats_incr(field)	do {} while (0)
#endif

struct avc_entry {
        u32                     ssid;
        u32                     tsid;
        u16                     tclass;
        struct av_decision      avd;
};

struct avc_node {
        struct avc_entry        ae;
        struct hlist_node       list;   /* anchored in avc_cache->slots[i] */
        struct rcu_head         rhead;
};

struct avc_cache {
        struct hlist_head       slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
        spinlock_t              slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
        atomic_t                lru_hint;       /* LRU hint for reclaim scan */
        atomic_t                active_nodes;
        u32                     latest_notif;   /* latest revocation notification */
};

struct avc_callback_node {
        int (*callback) (u32 event);
        u32 events;
        struct avc_callback_node *next;
};

/* Exported via selinuxfs */
unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;

#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
#endif

static struct avc_cache avc_cache;
static struct avc_callback_node *avc_callbacks;
static struct kmem_cache *avc_node_cachep;
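
/*
 * Map an (ssid, tsid, tclass) triple to a cache slot.  The shifts spread
 * the three inputs across different bit positions before masking down to
 * the power-of-two slot count.
 */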
static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
{
        return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
}

/**
 * avc_dump_av - Display an access vector in human-readable form.
 * @ab: the audit buffer
 * @tclass: target security class
 * @av: access vector
 */
static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
{
        const char **perms;
        int i, perm;

        if (av == 0) {
                audit_log_format(ab, " null");
                return;
        }

        perms = secclass_map[tclass-1].perms;

        audit_log_format(ab, " {");
        i = 0;
        perm = 1;
        while (i < (sizeof(av) * 8)) {
                if ((perm & av) && perms[i]) {
                        audit_log_format(ab, " %s", perms[i]);
                        av &= ~perm;
                }
                i++;
                perm <<= 1;
        }

        if (av)
                audit_log_format(ab, " 0x%x", av);

        audit_log_format(ab, " }");
}

/**
 * avc_dump_query - Display a SID pair and a class in human-readable form.
 * @ab: the audit buffer
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 */
static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass)
{
        int rc;
        char *scontext;
        u32 scontext_len;

        rc = security_sid_to_context(ssid, &scontext, &scontext_len);
        if (rc)
                audit_log_format(ab, "ssid=%d", ssid);
        else {
                audit_log_format(ab, "scontext=%s", scontext);
                kfree(scontext);
        }

        rc = security_sid_to_context(tsid, &scontext, &scontext_len);
        if (rc)
                audit_log_format(ab, " tsid=%d", tsid);
        else {
                audit_log_format(ab, " tcontext=%s", scontext);
                kfree(scontext);
        }

        BUG_ON(tclass >= ARRAY_SIZE(secclass_map));
        audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name);
}

/**
 * avc_init - Initialize the AVC.
 *
 * Initialize the access vector cache.
 */
void __init avc_init(void)
{
        int i;

        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
                INIT_HLIST_HEAD(&avc_cache.slots[i]);
                spin_lock_init(&avc_cache.slots_lock[i]);
        }
        atomic_set(&avc_cache.active_nodes, 0);
        atomic_set(&avc_cache.lru_hint, 0);

        avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
                                            0, SLAB_PANIC, NULL);

        audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL,
                  "AVC INITIALIZED\n");
}

int avc_get_hash_stats(char *page)
{
        int i, chain_len, max_chain_len, slots_used;
        struct avc_node *node;
        struct hlist_head *head;

        rcu_read_lock();

        slots_used = 0;
        max_chain_len = 0;
        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
                head = &avc_cache.slots[i];
                if (!hlist_empty(head)) {
                        slots_used++;

                        chain_len = 0;
                        hlist_for_each_entry_rcu(node, head, list)
                                chain_len++;
                        if (chain_len > max_chain_len)
                                max_chain_len = chain_len;
                }
        }

        rcu_read_unlock();

        return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
                         "longest chain: %d\n",
                         atomic_read(&avc_cache.active_nodes),
                         slots_used, AVC_CACHE_SLOTS, max_chain_len);
}

static void avc_node_free(struct rcu_head *rhead)
{
        struct avc_node *node = container_of(rhead, struct avc_node, rhead);
        kmem_cache_free(avc_node_cachep, node);
        avc_cache_stats_incr(frees);
}

static void avc_node_delete(struct avc_node *node)
{
        hlist_del_rcu(&node->list);
        call_rcu(&node->rhead, avc_node_free);
        atomic_dec(&avc_cache.active_nodes);
}

static void avc_node_kill(struct avc_node *node)
{
        kmem_cache_free(avc_node_cachep, node);
        avc_cache_stats_incr(frees);
        atomic_dec(&avc_cache.active_nodes);
}
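
/*
 * Publish @new in place of @old; the caller holds the slot lock, and
 * @old is freed after an RCU grace period.  active_nodes is decremented
 * because avc_alloc_node() already counted @new and @old is going away,
 * leaving the net population unchanged.
 */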
static void avc_node_replace(struct avc_node *new, struct avc_node *old)
{
        hlist_replace_rcu(&old->list, &new->list);
        call_rcu(&old->rhead, avc_node_free);
        atomic_dec(&avc_cache.active_nodes);
}
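
/*
 * Reclaim a batch of cache entries once the cache grows past
 * avc_cache_threshold.  Slots are scanned round-robin starting from
 * lru_hint, and contended slots are skipped with spin_trylock so that
 * reclaim never spins against a concurrent update.
 */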
static inline int avc_reclaim_node(void)
{
        struct avc_node *node;
        int hvalue, try, ecx;
        unsigned long flags;
        struct hlist_head *head;
        spinlock_t *lock;

        for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
                hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
                head = &avc_cache.slots[hvalue];
                lock = &avc_cache.slots_lock[hvalue];

                if (!spin_trylock_irqsave(lock, flags))
                        continue;
                rcu_read_lock();
                hlist_for_each_entry(node, head, list) {
                        avc_node_delete(node);
                        avc_cache_stats_incr(reclaims);
                        ecx++;
                        if (ecx >= AVC_CACHE_RECLAIM) {
                                rcu_read_unlock();
                                spin_unlock_irqrestore(lock, flags);
                                goto out;
                        }
                }
                rcu_read_unlock();
                spin_unlock_irqrestore(lock, flags);
        }
out:
        return ecx;
}
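
/*
 * Nodes are allocated from contexts that may not sleep, hence GFP_ATOMIC;
 * __GFP_NOMEMALLOC keeps the cache out of the emergency memory reserves.
 * Crossing avc_cache_threshold triggers an immediate reclaim pass.
 */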
static struct avc_node *avc_alloc_node(void)
{
        struct avc_node *node;

        node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
        if (!node)
                goto out;

        INIT_HLIST_NODE(&node->list);
        avc_cache_stats_incr(allocations);

        if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
                avc_reclaim_node();

out:
        return node;
}

static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
{
        node->ae.ssid = ssid;
        node->ae.tsid = tsid;
        node->ae.tclass = tclass;
        memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
}

static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
{
        struct avc_node *node, *ret = NULL;
        int hvalue;
        struct hlist_head *head;

        hvalue = avc_hash(ssid, tsid, tclass);
        head = &avc_cache.slots[hvalue];
        hlist_for_each_entry_rcu(node, head, list) {
                if (ssid == node->ae.ssid &&
                    tclass == node->ae.tclass &&
                    tsid == node->ae.tsid) {
                        ret = node;
                        break;
                }
        }

        return ret;
}

/**
 * avc_lookup - Look up an AVC entry.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 *
 * Look up an AVC entry that is valid for the
 * (@ssid, @tsid) pair, interpreting the permissions
 * based on @tclass.  If a valid AVC entry exists,
 * then this function returns the avc_node.
 * Otherwise, this function returns NULL.
 */
static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
{
        struct avc_node *node;

        avc_cache_stats_incr(lookups);
        node = avc_search_node(ssid, tsid, tclass);

        if (node)
                return node;

        avc_cache_stats_incr(misses);
        return NULL;
}
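
/*
 * Track the latest revocation notification sequence number.  On insert,
 * refuse (-EAGAIN) to cache a decision computed before the latest
 * notification; otherwise advance latest_notif monotonically.
 */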
static int avc_latest_notif_update(int seqno, int is_insert)
{
        int ret = 0;
        static DEFINE_SPINLOCK(notif_lock);
        unsigned long flag;

        spin_lock_irqsave(&notif_lock, flag);
        if (is_insert) {
                if (seqno < avc_cache.latest_notif) {
                        printk(KERN_WARNING "SELinux: avc: seqno %d < latest_notif %d\n",
                               seqno, avc_cache.latest_notif);
                        ret = -EAGAIN;
                }
        } else {
                if (seqno > avc_cache.latest_notif)
                        avc_cache.latest_notif = seqno;
        }
        spin_unlock_irqrestore(&notif_lock, flag);

        return ret;
}

/**
 * avc_insert - Insert an AVC entry.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @avd: resulting av decision
 *
 * Insert an AVC entry for the SID pair
 * (@ssid, @tsid) and class @tclass.
 * The access vectors and the sequence number are
 * normally provided by the security server in
 * response to a security_compute_av() call.  If the
 * sequence number @avd->seqno is not less than the latest
 * revocation notification, then the function copies
 * the access vectors into a cache entry and returns the
 * inserted avc_node.  Otherwise, this function returns NULL.
 */
static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
{
        struct avc_node *pos, *node = NULL;
        int hvalue;
        unsigned long flag;

        if (avc_latest_notif_update(avd->seqno, 1))
                goto out;

        node = avc_alloc_node();
        if (node) {
                struct hlist_head *head;
                spinlock_t *lock;

                hvalue = avc_hash(ssid, tsid, tclass);
                avc_node_populate(node, ssid, tsid, tclass, avd);

                head = &avc_cache.slots[hvalue];
                lock = &avc_cache.slots_lock[hvalue];

                spin_lock_irqsave(lock, flag);
                hlist_for_each_entry(pos, head, list) {
                        if (pos->ae.ssid == ssid &&
                            pos->ae.tsid == tsid &&
                            pos->ae.tclass == tclass) {
                                avc_node_replace(node, pos);
                                goto found;
                        }
                }
                hlist_add_head_rcu(&node->list, head);
found:
                spin_unlock_irqrestore(lock, flag);
        }
out:
        return node;
}

/**
 * avc_audit_pre_callback - SELinux-specific audit prefix,
 * called by the generic audit code
 * @ab: the audit buffer
 * @a: audit_data
 */
static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
{
        struct common_audit_data *ad = a;
        audit_log_format(ab, "avc: %s ",
                         ad->selinux_audit_data->denied ? "denied" : "granted");
        avc_dump_av(ab, ad->selinux_audit_data->tclass,
                    ad->selinux_audit_data->audited);
        audit_log_format(ab, " for ");
}

/**
 * avc_audit_post_callback - SELinux-specific audit suffix,
 * called by the generic audit code
 * @ab: the audit buffer
 * @a: audit_data
 */
static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
{
        struct common_audit_data *ad = a;
        audit_log_format(ab, " ");
        avc_dump_query(ab, ad->selinux_audit_data->ssid,
                       ad->selinux_audit_data->tsid,
                       ad->selinux_audit_data->tclass);
}

/* This is the slow part of avc audit with big stack footprint */
noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
                            u32 requested, u32 audited, u32 denied,
                            struct common_audit_data *a,
                            unsigned flags)
{
        struct common_audit_data stack_data;
        struct selinux_audit_data sad;

        if (!a) {
                a = &stack_data;
                a->type = LSM_AUDIT_DATA_NONE;
        }

        /*
         * When in a RCU walk do the audit on the RCU retry.  This is because
         * the collection of the dname in an inode audit message is not RCU
         * safe.  Note this may drop some audits when the situation changes
         * during retry.  However this is logically just as if the operation
         * happened a little later.
         */
        if ((a->type == LSM_AUDIT_DATA_INODE) &&
            (flags & MAY_NOT_BLOCK))
                return -ECHILD;

        sad.tclass = tclass;
        sad.requested = requested;
        sad.ssid = ssid;
        sad.tsid = tsid;
        sad.audited = audited;
        sad.denied = denied;

        a->selinux_audit_data = &sad;

        common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
        return 0;
}

/**
 * avc_add_callback - Register a callback for security events.
 * @callback: callback function
 * @events: security events
 *
 * Register a callback function for events in the set @events.
 * Returns %0 on success or -%ENOMEM if insufficient memory
 * exists to add the callback.
 */
int __init avc_add_callback(int (*callback)(u32 event), u32 events)
{
        struct avc_callback_node *c;
        int rc = 0;

        c = kmalloc(sizeof(*c), GFP_KERNEL);
        if (!c) {
                rc = -ENOMEM;
                goto out;
        }

        c->callback = callback;
        c->events = events;
        c->next = avc_callbacks;
        avc_callbacks = c;
out:
        return rc;
}
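
/* Two SIDs match if they are equal or if either is the SECSID_WILD wildcard. */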
static inline int avc_sidcmp(u32 x, u32 y)
{
        return (x == y || x == SECSID_WILD || y == SECSID_WILD);
}

/**
 * avc_update_node - Update an AVC entry
 * @event: updating event
 * @perms: permission mask bits
 * @ssid, @tsid, @tclass: identifier of an AVC entry
 * @seqno: sequence number when the decision was made
 *
 * If a valid AVC entry doesn't exist, this function returns -ENOENT.
 * If the node allocation fails, this function returns -ENOMEM.
 * Otherwise, this function updates the AVC entry.  The original
 * AVC entry is released later by RCU.
 */
static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
                           u32 seqno)
{
        int hvalue, rc = 0;
        unsigned long flag;
        struct avc_node *pos, *node, *orig = NULL;
        struct hlist_head *head;
        spinlock_t *lock;

        node = avc_alloc_node();
        if (!node) {
                rc = -ENOMEM;
                goto out;
        }

        /* Lock the target slot */
        hvalue = avc_hash(ssid, tsid, tclass);

        head = &avc_cache.slots[hvalue];
        lock = &avc_cache.slots_lock[hvalue];

        spin_lock_irqsave(lock, flag);

        hlist_for_each_entry(pos, head, list) {
                if (ssid == pos->ae.ssid &&
                    tsid == pos->ae.tsid &&
                    tclass == pos->ae.tclass &&
                    seqno == pos->ae.avd.seqno) {
                        orig = pos;
                        break;
                }
        }

        if (!orig) {
                rc = -ENOENT;
                avc_node_kill(node);
                goto out_unlock;
        }

        /*
         * Copy and replace original node.
         */
        avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);

        switch (event) {
        case AVC_CALLBACK_GRANT:
                node->ae.avd.allowed |= perms;
                break;
        case AVC_CALLBACK_TRY_REVOKE:
        case AVC_CALLBACK_REVOKE:
                node->ae.avd.allowed &= ~perms;
                break;
        case AVC_CALLBACK_AUDITALLOW_ENABLE:
                node->ae.avd.auditallow |= perms;
                break;
        case AVC_CALLBACK_AUDITALLOW_DISABLE:
                node->ae.avd.auditallow &= ~perms;
                break;
        case AVC_CALLBACK_AUDITDENY_ENABLE:
                node->ae.avd.auditdeny |= perms;
                break;
        case AVC_CALLBACK_AUDITDENY_DISABLE:
                node->ae.avd.auditdeny &= ~perms;
                break;
        }
        avc_node_replace(node, orig);
out_unlock:
        spin_unlock_irqrestore(lock, flag);
out:
        return rc;
}

/**
 * avc_flush - Flush the cache
 */
static void avc_flush(void)
{
        struct hlist_head *head;
        struct avc_node *node;
        spinlock_t *lock;
        unsigned long flag;
        int i;

        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
                head = &avc_cache.slots[i];
                lock = &avc_cache.slots_lock[i];

                spin_lock_irqsave(lock, flag);
                /*
                 * With preemptable RCU, the outer spinlock does not
                 * prevent RCU grace periods from ending.
                 */
                rcu_read_lock();
                hlist_for_each_entry(node, head, list)
                        avc_node_delete(node);
                rcu_read_unlock();
                spin_unlock_irqrestore(lock, flag);
        }
}

/**
 * avc_ss_reset - Flush the cache and revalidate migrated permissions.
 * @seqno: policy sequence number
 */
int avc_ss_reset(u32 seqno)
{
        struct avc_callback_node *c;
        int rc = 0, tmprc;

        avc_flush();

        for (c = avc_callbacks; c; c = c->next) {
                if (c->events & AVC_CALLBACK_RESET) {
                        tmprc = c->callback(AVC_CALLBACK_RESET);
                        /* save the first error encountered for the return
                           value and continue processing the callbacks */
                        if (!rc)
                                rc = tmprc;
                }
        }

        avc_latest_notif_update(seqno, 0);
        return rc;
}

/*
 * Slow-path helper function for avc_has_perm_noaudit,
 * when the avc_node lookup fails.  We get called with
 * the RCU read lock held, and need to return with it
 * still held, but drop it for the security compute.
 *
 * Don't inline this, since it's the slow-path and just
 * results in a bigger stack frame.
 */
static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
                                                u16 tclass, struct av_decision *avd)
{
        rcu_read_unlock();
        security_compute_av(ssid, tsid, tclass, avd);
        rcu_read_lock();
        return avc_insert(ssid, tsid, tclass, avd);
}
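
/*
 * Handle a permission denied by the cached decision.  Fail with -EACCES
 * under AVC_STRICT or when enforcing (and the domain is not permissive);
 * otherwise grant and cache the requested permissions, so the same
 * denial is not re-audited while the entry remains cached.
 */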
static noinline int avc_denied(u32 ssid, u32 tsid,
                               u16 tclass, u32 requested,
                               unsigned flags,
                               struct av_decision *avd)
{
        if (flags & AVC_STRICT)
                return -EACCES;

        if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
                return -EACCES;

        avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
                        tsid, tclass, avd->seqno);
        return 0;
}

/**
 * avc_has_perm_noaudit - Check permissions but perform no auditing.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
 * @flags: AVC_STRICT or 0
 * @avd: access vector decisions
 *
 * Check the AVC to determine whether the @requested permissions are granted
 * for the SID pair (@ssid, @tsid), interpreting the permissions
 * based on @tclass, and call the security server on a cache miss to obtain
 * a new decision and add it to the cache.  Return a copy of the decisions
 * in @avd.  Return %0 if all @requested permissions are granted,
 * -%EACCES if any permissions are denied, or another -errno upon
 * other errors.  This function is typically called by avc_has_perm(),
 * but may also be called directly to separate permission checking from
 * auditing, e.g. in cases where a lock must be held for the check but
 * should be released for the auditing.
 */
inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
                                u16 tclass, u32 requested,
                                unsigned flags,
                                struct av_decision *avd)
{
        struct avc_node *node;
        int rc = 0;
        u32 denied;

        BUG_ON(!requested);

        rcu_read_lock();

        node = avc_lookup(ssid, tsid, tclass);
        if (unlikely(!node)) {
                node = avc_compute_av(ssid, tsid, tclass, avd);
        } else {
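                /*
                 * Cache hit: copy the decision out for the caller, then
                 * point @avd at the cached copy so the denied check reads
                 * the live node (safe while the RCU read lock is held).
                 */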
                memcpy(avd, &node->ae.avd, sizeof(*avd));
                avd = &node->ae.avd;
        }

        denied = requested & ~(avd->allowed);
        if (unlikely(denied))
                rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);

        rcu_read_unlock();
        return rc;
}

/**
 * avc_has_perm_flags - Check permissions and perform any appropriate auditing.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
 * @auditdata: auxiliary audit data
 * @flags: VFS walk flags
 *
 * Check the AVC to determine whether the @requested permissions are granted
 * for the SID pair (@ssid, @tsid), interpreting the permissions
 * based on @tclass, and call the security server on a cache miss to obtain
 * a new decision and add it to the cache.  Audit the granting or denial of
 * permissions in accordance with the policy.  Return %0 if all @requested
 * permissions are granted, -%EACCES if any permissions are denied, or
 * another -errno upon other errors.
 */
int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass,
                       u32 requested, struct common_audit_data *auditdata,
                       unsigned flags)
{
        struct av_decision avd;
        int rc, rc2;

        rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);

        rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata,
                        flags);
        if (rc2)
                return rc2;
        return rc;
}

u32 avc_policy_seqno(void)
{
        return avc_cache.latest_notif;
}

void avc_disable(void)
{
        /*
         * If you are looking at this because you have realized that we are
         * not destroying the avc_node_cachep it might be easy to fix, but
         * I don't know the memory barrier semantics well enough to know.  It's
         * possible that some other task dereferenced security_ops when
         * it still pointed to selinux operations.  If that is the case it's
         * possible that it is about to use the avc and is about to need the
         * avc_node_cachep.  I know I could wrap the security.c security_ops call
         * in an rcu_lock, but seriously, it's not worth it.  Instead I just flush
         * the cache and get that memory back.
         */
        if (avc_node_cachep) {
                avc_flush();
                /* kmem_cache_destroy(avc_node_cachep); */
        }
}