/* netif.c */
  1. /*
  2. * Network interface table.
  3. *
  4. * Network interfaces (devices) do not have a security field, so we
  5. * maintain a table associating each interface with a SID.
  6. *
  7. * Author: James Morris <jmorris@redhat.com>
  8. *
  9. * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License version 2,
  13. * as published by the Free Software Foundation.
  14. */
  15. #include <linux/init.h>
  16. #include <linux/types.h>
  17. #include <linux/stddef.h>
  18. #include <linux/kernel.h>
  19. #include <linux/list.h>
  20. #include <linux/notifier.h>
  21. #include <linux/netdevice.h>
  22. #include <linux/rcupdate.h>
  23. #include <net/net_namespace.h>
  24. #include "security.h"
  25. #include "objsec.h"
  26. #include "netif.h"
  27. #define SEL_NETIF_HASH_SIZE 64
  28. #define SEL_NETIF_HASH_MAX 1024
  29. #undef DEBUG
  30. #ifdef DEBUG
  31. #define DEBUGP printk
  32. #else
  33. #define DEBUGP(format, args...)
  34. #endif
/* One cache entry: a network device and its associated SIDs. */
struct sel_netif
{
	struct list_head list;			/* hash-bucket linkage (sel_netif_hash) */
	struct netif_security_struct nsec;	/* device pointer + interface/message SIDs */
	struct rcu_head rcu_head;		/* deferred free via call_rcu() */
};

/* Entry count, capped at SEL_NETIF_HASH_MAX; writers hold sel_netif_lock. */
static u32 sel_netif_total;
/* NOTE(review): sel_netif_list is never referenced in this file — verify
 * against the rest of the tree before removing. */
static LIST_HEAD(sel_netif_list);
/* Serializes all table modifications; lookups are RCU-protected. */
static DEFINE_SPINLOCK(sel_netif_lock);
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
  45. static inline u32 sel_netif_hasfn(struct net_device *dev)
  46. {
  47. return (dev->ifindex & (SEL_NETIF_HASH_SIZE - 1));
  48. }
  49. /*
  50. * All of the devices should normally fit in the hash, so we optimize
  51. * for that case.
  52. */
  53. static inline struct sel_netif *sel_netif_find(struct net_device *dev)
  54. {
  55. struct list_head *pos;
  56. int idx = sel_netif_hasfn(dev);
  57. __list_for_each_rcu(pos, &sel_netif_hash[idx]) {
  58. struct sel_netif *netif = list_entry(pos,
  59. struct sel_netif, list);
  60. if (likely(netif->nsec.dev == dev))
  61. return netif;
  62. }
  63. return NULL;
  64. }
  65. static int sel_netif_insert(struct sel_netif *netif)
  66. {
  67. int idx, ret = 0;
  68. if (sel_netif_total >= SEL_NETIF_HASH_MAX) {
  69. ret = -ENOSPC;
  70. goto out;
  71. }
  72. idx = sel_netif_hasfn(netif->nsec.dev);
  73. list_add_rcu(&netif->list, &sel_netif_hash[idx]);
  74. sel_netif_total++;
  75. out:
  76. return ret;
  77. }
/*
 * RCU callback: actually free an entry once all pre-existing readers
 * are done with it (scheduled from sel_netif_destroy()).
 */
static void sel_netif_free(struct rcu_head *p)
{
	struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head);

	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);
	kfree(netif);
}
/*
 * Unlink @netif from the hash and schedule its memory for freeing
 * after an RCU grace period.  Caller must hold sel_netif_lock.
 */
static void sel_netif_destroy(struct sel_netif *netif)
{
	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);

	list_del_rcu(&netif->list);
	sel_netif_total--;
	call_rcu(&netif->rcu_head, sel_netif_free);
}
/*
 * Return the cache entry for @dev, creating one on demand.  Called
 * under rcu_read_lock().  On failure returns an ERR_PTR: -ENOMEM,
 * -ENOSPC (table full), or an error from the security server.
 */
static struct sel_netif *sel_netif_lookup(struct net_device *dev)
{
	int ret;
	struct sel_netif *netif, *new;
	struct netif_security_struct *nsec;

	/* Fast path: entry already cached (lockless, RCU-protected). */
	netif = sel_netif_find(dev);
	if (likely(netif != NULL))
		goto out;

	/* GFP_ATOMIC: this can be reached from non-sleeping (softirq)
	 * context — TODO confirm against callers. */
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new) {
		netif = ERR_PTR(-ENOMEM);
		goto out;
	}

	nsec = &new->nsec;

	/* Ask the security server for the interface and default message
	 * SIDs before taking the lock. */
	ret = security_netif_sid(dev->name, &nsec->if_sid, &nsec->msg_sid);
	if (ret < 0) {
		kfree(new);
		netif = ERR_PTR(ret);
		goto out;
	}

	nsec->dev = dev;

	spin_lock_bh(&sel_netif_lock);

	/* Re-check under the lock: another CPU may have inserted an
	 * entry for @dev while we were allocating.  If so, use theirs
	 * and discard ours. */
	netif = sel_netif_find(dev);
	if (netif) {
		spin_unlock_bh(&sel_netif_lock);
		kfree(new);
		goto out;
	}

	ret = sel_netif_insert(new);
	spin_unlock_bh(&sel_netif_lock);

	if (ret) {
		kfree(new);
		netif = ERR_PTR(ret);
		goto out;
	}

	netif = new;

	DEBUGP("new: ifindex=%u name=%s if_sid=%u msg_sid=%u\n", dev->ifindex, dev->name,
	       nsec->if_sid, nsec->msg_sid);
out:
	return netif;
}
  132. static void sel_netif_assign_sids(u32 if_sid_in, u32 msg_sid_in, u32 *if_sid_out, u32 *msg_sid_out)
  133. {
  134. if (if_sid_out)
  135. *if_sid_out = if_sid_in;
  136. if (msg_sid_out)
  137. *msg_sid_out = msg_sid_in;
  138. }
  139. static int sel_netif_sids_slow(struct net_device *dev, u32 *if_sid, u32 *msg_sid)
  140. {
  141. int ret = 0;
  142. u32 tmp_if_sid, tmp_msg_sid;
  143. ret = security_netif_sid(dev->name, &tmp_if_sid, &tmp_msg_sid);
  144. if (!ret)
  145. sel_netif_assign_sids(tmp_if_sid, tmp_msg_sid, if_sid, msg_sid);
  146. return ret;
  147. }
  148. int sel_netif_sids(struct net_device *dev, u32 *if_sid, u32 *msg_sid)
  149. {
  150. int ret = 0;
  151. struct sel_netif *netif;
  152. rcu_read_lock();
  153. netif = sel_netif_lookup(dev);
  154. if (IS_ERR(netif)) {
  155. rcu_read_unlock();
  156. ret = sel_netif_sids_slow(dev, if_sid, msg_sid);
  157. goto out;
  158. }
  159. sel_netif_assign_sids(netif->nsec.if_sid, netif->nsec.msg_sid, if_sid, msg_sid);
  160. rcu_read_unlock();
  161. out:
  162. return ret;
  163. }
  164. static void sel_netif_kill(struct net_device *dev)
  165. {
  166. struct sel_netif *netif;
  167. spin_lock_bh(&sel_netif_lock);
  168. netif = sel_netif_find(dev);
  169. if (netif)
  170. sel_netif_destroy(netif);
  171. spin_unlock_bh(&sel_netif_lock);
  172. }
  173. static void sel_netif_flush(void)
  174. {
  175. int idx;
  176. spin_lock_bh(&sel_netif_lock);
  177. for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++) {
  178. struct sel_netif *netif;
  179. list_for_each_entry(netif, &sel_netif_hash[idx], list)
  180. sel_netif_destroy(netif);
  181. }
  182. spin_unlock_bh(&sel_netif_lock);
  183. }
/*
 * AVC callback: on policy reload (AVC_CALLBACK_RESET) the cached SIDs
 * may no longer be valid, so flush the whole table and wait for
 * network-side RCU readers to drain before returning.
 */
static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
				  u16 class, u32 perms, u32 *retained)
{
	if (event == AVC_CALLBACK_RESET) {
		sel_netif_flush();
		synchronize_net();
	}
	return 0;
}
/*
 * Netdevice event handler: evict the cache entry for an interface
 * when it goes down.  Always returns NOTIFY_DONE — this is an
 * observer only and never vetoes the event.
 */
static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	/* Only devices in the initial network namespace are cached. */
	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	if (event == NETDEV_DOWN)
		sel_netif_kill(dev);

	return NOTIFY_DONE;
}
/* Subscribes sel_netif_netdev_notifier_handler() to netdevice events. */
static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};
  206. static __init int sel_netif_init(void)
  207. {
  208. int i, err = 0;
  209. if (!selinux_enabled)
  210. goto out;
  211. for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
  212. INIT_LIST_HEAD(&sel_netif_hash[i]);
  213. register_netdevice_notifier(&sel_netif_netdev_notifier);
  214. err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET,
  215. SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
  216. if (err)
  217. panic("avc_add_callback() failed, error %d\n", err);
  218. out:
  219. return err;
  220. }
  221. __initcall(sel_netif_init);