  1. /*
  2. * Network interface table.
  3. *
  4. * Network interfaces (devices) do not have a security field, so we
  5. * maintain a table associating each interface with a SID.
  6. *
  7. * Author: James Morris <jmorris@redhat.com>
  8. *
  9. * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License version 2,
  13. * as published by the Free Software Foundation.
  14. */
  15. #include <linux/init.h>
  16. #include <linux/types.h>
  17. #include <linux/stddef.h>
  18. #include <linux/kernel.h>
  19. #include <linux/list.h>
  20. #include <linux/notifier.h>
  21. #include <linux/netdevice.h>
  22. #include <linux/rcupdate.h>
  23. #include "security.h"
  24. #include "objsec.h"
  25. #include "netif.h"
  26. #define SEL_NETIF_HASH_SIZE 64
  27. #define SEL_NETIF_HASH_MAX 1024
  28. #undef DEBUG
  29. #ifdef DEBUG
  30. #define DEBUGP printk
  31. #else
  32. #define DEBUGP(format, args...)
  33. #endif
/*
 * Hash-table node binding one network device to its cached security
 * state.  Entries live in sel_netif_hash and are freed via RCU.
 */
struct sel_netif
{
	struct list_head list;			/* hash-bucket linkage */
	struct netif_security_struct nsec;	/* device pointer plus if/msg SIDs */
	struct rcu_head rcu_head;		/* deferred free via call_rcu() */
};

/* Current number of cached entries; bounded by SEL_NETIF_HASH_MAX. */
static u32 sel_netif_total;
/* NOTE(review): sel_netif_list is never referenced in this file -- confirm
 * it is not used elsewhere before removing. */
static LIST_HEAD(sel_netif_list);
/* Serializes all writers; readers walk the buckets under RCU instead. */
static DEFINE_SPINLOCK(sel_netif_lock);
/* Buckets of struct sel_netif, indexed by sel_netif_hasfn(). */
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
  44. static inline u32 sel_netif_hasfn(struct net_device *dev)
  45. {
  46. return (dev->ifindex & (SEL_NETIF_HASH_SIZE - 1));
  47. }
  48. /*
  49. * All of the devices should normally fit in the hash, so we optimize
  50. * for that case.
  51. */
  52. static inline struct sel_netif *sel_netif_find(struct net_device *dev)
  53. {
  54. struct list_head *pos;
  55. int idx = sel_netif_hasfn(dev);
  56. __list_for_each_rcu(pos, &sel_netif_hash[idx]) {
  57. struct sel_netif *netif = list_entry(pos,
  58. struct sel_netif, list);
  59. if (likely(netif->nsec.dev == dev))
  60. return netif;
  61. }
  62. return NULL;
  63. }
  64. static int sel_netif_insert(struct sel_netif *netif)
  65. {
  66. int idx, ret = 0;
  67. if (sel_netif_total >= SEL_NETIF_HASH_MAX) {
  68. ret = -ENOSPC;
  69. goto out;
  70. }
  71. idx = sel_netif_hasfn(netif->nsec.dev);
  72. list_add_rcu(&netif->list, &sel_netif_hash[idx]);
  73. sel_netif_total++;
  74. out:
  75. return ret;
  76. }
/*
 * RCU callback: release an entry previously unlinked by
 * sel_netif_destroy(), after all readers have moved on.
 */
static void sel_netif_free(struct rcu_head *p)
{
	struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head);

	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);
	kfree(netif);
}
/*
 * Unlink @netif from its bucket and schedule the actual kfree for
 * after an RCU grace period.  The caller holds sel_netif_lock;
 * concurrent RCU readers may still reference the entry, hence the
 * deferred free through call_rcu().
 */
static void sel_netif_destroy(struct sel_netif *netif)
{
	DEBUGP("%s: %s\n", __FUNCTION__, netif->nsec.dev->name);
	list_del_rcu(&netif->list);
	sel_netif_total--;
	call_rcu(&netif->rcu_head, sel_netif_free);
}
/*
 * Return the cache entry for @dev, creating and inserting one on first
 * use.  Called under rcu_read_lock().  On failure an ERR_PTR is
 * returned (-ENOMEM, or a negative error from security_netif_sid()).
 */
static struct sel_netif *sel_netif_lookup(struct net_device *dev)
{
	int ret;
	struct sel_netif *netif, *new;
	struct netif_security_struct *nsec;

	/* Fast path: already cached. */
	netif = sel_netif_find(dev);
	if (likely(netif != NULL))
		goto out;

	/* Build a candidate entry outside the lock; GFP_ATOMIC because
	 * this can run in softirq context. */
	new = kmalloc(sizeof(*new), GFP_ATOMIC);
	if (!new) {
		netif = ERR_PTR(-ENOMEM);
		goto out;
	}
	memset(new, 0, sizeof(*new));
	nsec = &new->nsec;

	/* Ask the policy for the interface and default message SIDs. */
	ret = security_netif_sid(dev->name, &nsec->if_sid, &nsec->msg_sid);
	if (ret < 0) {
		kfree(new);
		netif = ERR_PTR(ret);
		goto out;
	}

	nsec->dev = dev;

	/* Double-check under the lock: another CPU may have inserted an
	 * entry for this device while we were allocating. */
	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(dev);
	if (netif) {
		spin_unlock_bh(&sel_netif_lock);
		kfree(new);
		goto out;
	}
	ret = sel_netif_insert(new);
	spin_unlock_bh(&sel_netif_lock);
	if (ret) {
		kfree(new);
		netif = ERR_PTR(ret);
		goto out;
	}
	netif = new;
	DEBUGP("new: ifindex=%u name=%s if_sid=%u msg_sid=%u\n", dev->ifindex, dev->name,
	       nsec->if_sid, nsec->msg_sid);
out:
	return netif;
}
  132. static void sel_netif_assign_sids(u32 if_sid_in, u32 msg_sid_in, u32 *if_sid_out, u32 *msg_sid_out)
  133. {
  134. if (if_sid_out)
  135. *if_sid_out = if_sid_in;
  136. if (msg_sid_out)
  137. *msg_sid_out = msg_sid_in;
  138. }
  139. static int sel_netif_sids_slow(struct net_device *dev, u32 *if_sid, u32 *msg_sid)
  140. {
  141. int ret = 0;
  142. u32 tmp_if_sid, tmp_msg_sid;
  143. ret = security_netif_sid(dev->name, &tmp_if_sid, &tmp_msg_sid);
  144. if (!ret)
  145. sel_netif_assign_sids(tmp_if_sid, tmp_msg_sid, if_sid, msg_sid);
  146. return ret;
  147. }
/*
 * Public entry point: fetch the interface and default message SIDs for
 * @dev, preferring the RCU-protected cache and falling back to a
 * direct policy query if the cache cannot supply an entry.  Either
 * output pointer may be NULL.  Returns 0 on success or a negative
 * error from the slow path.
 */
int sel_netif_sids(struct net_device *dev, u32 *if_sid, u32 *msg_sid)
{
	int ret = 0;
	struct sel_netif *netif;

	rcu_read_lock();
	netif = sel_netif_lookup(dev);
	if (IS_ERR(netif)) {
		/* Cache miss/failed insert: drop the read lock before the
		 * slow policy query. */
		rcu_read_unlock();
		ret = sel_netif_sids_slow(dev, if_sid, msg_sid);
		goto out;
	}
	/* Copy the SIDs out while still under rcu_read_lock(), since the
	 * entry may be destroyed once we leave the read-side section. */
	sel_netif_assign_sids(netif->nsec.if_sid, netif->nsec.msg_sid, if_sid, msg_sid);
	rcu_read_unlock();
out:
	return ret;
}
  164. static void sel_netif_kill(struct net_device *dev)
  165. {
  166. struct sel_netif *netif;
  167. spin_lock_bh(&sel_netif_lock);
  168. netif = sel_netif_find(dev);
  169. if (netif)
  170. sel_netif_destroy(netif);
  171. spin_unlock_bh(&sel_netif_lock);
  172. }
  173. static void sel_netif_flush(void)
  174. {
  175. int idx;
  176. spin_lock_bh(&sel_netif_lock);
  177. for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++) {
  178. struct sel_netif *netif;
  179. list_for_each_entry(netif, &sel_netif_hash[idx], list)
  180. sel_netif_destroy(netif);
  181. }
  182. spin_unlock_bh(&sel_netif_lock);
  183. }
  184. static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
  185. u16 class, u32 perms, u32 *retained)
  186. {
  187. if (event == AVC_CALLBACK_RESET) {
  188. sel_netif_flush();
  189. synchronize_net();
  190. }
  191. return 0;
  192. }
  193. static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
  194. unsigned long event, void *ptr)
  195. {
  196. struct net_device *dev = ptr;
  197. if (event == NETDEV_DOWN)
  198. sel_netif_kill(dev);
  199. return NOTIFY_DONE;
  200. }
/* Registered on the netdevice notifier chain by sel_netif_init(). */
static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};
  204. static __init int sel_netif_init(void)
  205. {
  206. int i, err = 0;
  207. if (!selinux_enabled)
  208. goto out;
  209. for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
  210. INIT_LIST_HEAD(&sel_netif_hash[i]);
  211. register_netdevice_notifier(&sel_netif_netdev_notifier);
  212. err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET,
  213. SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
  214. if (err)
  215. panic("avc_add_callback() failed, error %d\n", err);
  216. out:
  217. return err;
  218. }
  219. __initcall(sel_netif_init);