/*
 * xfrm4_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/inetdevice.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>

static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
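
/* Resolve an IPv4 route to @daddr (optionally constrained to @saddr as the
 * source) and return its dst_entry, or an ERR_PTR on routing failure. */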
static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
					  xfrm_address_t *saddr,
					  xfrm_address_t *daddr)
{
	struct flowi fl = {
		.nl_u = {
			.ip4_u = {
				.tos = tos,
				.daddr = daddr->a4,
			},
		},
	};
	struct dst_entry *dst;
	struct rtable *rt;
	int err;

	if (saddr)
		fl.fl4_src = saddr->a4;

	err = __ip_route_output_key(net, &rt, &fl);
	dst = &rt->u.dst;
	if (err)
		dst = ERR_PTR(err);
	return dst;
}
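
/* Pick a local source address for @daddr by doing a route lookup and copying
 * the preferred source address the routing code selected. */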
static int xfrm4_get_saddr(struct net *net,
			   xfrm_address_t *saddr, xfrm_address_t *daddr)
{
	struct dst_entry *dst;
	struct rtable *rt;

	dst = xfrm4_dst_lookup(net, 0, NULL, daddr);
	if (IS_ERR(dst))
		return -EHOSTUNREACH;

	rt = (struct rtable *)dst;
	saddr->a4 = rt->rt_src;
	dst_release(dst);
	return 0;
}
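
/* Walk the policy's cached bundles looking for one whose flow key (oif,
 * addresses, TOS) matches @fl and that is still usable; take a reference on
 * the match before returning it. */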
static struct dst_entry *
__xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	read_lock_bh(&policy->lock);
	for (dst = policy->bundles; dst; dst = dst->next) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (xdst->u.rt.fl.oif == fl->oif &&	/*XXX*/
		    xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
		    xdst->u.rt.fl.fl4_src == fl->fl4_src &&
		    xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
		    xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
			dst_clone(dst);
			break;
		}
	}
	read_unlock_bh(&policy->lock);

	return dst;
}
static int xfrm4_get_tos(struct flowi *fl)
{
	return fl->fl4_tos;
}

static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
			   int nfheader_len)
{
	return 0;
}
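
/* Copy routing metadata (flow key, device, inet_peer, route flags and
 * addresses) from the underlying IPv4 route into the xfrm_dst. */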
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
{
	struct rtable *rt = (struct rtable *)xdst->route;

	xdst->u.rt.fl = rt->fl;

	xdst->u.dst.dev = dev;
	dev_hold(dev);

	xdst->u.rt.idev = in_dev_get(dev);
	if (!xdst->u.rt.idev)
		return -ENODEV;

	xdst->u.rt.peer = rt->peer;
	if (rt->peer)
		atomic_inc(&rt->peer->refcnt);

	/* Sheit... I remember I did this right. Apparently,
	 * it was magically lost, so this code needs audit */
	xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
					      RTCF_LOCAL);
	xdst->u.rt.rt_type = rt->rt_type;
	xdst->u.rt.rt_src = rt->rt_src;
	xdst->u.rt.rt_dst = rt->rt_dst;
	xdst->u.rt.rt_gateway = rt->rt_gateway;
	xdst->u.rt.rt_spec_dst = rt->rt_spec_dst;

	return 0;
}
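
/* Build a flowi from the packet headers so it can be matched against xfrm
 * policies; for unfragmented packets also extract the per-protocol selectors
 * (ports, ICMP type/code, or SPI). @reverse swaps source and destination. */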
static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
	struct iphdr *iph = ip_hdr(skb);
	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;

	memset(fl, 0, sizeof(struct flowi));

	if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports = (__be16 *)xprth;

				fl->fl_ip_sport = ports[!!reverse];
				fl->fl_ip_dport = ports[!reverse];
			}
			break;

		case IPPROTO_ICMP:
			if (pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp = xprth;

				fl->fl_icmp_type = icmp[0];
				fl->fl_icmp_code = icmp[1];
			}
			break;

		case IPPROTO_ESP:
			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr = (__be32 *)xprth;

				fl->fl_ipsec_spi = ehdr[0];
			}
			break;

		case IPPROTO_AH:
			if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr = (__be32 *)xprth;

				fl->fl_ipsec_spi = ah_hdr[1];
			}
			break;

		case IPPROTO_COMP:
			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr = (__be16 *)xprth;

				fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;

		default:
			fl->fl_ipsec_spi = 0;
			break;
		}
	}
	fl->proto = iph->protocol;
	fl->fl4_dst = reverse ? iph->saddr : iph->daddr;
	fl->fl4_src = reverse ? iph->daddr : iph->saddr;
	fl->fl4_tos = iph->tos;
}
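
/* dst_ops garbage-collection hook: flush stale bundles for this netns and
 * report memory pressure once the cached entries exceed twice gc_thresh. */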
static inline int xfrm4_garbage_collect(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);

	xfrm4_policy_afinfo.garbage_collect(net);
	return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
}

static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct dst_entry *path = xdst->route;

	path->ops->update_pmtu(path, mtu);
}
static void xfrm4_dst_destroy(struct dst_entry *dst)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

	if (likely(xdst->u.rt.idev))
		in_dev_put(xdst->u.rt.idev);
	if (likely(xdst->u.rt.peer))
		inet_putpeer(xdst->u.rt.peer);
	xfrm_dst_destroy(xdst);
}

static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			     int unregister)
{
	struct xfrm_dst *xdst;

	if (!unregister)
		return;

	xdst = (struct xfrm_dst *)dst;
	if (xdst->u.rt.idev->dev == dev) {
		struct in_device *loopback_idev =
			in_dev_get(dev_net(dev)->loopback_dev);
		BUG_ON(!loopback_idev);

		do {
			in_dev_put(xdst->u.rt.idev);
			xdst->u.rt.idev = loopback_idev;
			in_dev_hold(loopback_idev);
			xdst = (struct xfrm_dst *)xdst->u.dst.child;
		} while (xdst->u.dst.xfrm);

		__in_dev_put(loopback_idev);
	}

	xfrm_dst_ifdown(dst, dev);
}
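
/* Glue tables that plug the IPv4-specific helpers above into the generic
 * xfrm policy and dst cache machinery. */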
static struct dst_ops xfrm4_dst_ops = {
	.family = AF_INET,
	.protocol = cpu_to_be16(ETH_P_IP),
	.gc = xfrm4_garbage_collect,
	.update_pmtu = xfrm4_update_pmtu,
	.destroy = xfrm4_dst_destroy,
	.ifdown = xfrm4_dst_ifdown,
	.local_out = __ip_local_out,
	.gc_thresh = 1024,
	.entries = ATOMIC_INIT(0),
};

static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
	.family = AF_INET,
	.dst_ops = &xfrm4_dst_ops,
	.dst_lookup = xfrm4_dst_lookup,
	.get_saddr = xfrm4_get_saddr,
	.find_bundle = __xfrm4_find_bundle,
	.decode_session = _decode_session4,
	.get_tos = xfrm4_get_tos,
	.init_path = xfrm4_init_path,
	.fill_dst = xfrm4_fill_dst,
};
#ifdef CONFIG_SYSCTL
static struct ctl_table xfrm4_policy_table[] = {
	{
		.procname = "xfrm4_gc_thresh",
		.data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *sysctl_hdr;
#endif
static void __init xfrm4_policy_init(void)
{
	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
}

static void __exit xfrm4_policy_fini(void)
{
#ifdef CONFIG_SYSCTL
	if (sysctl_hdr)
		unregister_net_sysctl_table(sysctl_hdr);
#endif
	xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}
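
/* Boot-time entry point: derive a gc threshold from the routing cache size,
 * then register the IPv4 xfrm state/policy hooks and the gc_thresh sysctl. */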
void __init xfrm4_init(int rt_max_size)
{
	/*
	 * Select a default value for the gc_thresh based on the main route
	 * table hash size.  It seems to me the worst-case scenario is when
	 * we have IPsec operating in transport mode, in which case we create
	 * a dst_entry per socket.  The xfrm gc algorithm starts trying to
	 * remove entries at gc_thresh, and prevents new allocations at
	 * 2*gc_thresh, so let's set the initial xfrm gc_thresh value to
	 * rt_max_size/2.  That will let us store an IPsec connection per
	 * route table entry, and start cleaning when we're 1/2 full.
	 */
	xfrm4_dst_ops.gc_thresh = rt_max_size/2;

	xfrm4_state_init();
	xfrm4_policy_init();
#ifdef CONFIG_SYSCTL
	sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
					       xfrm4_policy_table);
#endif
}