sock_diag.c

#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static struct sock_diag_handler *sock_diag_handlers[AF_MAX];
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
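
/*
 * Socket cookies: userspace identifies a particular socket with a 64-bit
 * cookie carried in two __u32 words.  The cookie is derived from the
 * socket's kernel address; the double shift (>> 31, then >> 1) avoids an
 * undefined 32-bit shift count on 32-bit architectures.
 */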
int sock_diag_check_cookie(void *sk, __u32 *cookie)
{
        if ((cookie[0] != INET_DIAG_NOCOOKIE ||
             cookie[1] != INET_DIAG_NOCOOKIE) &&
            ((u32)(unsigned long)sk != cookie[0] ||
             (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
                return -ESTALE;
        else
                return 0;
}
EXPORT_SYMBOL_GPL(sock_diag_check_cookie);

void sock_diag_save_cookie(void *sk, __u32 *cookie)
{
        cookie[0] = (u32)(unsigned long)sk;
        cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
}
EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
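
/*
 * Append an SK_MEMINFO_* array as a netlink attribute describing the
 * socket's memory state: receive/send allocations and limits, queued
 * write data, forward-allocated memory and option memory.
 */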
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
{
        __u32 *mem;

        mem = RTA_DATA(__RTA_PUT(skb, attrtype, SK_MEMINFO_VARS * sizeof(__u32)));

        mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
        mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
        mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
        mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
        mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
        mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
        mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);

        return 0;

rtattr_failure:
        return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
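
/*
 * Hook for legacy inet_diag requests (TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK),
 * typically registered by the inet_diag module so that old requests can
 * still be serviced over the NETLINK_SOCK_DIAG socket.
 */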
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
        mutex_lock(&sock_diag_table_mutex);
        inet_rcv_compat = fn;
        mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);

void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
        mutex_lock(&sock_diag_table_mutex);
        inet_rcv_compat = NULL;
        mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
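
/*
 * Per-address-family diag modules register a sock_diag_handler for their
 * family; the handler table is protected by sock_diag_table_mutex.
 */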
int sock_diag_register(struct sock_diag_handler *hndl)
{
        int err = 0;

        if (hndl->family >= AF_MAX)
                return -EINVAL;

        mutex_lock(&sock_diag_table_mutex);
        if (sock_diag_handlers[hndl->family])
                err = -EBUSY;
        else
                sock_diag_handlers[hndl->family] = hndl;
        mutex_unlock(&sock_diag_table_mutex);

        return err;
}
EXPORT_SYMBOL_GPL(sock_diag_register);

void sock_diag_unregister(struct sock_diag_handler *hnld)
{
        int family = hnld->family;

        if (family >= AF_MAX)
                return;

        mutex_lock(&sock_diag_table_mutex);
        BUG_ON(sock_diag_handlers[family] != hnld);
        sock_diag_handlers[family] = NULL;
        mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
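
/*
 * Look up the handler for a family, attempting to autoload the matching
 * diag module via request_module() if none is registered yet.  The table
 * mutex stays held until sock_diag_unlock_handler() is called.
 */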
static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
{
        if (sock_diag_handlers[family] == NULL)
                request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
                                NETLINK_SOCK_DIAG, family);

        mutex_lock(&sock_diag_table_mutex);
        return sock_diag_handlers[family];
}

static inline void sock_diag_unlock_handler(struct sock_diag_handler *h)
{
        mutex_unlock(&sock_diag_table_mutex);
}
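
/*
 * Dispatch a SOCK_DIAG_BY_FAMILY request to the handler registered for
 * the family named in the request header.
 */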
static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        int err;
        struct sock_diag_req *req = NLMSG_DATA(nlh);
        struct sock_diag_handler *hndl;

        if (nlmsg_len(nlh) < sizeof(*req))
                return -EINVAL;

        hndl = sock_diag_lock_handler(req->sdiag_family);
        if (hndl == NULL)
                err = -ENOENT;
        else
                err = hndl->dump(skb, nlh);
        sock_diag_unlock_handler(hndl);

        return err;
}
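
/*
 * Top-level message dispatch: legacy TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK
 * requests go through the inet compat hook (autoloading the AF_INET diag
 * module if needed), SOCK_DIAG_BY_FAMILY through the per-family handlers.
 */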
static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        int ret;

        switch (nlh->nlmsg_type) {
        case TCPDIAG_GETSOCK:
        case DCCPDIAG_GETSOCK:
                if (inet_rcv_compat == NULL)
                        request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
                                        NETLINK_SOCK_DIAG, AF_INET);

                mutex_lock(&sock_diag_table_mutex);
                if (inet_rcv_compat != NULL)
                        ret = inet_rcv_compat(skb, nlh);
                else
                        ret = -EOPNOTSUPP;
                mutex_unlock(&sock_diag_table_mutex);

                return ret;
        case SOCK_DIAG_BY_FAMILY:
                return __sock_diag_rcv_msg(skb, nlh);
        default:
                return -EINVAL;
        }
}
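
/* sock_diag_mutex serializes processing of incoming diag requests. */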
static DEFINE_MUTEX(sock_diag_mutex);

static void sock_diag_rcv(struct sk_buff *skb)
{
        mutex_lock(&sock_diag_mutex);
        netlink_rcv_skb(skb, &sock_diag_rcv_msg);
        mutex_unlock(&sock_diag_mutex);
}
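
/*
 * The NETLINK_SOCK_DIAG kernel socket, created in the initial network
 * namespace at init time and exported so per-family diag modules can
 * send replies on it.
 */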
struct sock *sock_diag_nlsk;
EXPORT_SYMBOL_GPL(sock_diag_nlsk);

static int __init sock_diag_init(void)
{
        sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0,
                                        sock_diag_rcv, NULL, THIS_MODULE);
        return sock_diag_nlsk == NULL ? -ENOMEM : 0;
}

static void __exit sock_diag_exit(void)
{
        netlink_kernel_release(sock_diag_nlsk);
}

module_init(sock_diag_init);
module_exit(sock_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);