/*
 * diag.c - sock_diag interface for AF_UNIX sockets.
 */
  1. #include <linux/types.h>
  2. #include <linux/spinlock.h>
  3. #include <linux/sock_diag.h>
  4. #include <linux/unix_diag.h>
  5. #include <linux/skbuff.h>
  6. #include <linux/module.h>
  7. #include <net/netlink.h>
  8. #include <net/af_unix.h>
  9. #include <net/tcp_states.h>
  10. static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
  11. {
  12. struct unix_address *addr = unix_sk(sk)->addr;
  13. if (!addr)
  14. return 0;
  15. return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
  16. addr->name->sun_path);
  17. }
  18. static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
  19. {
  20. struct dentry *dentry = unix_sk(sk)->path.dentry;
  21. if (dentry) {
  22. struct unix_diag_vfs uv = {
  23. .udiag_vfs_ino = dentry->d_inode->i_ino,
  24. .udiag_vfs_dev = dentry->d_sb->s_dev,
  25. };
  26. return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
  27. }
  28. return 0;
  29. }
  30. static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
  31. {
  32. struct sock *peer;
  33. int ino;
  34. peer = unix_peer_get(sk);
  35. if (peer) {
  36. unix_state_lock(peer);
  37. ino = sock_i_ino(peer);
  38. unix_state_unlock(peer);
  39. sock_put(peer);
  40. return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
  41. }
  42. return 0;
  43. }
/*
 * Emit the UNIX_DIAG_ICONS attribute: for a listening socket, one u32 per
 * pending (not yet accepted) incoming connection, holding the inode number
 * of the connecting peer (0 if it has none).
 *
 * Returns 0 on success or for non-listening sockets; -EMSGSIZE if the
 * attribute does not fit in @nlskb.
 */
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		/* Queue lock held across reserve + walk so qlen stays valid. */
		spin_lock(&sk->sk_receive_queue.lock);
		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			/* Each queued skb carries the embryo socket. */
			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}
  78. static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
  79. {
  80. struct unix_diag_rqlen rql;
  81. if (sk->sk_state == TCP_LISTEN) {
  82. rql.udiag_rqueue = sk->sk_receive_queue.qlen;
  83. rql.udiag_wqueue = sk->sk_max_ack_backlog;
  84. } else {
  85. rql.udiag_rqueue = (u32) unix_inq_len(sk);
  86. rql.udiag_wqueue = (u32) unix_outq_len(sk);
  87. }
  88. return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
  89. }
/*
 * Build one complete netlink reply message describing @sk into @skb,
 * attaching only the attribute groups requested in req->udiag_show.
 *
 * Returns the result of nlmsg_end() on success; on any attribute failure
 * the partially built message is cancelled and -EMSGSIZE is returned, so
 * the caller can retry with a larger buffer.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			u32 pid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	/* Fixed header: family/type/state/inode plus the identity cookie. */
	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	/* Optional attributes; each helper returns non-zero on no-room. */
	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	return nlmsg_end(skb, nlh);

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
  128. static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
  129. u32 pid, u32 seq, u32 flags)
  130. {
  131. int sk_ino;
  132. unix_state_lock(sk);
  133. sk_ino = sock_i_ino(sk);
  134. unix_state_unlock(sk);
  135. if (!sk_ino)
  136. return 0;
  137. return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
  138. }
/*
 * Netlink dump callback: walk every hash slot of unix_socket_table and
 * emit one message per socket whose state matches req->udiag_states.
 *
 * The resume cursor lives in cb->args: args[0] is the hash slot and
 * args[1] the position within that slot, so a dump interrupted by a full
 * skb restarts exactly where it stopped on the next invocation.
 */
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;
	struct net *net = sock_net(skb->sk);

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {
		struct sock *sk;
		struct hlist_node *node;

		num = 0;
		sk_for_each(sk, node, &unix_socket_table[slot]) {
			if (!net_eq(sock_net(sk), net))
				continue;
			/* Skip entries already emitted in a previous pass. */
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;	/* skb full: save cursor */
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}
  176. static struct sock *unix_lookup_by_ino(int ino)
  177. {
  178. int i;
  179. struct sock *sk;
  180. spin_lock(&unix_table_lock);
  181. for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
  182. struct hlist_node *node;
  183. sk_for_each(sk, node, &unix_socket_table[i])
  184. if (ino == sock_i_ino(sk)) {
  185. sock_hold(sk);
  186. spin_unlock(&unix_table_lock);
  187. return sk;
  188. }
  189. }
  190. spin_unlock(&unix_table_lock);
  191. return NULL;
  192. }
/*
 * Handle a non-dump request for a single socket identified by inode
 * number (and optionally its cookie).
 *
 * The reply buffer starts at sizeof(unix_diag_msg) + 256 bytes and grows
 * by 256 on each -EMSGSIZE from sk_diag_fill(), up to PAGE_SIZE.
 *
 * Returns 0 on success, -EINVAL for a zero inode, -ENOENT if no socket
 * matches, -ENOMEM on allocation failure, or a cookie-check error.
 */
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;
	struct net *net = sock_net(in_skb->sk);

	if (req->udiag_ino == 0)
		goto out_nosk;

	/* Lookup takes a reference; released at "out". */
	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		/* Message did not fit: free, enlarge, retry up to a page. */
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	/* netlink_unicast consumes @rep on both success and failure. */
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}
  236. static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
  237. {
  238. int hdrlen = sizeof(struct unix_diag_req);
  239. struct net *net = sock_net(skb->sk);
  240. if (nlmsg_len(h) < hdrlen)
  241. return -EINVAL;
  242. if (h->nlmsg_flags & NLM_F_DUMP) {
  243. struct netlink_dump_control c = {
  244. .dump = unix_diag_dump,
  245. };
  246. return netlink_dump_start(net->diag_nlsk, skb, h, &c);
  247. } else
  248. return unix_diag_get_exact(skb, h, nlmsg_data(h));
  249. }
/* Registration record routing AF_UNIX sock_diag requests to this module. */
static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};
  254. static int __init unix_diag_init(void)
  255. {
  256. return sock_diag_register(&unix_diag_handler);
  257. }
/* Module exit: drop the handler registered in unix_diag_init(). */
static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}
module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload on sock_diag requests for the AF_UNIX (AF_LOCAL = 1) family. */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);