/*
 * diag.c - monitoring of AF_UNIX sockets via the netlink sock_diag
 * interface (NETLINK_SOCK_DIAG / SOCK_DIAG_BY_FAMILY).
 */
  1. #include <linux/types.h>
  2. #include <linux/spinlock.h>
  3. #include <linux/sock_diag.h>
  4. #include <linux/unix_diag.h>
  5. #include <linux/skbuff.h>
  6. #include <linux/module.h>
  7. #include <net/netlink.h>
  8. #include <net/af_unix.h>
  9. #include <net/tcp_states.h>
/*
 * Reserve an rtattr of @attrtype/@attrlen in @skb and return a pointer
 * to its payload.  NOTE: on overflow __RTA_PUT() does `goto
 * rtattr_failure`, so every function using this macro must provide a
 * local rtattr_failure label.
 */
#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
  12. static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
  13. {
  14. struct unix_address *addr = unix_sk(sk)->addr;
  15. char *s;
  16. if (addr) {
  17. s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
  18. memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
  19. }
  20. return 0;
  21. rtattr_failure:
  22. return -EMSGSIZE;
  23. }
  24. static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
  25. {
  26. struct dentry *dentry = unix_sk(sk)->path.dentry;
  27. struct unix_diag_vfs *uv;
  28. if (dentry) {
  29. uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
  30. uv->udiag_vfs_ino = dentry->d_inode->i_ino;
  31. uv->udiag_vfs_dev = dentry->d_sb->s_dev;
  32. }
  33. return 0;
  34. rtattr_failure:
  35. return -EMSGSIZE;
  36. }
  37. static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
  38. {
  39. struct sock *peer;
  40. int ino;
  41. peer = unix_peer_get(sk);
  42. if (peer) {
  43. unix_state_lock(peer);
  44. ino = sock_i_ino(peer);
  45. unix_state_unlock(peer);
  46. sock_put(peer);
  47. RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
  48. }
  49. return 0;
  50. rtattr_failure:
  51. return -EMSGSIZE;
  52. }
/*
 * For a listening socket, emit the peer inode numbers of all pending
 * (not yet accepted) connections in its receive queue as a u32 array
 * (UNIX_DIAG_ICONS).  Returns 0 on success (or for non-listeners),
 * -EMSGSIZE if the attribute did not fit.
 */
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		/* Hold the queue lock so qlen (used to size the
		 * attribute) matches the walk below. */
		spin_lock(&sk->sk_receive_queue.lock);
		buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
				sk->sk_receive_queue.qlen * sizeof(u32));
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			/* Each queued skb represents an embryo connection;
			 * skb->sk is the embryo socket. */
			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

rtattr_failure:
	/* Only reachable via UNIX_DIAG_PUT above, i.e. with the queue
	 * lock still held — release it before bailing out. */
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}
  83. static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
  84. {
  85. struct unix_diag_rqlen *rql;
  86. rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));
  87. if (sk->sk_state == TCP_LISTEN) {
  88. rql->udiag_rqueue = sk->sk_receive_queue.qlen;
  89. rql->udiag_wqueue = sk->sk_max_ack_backlog;
  90. } else {
  91. rql->udiag_rqueue = (__u32)unix_inq_len(sk);
  92. rql->udiag_wqueue = (__u32)unix_outq_len(sk);
  93. }
  94. return 0;
  95. rtattr_failure:
  96. return -EMSGSIZE;
  97. }
/*
 * Build one complete SOCK_DIAG_BY_FAMILY reply for @sk in @skb,
 * emitting only the attribute groups requested via req->udiag_show.
 * Returns the skb length on success; on overflow the partially built
 * message is trimmed back to @b and -EMSGSIZE is returned.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 pid, u32 seq, u32 flags, int sk_ino)
{
	unsigned char *b = skb_tail_pointer(skb);	/* rollback point */
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep), 0);
	if (!nlh)
		goto out_nlmsg_trim;
	nlh->nlmsg_flags = flags;

	/* Fixed header of the reply. */
	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	/* Optional attributes; each helper returns non-zero (-EMSGSIZE)
	 * when the skb is full. */
	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	/* Finalize the netlink header length now that all attrs are in. */
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
  138. static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
  139. u32 pid, u32 seq, u32 flags)
  140. {
  141. int sk_ino;
  142. unix_state_lock(sk);
  143. sk_ino = sock_i_ino(sk);
  144. unix_state_unlock(sk);
  145. if (!sk_ino)
  146. return 0;
  147. return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
  148. }
/*
 * Netlink dump callback: walk every hash slot of unix_socket_table and
 * emit one message per socket whose state is selected in
 * req->udiag_states.  Resume position across invocations is kept in
 * cb->args[0] (slot) and cb->args[1] (index within that slot).
 */
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;

	req = nlmsg_data(cb->nlh);

	/* Where the previous invocation (if any) left off. */
	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {	/* restart index at each new slot */
		struct sock *sk;
		struct hlist_node *node;

		num = 0;
		sk_for_each(sk, node, &unix_socket_table[slot]) {
			/* Skip entries already emitted on a prior pass. */
			if (num < s_num)
				goto next;
			/* Filter by requested socket states (bitmask). */
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;	/* skb full — resume later */
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	/* Record resume position for the next invocation. */
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}
  183. static struct sock *unix_lookup_by_ino(int ino)
  184. {
  185. int i;
  186. struct sock *sk;
  187. spin_lock(&unix_table_lock);
  188. for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
  189. struct hlist_node *node;
  190. sk_for_each(sk, node, &unix_socket_table[i])
  191. if (ino == sock_i_ino(sk)) {
  192. sock_hold(sk);
  193. spin_unlock(&unix_table_lock);
  194. return sk;
  195. }
  196. }
  197. spin_unlock(&unix_table_lock);
  198. return NULL;
  199. }
/*
 * Handle an exact (non-dump) query: look the socket up by inode,
 * verify the identity cookie, then unicast a single reply to the
 * requester.  The reply skb starts with 256 bytes of attribute space
 * and is regrown in 256-byte steps (bounded by PAGE_SIZE) until the
 * message fits.  Returns 0 on success or a negative errno.
 */
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;

	/* Inode 0 is never a valid socket. */
	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	/* Guard against inode reuse: the cookie must match too. */
	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		/* -EMSGSIZE: message didn't fit — retry with a larger
		 * skb, giving up once we reach PAGE_SIZE. */
		kfree_skb(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;	/* netlink_unicast returns bytes sent */
out:
	if (sk)
		sock_put(sk);	/* drop the ref taken by the lookup */
out_nosk:
	return err;
}
  243. static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
  244. {
  245. int hdrlen = sizeof(struct unix_diag_req);
  246. if (nlmsg_len(h) < hdrlen)
  247. return -EINVAL;
  248. if (h->nlmsg_flags & NLM_F_DUMP) {
  249. struct netlink_dump_control c = {
  250. .dump = unix_diag_dump,
  251. };
  252. return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
  253. } else
  254. return unix_diag_get_exact(skb, h, nlmsg_data(h));
  255. }
/* Handler registered with the sock_diag core for AF_UNIX queries. */
static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};
/* Module init: register the AF_UNIX handler with sock_diag. */
static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

/* Module exit: mirror of unix_diag_init(). */
static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module on NETLINK_SOCK_DIAG requests for family 1. */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);