diag.c 6.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258
  1. #include <linux/module.h>
  2. #include <linux/sock_diag.h>
  3. #include <linux/net.h>
  4. #include <linux/netdevice.h>
  5. #include <linux/packet_diag.h>
  6. #include <net/net_namespace.h>
  7. #include <net/sock.h>
  8. #include "internal.h"
  9. static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
  10. {
  11. struct packet_diag_info pinfo;
  12. pinfo.pdi_index = po->ifindex;
  13. pinfo.pdi_version = po->tp_version;
  14. pinfo.pdi_reserve = po->tp_reserve;
  15. pinfo.pdi_copy_thresh = po->copy_thresh;
  16. pinfo.pdi_tstamp = po->tp_tstamp;
  17. pinfo.pdi_flags = 0;
  18. if (po->running)
  19. pinfo.pdi_flags |= PDI_RUNNING;
  20. if (po->auxdata)
  21. pinfo.pdi_flags |= PDI_AUXDATA;
  22. if (po->origdev)
  23. pinfo.pdi_flags |= PDI_ORIGDEV;
  24. if (po->has_vnet_hdr)
  25. pinfo.pdi_flags |= PDI_VNETHDR;
  26. if (po->tp_loss)
  27. pinfo.pdi_flags |= PDI_LOSS;
  28. return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
  29. }
  30. static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
  31. {
  32. struct nlattr *mca;
  33. struct packet_mclist *ml;
  34. mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
  35. if (!mca)
  36. return -EMSGSIZE;
  37. rtnl_lock();
  38. for (ml = po->mclist; ml; ml = ml->next) {
  39. struct packet_diag_mclist *dml;
  40. dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
  41. if (!dml) {
  42. rtnl_unlock();
  43. nla_nest_cancel(nlskb, mca);
  44. return -EMSGSIZE;
  45. }
  46. dml->pdmc_index = ml->ifindex;
  47. dml->pdmc_type = ml->type;
  48. dml->pdmc_alen = ml->alen;
  49. dml->pdmc_count = ml->count;
  50. BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
  51. memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
  52. }
  53. rtnl_unlock();
  54. nla_nest_end(nlskb, mca);
  55. return 0;
  56. }
  57. static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
  58. struct sk_buff *nlskb)
  59. {
  60. struct packet_diag_ring pdr;
  61. if (!ring->pg_vec || ((ver > TPACKET_V2) &&
  62. (nl_type == PACKET_DIAG_TX_RING)))
  63. return 0;
  64. pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
  65. pdr.pdr_block_nr = ring->pg_vec_len;
  66. pdr.pdr_frame_size = ring->frame_size;
  67. pdr.pdr_frame_nr = ring->frame_max + 1;
  68. if (ver > TPACKET_V2) {
  69. pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
  70. pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
  71. pdr.pdr_features = ring->prb_bdqc.feature_req_word;
  72. } else {
  73. pdr.pdr_retire_tmo = 0;
  74. pdr.pdr_sizeof_priv = 0;
  75. pdr.pdr_features = 0;
  76. }
  77. return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
  78. }
  79. static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
  80. {
  81. int ret;
  82. mutex_lock(&po->pg_vec_lock);
  83. ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
  84. PACKET_DIAG_RX_RING, skb);
  85. if (!ret)
  86. ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
  87. PACKET_DIAG_TX_RING, skb);
  88. mutex_unlock(&po->pg_vec_lock);
  89. return ret;
  90. }
  91. static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
  92. {
  93. int ret = 0;
  94. mutex_lock(&fanout_mutex);
  95. if (po->fanout) {
  96. u32 val;
  97. val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
  98. ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
  99. }
  100. mutex_unlock(&fanout_mutex);
  101. return ret;
  102. }
/* Build one complete SOCK_DIAG_BY_FAMILY response message for @sk:
 * the fixed packet_diag_msg header plus every optional attribute the
 * requester asked for via req->pdiag_show.  On any attribute failure the
 * whole partially built message is trimmed and -EMSGSIZE is returned,
 * so the dump can be resumed in a fresh skb.  Attribute order matters to
 * userspace parsers and must not be changed.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct packet_diag_req *req,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct packet_diag_msg *rp;
	struct packet_sock *po = pkt_sk(sk);

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
	if (!nlh)
		return -EMSGSIZE;

	rp = nlmsg_data(nlh);
	rp->pdiag_family = AF_PACKET;
	rp->pdiag_type = sk->sk_type;
	/* po->num is the bound protocol in network byte order; report host order. */
	rp->pdiag_num = ntohs(po->num);
	rp->pdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rp->pdiag_cookie);

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    pdiag_put_info(po, skb))
		goto out_nlmsg_trim;

	/* The UID attribute rides along with PACKET_SHOW_INFO rather than
	 * having its own show flag; translated into the requester's userns. */
	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    nla_put_u32(skb, PACKET_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
	    pdiag_put_mclist(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
	    pdiag_put_rings_cfg(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
	    pdiag_put_fanout(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
	    sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER))
		goto out_nlmsg_trim;

	return nlmsg_end(skb, nlh);

out_nlmsg_trim:
	/* Drop the half-built message so the skb stays well-formed. */
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
  147. static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
  148. {
  149. int num = 0, s_num = cb->args[0];
  150. struct packet_diag_req *req;
  151. struct net *net;
  152. struct sock *sk;
  153. net = sock_net(skb->sk);
  154. req = nlmsg_data(cb->nlh);
  155. mutex_lock(&net->packet.sklist_lock);
  156. sk_for_each(sk, &net->packet.sklist) {
  157. if (!net_eq(sock_net(sk), net))
  158. continue;
  159. if (num < s_num)
  160. goto next;
  161. if (sk_diag_fill(sk, skb, req,
  162. sk_user_ns(NETLINK_CB(cb->skb).sk),
  163. NETLINK_CB(cb->skb).portid,
  164. cb->nlh->nlmsg_seq, NLM_F_MULTI,
  165. sock_i_ino(sk)) < 0)
  166. goto done;
  167. next:
  168. num++;
  169. }
  170. done:
  171. mutex_unlock(&net->packet.sklist_lock);
  172. cb->args[0] = num;
  173. return skb->len;
  174. }
  175. static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
  176. {
  177. int hdrlen = sizeof(struct packet_diag_req);
  178. struct net *net = sock_net(skb->sk);
  179. struct packet_diag_req *req;
  180. if (nlmsg_len(h) < hdrlen)
  181. return -EINVAL;
  182. req = nlmsg_data(h);
  183. /* Make it possible to support protocol filtering later */
  184. if (req->sdiag_protocol)
  185. return -EINVAL;
  186. if (h->nlmsg_flags & NLM_F_DUMP) {
  187. struct netlink_dump_control c = {
  188. .dump = packet_diag_dump,
  189. };
  190. return netlink_dump_start(net->diag_nlsk, skb, h, &c);
  191. } else
  192. return -EOPNOTSUPP;
  193. }
/* Registration table: routes AF_PACKET sock_diag requests to this module. */
static const struct sock_diag_handler packet_diag_handler = {
	.family = AF_PACKET,
	.dump = packet_diag_handler_dump,
};
/* Module load: register the AF_PACKET handler with the sock_diag core. */
static int __init packet_diag_init(void)
{
	return sock_diag_register(&packet_diag_handler);
}
/* Module unload: detach the AF_PACKET handler from the sock_diag core. */
static void __exit packet_diag_exit(void)
{
	sock_diag_unregister(&packet_diag_handler);
}
module_init(packet_diag_init);
module_exit(packet_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module when a NETLINK_SOCK_DIAG request arrives for
 * address family 17 (AF_PACKET). */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);