/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	__be32	*saddr;
	__be32	*daddr;
	u16	sport;
	u16	dport;
	u16	family;
	u16	userlocks;
};

static DEFINE_MUTEX(inet_diag_table_mutex);
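
/*
 * Look up the handler registered for a transport protocol.  If none is
 * present yet, ask for the matching module to be loaded, then take
 * inet_diag_table_mutex; the mutex is released by inet_diag_unlock_handler()
 * once the caller is done with the handler (or with the ERR_PTR).
 */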
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}

static inline void inet_diag_unlock_handler(
	const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}
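
/*
 * Fill one inet_diag_msg netlink message for a full socket: base identity
 * (addresses, ports, interface, cookie), optional attributes such as TOS,
 * TCLASS, memory info and congestion control state, plus protocol-specific
 * info supplied by the registered handler.
 */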
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, struct inet_diag_req_v2 *req,
		      u32 pid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	void *info = NULL;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(handler == NULL);

	nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;
	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		const struct ipv6_pinfo *np = inet6_sk(sk);

		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = np->daddr;

		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
				goto errout;
	}
#endif

	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (icsk == NULL) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   sizeof(struct tcp_info));
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
		if (nla_put_string(skb, INET_DIAG_CONG,
				   icsk->icsk_ca_ops->name) < 0)
			goto errout;

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	return nlmsg_end(skb, nlh);

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
			      u32 pid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk),
			skb, req, pid, seq, nlmsg_flags, unlh);
}
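
/*
 * TIME_WAIT sockets are not full sockets, so they are reported from the
 * inet_timewait_sock fields alone: no uid, inode or queue information.
 */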
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, struct inet_diag_req_v2 *req,
			       u32 pid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	long tmo;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_ttd - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->idiag_family = tw->tw_family;
	r->idiag_retrans = 0;
	r->id.idiag_if = tw->tw_bound_dev_if;
	sock_diag_save_cookie(tw, r->id.idiag_cookie);
	r->id.idiag_sport = tw->tw_sport;
	r->id.idiag_dport = tw->tw_dport;
	r->id.idiag_src[0] = tw->tw_rcv_saddr;
	r->id.idiag_dst[0] = tw->tw_daddr;
	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (tw->tw_family == AF_INET6) {
		const struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);

		*(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
	}
#endif

	return nlmsg_end(skb, nlh);
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
					   skb, r, pid, seq, nlmsg_flags,
					   unlh);
	return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
}
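
/*
 * Answer an exact-match request: look up the single socket described by
 * req->id in the given hash table and unicast one reply back to the
 * requesting netlink socket.
 */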
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
		const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
{
	int err;
	struct sock *sk;
	struct sk_buff *rep;
	struct net *net = sock_net(in_skb->sk);

	err = -EINVAL;
	if (req->sdiag_family == AF_INET) {
		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		sk = inet6_lookup(net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
	}
#endif
	else {
		goto out_nosk;
	}

	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
			sizeof(struct inet_diag_meminfo) +
			sizeof(struct tcp_info) + 64, GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, req,
			   NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT)
			inet_twsk_put((struct inet_timewait_sock *)sk);
		else
			sock_put(sk);
	}
out_nosk:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else
		err = handler->dump_one(in_skb, nlh, req);
	inet_diag_unlock_handler(handler);

	return err;
}
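
/*
 * Compare the leading 'bits' bits of two addresses; used for the
 * prefix-length test in host conditions.
 */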
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}
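
/*
 * Interpret the INET_DIAG_REQ_BYTECODE filter against one socket entry.
 * Each op either matches (advance by op->yes) or fails (advance by op->no);
 * the filter accepts the socket when the walk runs exactly off the end.
 */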
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			struct inet_diag_hostcond *cond;
			__be32 *addr;

			cond = (struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					   entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			if (entry->family == AF_INET6 &&
			    cond->family == AF_INET) {
				if (addr[0] == 0 && addr[1] == 0 &&
				    addr[2] == htonl(0xffff) &&
				    bitstring_match(addr + 3, cond->addr,
						    cond->prefix_len))
					break;
			}
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}

int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_diag_entry entry;
	struct inet_sock *inet = inet_sk(sk);

	if (bc == NULL)
		return 1;

	entry.family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
	if (entry.family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		entry.saddr = np->rcv_saddr.s6_addr32;
		entry.daddr = np->daddr.s6_addr32;
	} else
#endif
	{
		entry.saddr = &inet->inet_rcv_saddr;
		entry.daddr = &inet->inet_daddr;
	}
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.userlocks = sk->sk_userlocks;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
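
/*
 * Validate user-supplied bytecode before running it: every jump target must
 * stay inside the filter and land on an op boundary, so a malformed program
 * cannot make inet_diag_bc_run() walk out of bounds.
 */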
static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc  += op->yes;
	}
	return 0;
}

static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const void *bc = bytecode;
	int len = bytecode_len;

	while (len > 0) {
		const struct inet_diag_bc_op *op = bc;

//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
		switch (op->code) {
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
		case INET_DIAG_BC_JMP:
			if (op->no < 4 || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
			break;
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}
		if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc  += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}

static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct inet_diag_req_v2 *r,
			      const struct nlattr *bc)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_csk_diag_fill(sk, skb, r,
				  NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
			       struct sk_buff *skb,
			       struct netlink_callback *cb,
			       struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	if (bc != NULL) {
		struct inet_diag_entry entry;

		entry.family = tw->tw_family;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);

			entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
			entry.daddr = tw6->tw_v6_daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &tw->tw_rcv_saddr;
			entry.daddr = &tw->tw_daddr;
		}
		entry.sport = tw->tw_num;
		entry.dport = ntohs(tw->tw_dport);
		entry.userlocks = 0;

		if (!inet_diag_bc_run(bc, &entry))
			return 0;
	}

	return inet_twsk_diag_fill(tw, skb, r,
				   NETLINK_CB(cb->skb).pid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
			      struct request_sock *req, u32 pid, u32 seq,
			      const struct nlmsghdr *unlh)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->idiag_family = sk->sk_family;
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = req->retrans;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(req, r->id.idiag_cookie);

	tmo = req->expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = ireq->rmt_port;
	r->id.idiag_src[0] = ireq->loc_addr;
	r->id.idiag_dst[0] = ireq->rmt_addr;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
		*(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
	}
#endif

	return nlmsg_end(skb, nlh);
}
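
/*
 * Dump the pending connection requests (SYN_RECV mini sockets) parked in a
 * listening socket's syn table, resuming from cb->args[3]/cb->args[4] on
 * subsequent dump passes.
 */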
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb,
			       struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	struct inet_diag_entry entry;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt;
	struct inet_sock *inet = inet_sk(sk);
	int j, s_j;
	int reqnum, s_reqnum;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !lopt->qlen)
		goto out;

	if (bc != NULL) {
		entry.sport = inet->inet_num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				entry.saddr =
#if IS_ENABLED(CONFIG_IPV6)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->loc_addr.s6_addr32 :
#endif
					&ireq->loc_addr;
				entry.daddr =
#if IS_ENABLED(CONFIG_IPV6)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->rmt_addr.s6_addr32 :
#endif
					&ireq->rmt_addr;
				entry.dport = ntohs(ireq->rmt_port);

				if (!inet_diag_bc_run(bc, &entry))
					continue;
			}

			err = inet_diag_fill_req(skb, sk, req,
					       NETLINK_CB(cb->skb).pid,
					       cb->nlh->nlmsg_seq, cb->nlh);
			if (err < 0) {
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}
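
/*
 * Walk the listening hash and the established/time-wait hash of a protocol's
 * inet_hashinfo, emitting one message per matching socket.  cb->args[] keeps
 * the walk position so the dump can be restarted where the previous skb
 * filled up.
 */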
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
		struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	int i, num;
	int s_i, s_num;
	struct net *net = sock_net(skb->sk);

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct sock *sk;
			struct hlist_nulls_node *node;
			struct inet_listen_hashbucket *ilb;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				if (!net_eq(sock_net(sk), net))
					continue;

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
				    sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}
			spin_unlock_bh(&ilb->lock);

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto out;

	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct sock *sk;
		struct hlist_nulls_node *node;

		num = 0;

		if (hlist_nulls_empty(&head->chain) &&
		    hlist_nulls_empty(&head->twchain))
			continue;

		if (i > s_i)
			s_num = 0;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next_normal;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		if (r->idiag_states & TCPF_TIME_WAIT) {
			struct inet_timewait_sock *tw;

			inet_twsk_for_each(tw, node,
					   &head->twchain) {
				if (!net_eq(twsk_net(tw), net))
					continue;

				if (num < s_num)
					goto next_dying;
				if (r->sdiag_family != AF_UNSPEC &&
				    tw->tw_family != r->sdiag_family)
					goto next_dying;
				if (r->id.idiag_sport != tw->tw_sport &&
				    r->id.idiag_sport)
					goto next_dying;
				if (r->id.idiag_dport != tw->tw_dport &&
				    r->id.idiag_dport)
					goto next_dying;
				if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
					spin_unlock_bh(lock);
					goto done;
				}
next_dying:
				++num;
			}
		}
		spin_unlock_bh(lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);

static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
		struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	const struct inet_diag_handler *handler;

	handler = inet_diag_lock_handler(r->sdiag_protocol);
	if (!IS_ERR(handler))
		handler->dump(skb, cb, r, bc);
	inet_diag_unlock_handler(handler);

	return skb->len;
}

static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *bc = NULL;
	int hdrlen = sizeof(struct inet_diag_req_v2);

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}

static inline int inet_diag_type2proto(int type)
{
	switch (type) {
	case TCPDIAG_GETSOCK:
		return IPPROTO_TCP;
	case DCCPDIAG_GETSOCK:
		return IPPROTO_DCCP;
	default:
		return 0;
	}
}
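
/*
 * Compatibility entry points for the old TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK
 * interface: translate the v1 request header into an inet_diag_req_v2 and
 * reuse the normal dump/get paths.
 */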
static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct inet_diag_req *rc = nlmsg_data(cb->nlh);
	struct inet_diag_req_v2 req;
	struct nlattr *bc = NULL;
	int hdrlen = sizeof(struct inet_diag_req);

	req.sdiag_family = AF_UNSPEC; /* compatibility */
	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, &req, bc);
}

static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
				      const struct nlmsghdr *nlh)
{
	struct inet_diag_req *rc = nlmsg_data(nlh);
	struct inet_diag_req_v2 req;

	req.sdiag_family = rc->idiag_family;
	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	return inet_diag_get_exact(in_skb, nlh, &req);
}

static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump_compat,
			};
			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
		}
	}

	return inet_diag_get_exact_compat(skb, nlh);
}

static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(h, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(h, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump,
			};
			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
		}
	}

	return inet_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_dump,
};

static const struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_dump,
};

int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= IPPROTO_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (inet_diag_table[type] == NULL) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= IPPROTO_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (IPPROTO_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	err = sock_diag_register(&inet_diag_handler);
	if (err)
		goto out_free_nl;

	err = sock_diag_register(&inet6_diag_handler);
	if (err)
		goto out_free_inet;

	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
	return err;

out_free_inet:
	sock_diag_unregister(&inet_diag_handler);
out_free_nl:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);