  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * The User Datagram Protocol (UDP).
  7. *
  8. * Version: $Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
  9. *
  10. * Authors: Ross Biro
  11. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12. * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  13. * Alan Cox, <Alan.Cox@linux.org>
  14. * Hirokazu Takahashi, <taka@valinux.co.jp>
  15. *
  16. * Fixes:
  17. * Alan Cox : verify_area() calls
  18. * Alan Cox : stopped close while in use off icmp
  19. * messages. Not a fix but a botch that
  20. * for udp at least is 'valid'.
  21. * Alan Cox : Fixed icmp handling properly
  22. * Alan Cox : Correct error for oversized datagrams
  23. * Alan Cox : Tidied select() semantics.
  24. * Alan Cox : udp_err() fixed properly, also now
  25. * select and read wake correctly on errors
  26. * Alan Cox : udp_send verify_area moved to avoid mem leak
  27. * Alan Cox : UDP can count its memory
  28. * Alan Cox : send to an unknown connection causes
  29. * an ECONNREFUSED off the icmp, but
  30. * does NOT close.
  31. * Alan Cox : Switched to new sk_buff handlers. No more backlog!
  32. * Alan Cox : Using generic datagram code. Even smaller and the PEEK
  33. * bug no longer crashes it.
  34. * Fred Van Kempen : Net2e support for sk->broadcast.
  35. * Alan Cox : Uses skb_free_datagram
  36. * Alan Cox : Added get/set sockopt support.
  37. * Alan Cox : Broadcasting without option set returns EACCES.
  38. * Alan Cox : No wakeup calls. Instead we now use the callbacks.
  39. * Alan Cox : Use ip_tos and ip_ttl
  40. * Alan Cox : SNMP Mibs
  41. * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support.
  42. * Matt Dillon : UDP length checks.
  43. * Alan Cox : Smarter af_inet used properly.
  44. * Alan Cox : Use new kernel side addressing.
  45. * Alan Cox : Incorrect return on truncated datagram receive.
  46. * Arnt Gulbrandsen : New udp_send and stuff
  47. * Alan Cox : Cache last socket
  48. * Alan Cox : Route cache
  49. * Jon Peatfield : Minor efficiency fix to sendto().
  50. * Mike Shaver : RFC1122 checks.
  51. * Alan Cox : Nonblocking error fix.
  52. * Willy Konynenberg : Transparent proxying support.
  53. * Mike McLagan : Routing by source
  54. * David S. Miller : New socket lookup architecture.
  55. * Last socket cache retained as it
  56. * does have a high hit rate.
  57. * Olaf Kirch : Don't linearise iovec on sendmsg.
  58. * Andi Kleen : Some cleanups, cache destination entry
  59. * for connect.
  60. * Vitaly E. Lavrov : Transparent proxy revived after year coma.
  61. * Melvin Smith : Check msg_name not msg_namelen in sendto(),
  62. * return ENOTCONN for unconnected sockets (POSIX)
  63. * Janos Farkas : don't deliver multi/broadcasts to a different
  64. * bound-to-device socket
  65. * Hirokazu Takahashi : HW checksumming for outgoing UDP
  66. * datagrams.
  67. * Hirokazu Takahashi : sendfile() on UDP works now.
  68. * Arnaldo C. Melo : convert /proc/net/udp to seq_file
  69. * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
  70. * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind
  71. * a single port at the same time.
  72. * Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
  73. * James Chapman : Add L2TP encapsulation type.
  74. *
  75. *
  76. * This program is free software; you can redistribute it and/or
  77. * modify it under the terms of the GNU General Public License
  78. * as published by the Free Software Foundation; either version
  79. * 2 of the License, or (at your option) any later version.
  80. */
  81. #include <asm/system.h>
  82. #include <asm/uaccess.h>
  83. #include <asm/ioctls.h>
  84. #include <linux/bootmem.h>
  85. #include <linux/types.h>
  86. #include <linux/fcntl.h>
  87. #include <linux/module.h>
  88. #include <linux/socket.h>
  89. #include <linux/sockios.h>
  90. #include <linux/igmp.h>
  91. #include <linux/in.h>
  92. #include <linux/errno.h>
  93. #include <linux/timer.h>
  94. #include <linux/mm.h>
  95. #include <linux/inet.h>
  96. #include <linux/netdevice.h>
  97. #include <net/tcp_states.h>
  98. #include <linux/skbuff.h>
  99. #include <linux/proc_fs.h>
  100. #include <linux/seq_file.h>
  101. #include <net/net_namespace.h>
  102. #include <net/icmp.h>
  103. #include <net/route.h>
  104. #include <net/checksum.h>
  105. #include <net/xfrm.h>
  106. #include "udp_impl.h"
  107. /*
  108. * Snmp MIB for the UDP layer
  109. */
  110. DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
  111. EXPORT_SYMBOL(udp_statistics);
  112. DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;
  113. EXPORT_SYMBOL(udp_stats_in6);
  114. struct hlist_head udp_hash[UDP_HTABLE_SIZE];
  115. DEFINE_RWLOCK(udp_hash_lock);
  116. int sysctl_udp_mem[3] __read_mostly;
  117. int sysctl_udp_rmem_min __read_mostly;
  118. int sysctl_udp_wmem_min __read_mostly;
  119. EXPORT_SYMBOL(sysctl_udp_mem);
  120. EXPORT_SYMBOL(sysctl_udp_rmem_min);
  121. EXPORT_SYMBOL(sysctl_udp_wmem_min);
  122. atomic_t udp_memory_allocated;
  123. EXPORT_SYMBOL(udp_memory_allocated);
  124. static inline int __udp_lib_lport_inuse(struct net *net, __u16 num,
  125. const struct hlist_head udptable[])
  126. {
  127. struct sock *sk;
  128. struct hlist_node *node;
  129. sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
  130. if (net_eq(sock_net(sk), net) && sk->sk_hash == num)
  131. return 1;
  132. return 0;
  133. }
  134. /**
  135. * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
  136. *
  137. * @sk: socket struct in question
  138. * @snum: port number to look up
  139. * @saddr_comp: AF-dependent comparison of bound local IP addresses
  140. */
  141. int udp_lib_get_port(struct sock *sk, unsigned short snum,
  142. int (*saddr_comp)(const struct sock *sk1,
  143. const struct sock *sk2 ) )
  144. {
  145. struct hlist_head *udptable = sk->sk_prot->h.udp_hash;
  146. struct hlist_node *node;
  147. struct hlist_head *head;
  148. struct sock *sk2;
  149. int error = 1;
  150. struct net *net = sock_net(sk);
  151. write_lock_bh(&udp_hash_lock);
  152. if (!snum) {
  153. int i, low, high, remaining;
  154. unsigned rover, best, best_size_so_far;
  155. inet_get_local_port_range(&low, &high);
  156. remaining = (high - low) + 1;
  157. best_size_so_far = UINT_MAX;
  158. best = rover = net_random() % remaining + low;
  159. /* 1st pass: look for empty (or shortest) hash chain */
  160. for (i = 0; i < UDP_HTABLE_SIZE; i++) {
  161. int size = 0;
  162. head = &udptable[rover & (UDP_HTABLE_SIZE - 1)];
  163. if (hlist_empty(head))
  164. goto gotit;
  165. sk_for_each(sk2, node, head) {
  166. if (++size >= best_size_so_far)
  167. goto next;
  168. }
  169. best_size_so_far = size;
  170. best = rover;
  171. next:
  172. /* fold back if end of range */
  173. if (++rover > high)
  174. rover = low + ((rover - low)
  175. & (UDP_HTABLE_SIZE - 1));
  176. }
  177. /* 2nd pass: find hole in shortest hash chain */
  178. rover = best;
  179. for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++) {
  180. if (! __udp_lib_lport_inuse(net, rover, udptable))
  181. goto gotit;
  182. rover += UDP_HTABLE_SIZE;
  183. if (rover > high)
  184. rover = low + ((rover - low)
  185. & (UDP_HTABLE_SIZE - 1));
  186. }
  187. /* All ports in use! */
  188. goto fail;
  189. gotit:
  190. snum = rover;
  191. } else {
  192. head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
  193. sk_for_each(sk2, node, head)
  194. if (sk2->sk_hash == snum &&
  195. sk2 != sk &&
  196. net_eq(sock_net(sk2), net) &&
  197. (!sk2->sk_reuse || !sk->sk_reuse) &&
  198. (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
  199. || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
  200. (*saddr_comp)(sk, sk2) )
  201. goto fail;
  202. }
  203. inet_sk(sk)->num = snum;
  204. sk->sk_hash = snum;
  205. if (sk_unhashed(sk)) {
  206. head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
  207. sk_add_node(sk, head);
  208. sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
  209. }
  210. error = 0;
  211. fail:
  212. write_unlock_bh(&udp_hash_lock);
  213. return error;
  214. }
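/*
 * When snum is 0 the allocation above works in two passes: starting from a
 * random port in the local range, the first pass scans every hash bucket
 * for the emptiest chain, and the second pass steps through that chain's
 * port numbers in strides of UDP_HTABLE_SIZE until __udp_lib_lport_inuse()
 * finds one that is free. From userspace this is simply what happens on a
 * bind() to port 0 (illustrative sketch only):
 *
 *	struct sockaddr_in a = { .sin_family = AF_INET };	 sin_port == 0
 *	socklen_t alen = sizeof(a);
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));		 kernel picks the port
 *	getsockname(fd, (struct sockaddr *)&a, &alen);		 ntohs(a.sin_port) is the choice
 */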
  215. static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
  216. {
  217. struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
  218. return ( !ipv6_only_sock(sk2) &&
  219. (!inet1->rcv_saddr || !inet2->rcv_saddr ||
  220. inet1->rcv_saddr == inet2->rcv_saddr ));
  221. }
  222. int udp_v4_get_port(struct sock *sk, unsigned short snum)
  223. {
  224. return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal);
  225. }
  226. /* UDP is nearly always wildcards out the wazoo; it makes no sense to try
  227. * harder than this. -DaveM
  228. */
  229. static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
  230. __be16 sport, __be32 daddr, __be16 dport,
  231. int dif, struct hlist_head udptable[])
  232. {
  233. struct sock *sk, *result = NULL;
  234. struct hlist_node *node;
  235. unsigned short hnum = ntohs(dport);
  236. int badness = -1;
  237. read_lock(&udp_hash_lock);
  238. sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
  239. struct inet_sock *inet = inet_sk(sk);
  240. if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
  241. !ipv6_only_sock(sk)) {
  242. int score = (sk->sk_family == PF_INET ? 1 : 0);
  243. if (inet->rcv_saddr) {
  244. if (inet->rcv_saddr != daddr)
  245. continue;
  246. score+=2;
  247. }
  248. if (inet->daddr) {
  249. if (inet->daddr != saddr)
  250. continue;
  251. score+=2;
  252. }
  253. if (inet->dport) {
  254. if (inet->dport != sport)
  255. continue;
  256. score+=2;
  257. }
  258. if (sk->sk_bound_dev_if) {
  259. if (sk->sk_bound_dev_if != dif)
  260. continue;
  261. score+=2;
  262. }
  263. if (score == 9) {
  264. result = sk;
  265. break;
  266. } else if (score > badness) {
  267. result = sk;
  268. badness = score;
  269. }
  270. }
  271. }
  272. if (result)
  273. sock_hold(result);
  274. read_unlock(&udp_hash_lock);
  275. return result;
  276. }
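/*
 * The scoring above gives 1 point for an AF_INET socket and 2 points for
 * each of rcv_saddr, daddr, dport and sk_bound_dev_if that is set and
 * matches the incoming packet; a fully connected, device-bound socket
 * therefore scores the maximum of 9 and ends the search early, otherwise
 * the most specific (highest-scoring) socket wins.
 */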
  277. static inline struct sock *udp_v4_mcast_next(struct sock *sk,
  278. __be16 loc_port, __be32 loc_addr,
  279. __be16 rmt_port, __be32 rmt_addr,
  280. int dif)
  281. {
  282. struct hlist_node *node;
  283. struct sock *s = sk;
  284. unsigned short hnum = ntohs(loc_port);
  285. sk_for_each_from(s, node) {
  286. struct inet_sock *inet = inet_sk(s);
  287. if (s->sk_hash != hnum ||
  288. (inet->daddr && inet->daddr != rmt_addr) ||
  289. (inet->dport != rmt_port && inet->dport) ||
  290. (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
  291. ipv6_only_sock(s) ||
  292. (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
  293. continue;
  294. if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
  295. continue;
  296. goto found;
  297. }
  298. s = NULL;
  299. found:
  300. return s;
  301. }
  302. /*
  303. * This routine is called by the ICMP module when it gets some
  304. * sort of error condition. If err < 0 then the socket should
  305. * be closed and the error returned to the user. If err > 0
  306. * it's just the icmp type << 8 | icmp code.
  307. * Header points to the ip header of the error packet. We move
  308. * on past this. Then (as it used to claim before adjustment)
  309. * header points to the first 8 bytes of the udp header. We need
  310. * to find the appropriate port.
  311. */
  312. void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
  313. {
  314. struct inet_sock *inet;
  315. struct iphdr *iph = (struct iphdr*)skb->data;
  316. struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
  317. const int type = icmp_hdr(skb)->type;
  318. const int code = icmp_hdr(skb)->code;
  319. struct sock *sk;
  320. int harderr;
  321. int err;
  322. sk = __udp4_lib_lookup(dev_net(skb->dev), iph->daddr, uh->dest,
  323. iph->saddr, uh->source, skb->dev->ifindex, udptable);
  324. if (sk == NULL) {
  325. ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
  326. return; /* No socket for error */
  327. }
  328. err = 0;
  329. harderr = 0;
  330. inet = inet_sk(sk);
  331. switch (type) {
  332. default:
  333. case ICMP_TIME_EXCEEDED:
  334. err = EHOSTUNREACH;
  335. break;
  336. case ICMP_SOURCE_QUENCH:
  337. goto out;
  338. case ICMP_PARAMETERPROB:
  339. err = EPROTO;
  340. harderr = 1;
  341. break;
  342. case ICMP_DEST_UNREACH:
  343. if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
  344. if (inet->pmtudisc != IP_PMTUDISC_DONT) {
  345. err = EMSGSIZE;
  346. harderr = 1;
  347. break;
  348. }
  349. goto out;
  350. }
  351. err = EHOSTUNREACH;
  352. if (code <= NR_ICMP_UNREACH) {
  353. harderr = icmp_err_convert[code].fatal;
  354. err = icmp_err_convert[code].errno;
  355. }
  356. break;
  357. }
  358. /*
  359. * RFC1122: OK. Passes ICMP errors back to application, as per
  360. * 4.1.3.3.
  361. */
  362. if (!inet->recverr) {
  363. if (!harderr || sk->sk_state != TCP_ESTABLISHED)
  364. goto out;
  365. } else {
  366. ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
  367. }
  368. sk->sk_err = err;
  369. sk->sk_error_report(sk);
  370. out:
  371. sock_put(sk);
  372. }
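/*
 * Note on the inet->recverr test above: it corresponds to the IP_RECVERR
 * socket option, so only applications that opted in get the ICMP error
 * queued on their error queue via ip_icmp_error(); without it, only hard
 * errors on connected sockets are reported through sk_err. A minimal
 * userspace sketch of reading such an error (illustrative only;
 * handle_icmp_error() is a placeholder):
 *
 *	int on = 1;
 *	char cbuf[256];
 *	struct msghdr msg = {0};
 *
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
 *		struct cmsghdr *cm;
 *		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *			if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
 *				struct sock_extended_err *ee = (void *)CMSG_DATA(cm);
 *				handle_icmp_error(ee->ee_errno, ee->ee_type, ee->ee_code);
 *			}
 *	}
 */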
  373. void udp_err(struct sk_buff *skb, u32 info)
  374. {
  375. __udp4_lib_err(skb, info, udp_hash);
  376. }
  377. /*
  378. * Throw away all pending data and cancel the corking. Socket is locked.
  379. */
  380. void udp_flush_pending_frames(struct sock *sk)
  381. {
  382. struct udp_sock *up = udp_sk(sk);
  383. if (up->pending) {
  384. up->len = 0;
  385. up->pending = 0;
  386. ip_flush_pending_frames(sk);
  387. }
  388. }
  389. EXPORT_SYMBOL(udp_flush_pending_frames);
  390. /**
  391. * udp4_hwcsum_outgoing - handle outgoing HW checksumming
  392. * @sk: socket we are sending on
  393. * @skb: sk_buff containing the filled-in UDP header
  394. * (checksum field must be zeroed out)
  395. */
  396. static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
  397. __be32 src, __be32 dst, int len )
  398. {
  399. unsigned int offset;
  400. struct udphdr *uh = udp_hdr(skb);
  401. __wsum csum = 0;
  402. if (skb_queue_len(&sk->sk_write_queue) == 1) {
  403. /*
  404. * Only one fragment on the socket.
  405. */
  406. skb->csum_start = skb_transport_header(skb) - skb->head;
  407. skb->csum_offset = offsetof(struct udphdr, check);
  408. uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
  409. } else {
  410. /*
  411. * HW checksumming won't work here, since there are two or more
  412. * fragments on the socket, so the checksums of all sk_buffs
  413. * must be added together.
  414. */
  415. offset = skb_transport_offset(skb);
  416. skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
  417. skb->ip_summed = CHECKSUM_NONE;
  418. skb_queue_walk(&sk->sk_write_queue, skb) {
  419. csum = csum_add(csum, skb->csum);
  420. }
  421. uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
  422. if (uh->check == 0)
  423. uh->check = CSUM_MANGLED_0;
  424. }
  425. }
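/*
 * For reference, the value being set up here is the standard UDP checksum:
 * a 16-bit ones-complement sum over a pseudo-header (source address,
 * destination address, zero, IPPROTO_UDP, UDP length) followed by the UDP
 * header and payload, with a result of zero transmitted as 0xffff
 * (CSUM_MANGLED_0). A freestanding sketch of that computation, independent
 * of the kernel csum helpers (illustrative only, host-order arithmetic):
 *
 *	static u16 udp_csum_example(u32 saddr, u32 daddr, const u8 *udp, int len)
 *	{
 *		u32 sum = (saddr >> 16) + (saddr & 0xffff)
 *			+ (daddr >> 16) + (daddr & 0xffff)
 *			+ IPPROTO_UDP + len;
 *		int i;
 *
 *		for (i = 0; i + 1 < len; i += 2)
 *			sum += (udp[i] << 8) | udp[i + 1];
 *		if (i < len)
 *			sum += udp[i] << 8;
 *		while (sum >> 16)
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		sum = ~sum & 0xffff;
 *		return sum ? sum : 0xffff;
 *	}
 */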
  426. /*
  427. * Push out all pending data as one UDP datagram. Socket is locked.
  428. */
  429. static int udp_push_pending_frames(struct sock *sk)
  430. {
  431. struct udp_sock *up = udp_sk(sk);
  432. struct inet_sock *inet = inet_sk(sk);
  433. struct flowi *fl = &inet->cork.fl;
  434. struct sk_buff *skb;
  435. struct udphdr *uh;
  436. int err = 0;
  437. int is_udplite = IS_UDPLITE(sk);
  438. __wsum csum = 0;
  439. /* Grab the skbuff where UDP header space exists. */
  440. if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
  441. goto out;
  442. /*
  443. * Create a UDP header
  444. */
  445. uh = udp_hdr(skb);
  446. uh->source = fl->fl_ip_sport;
  447. uh->dest = fl->fl_ip_dport;
  448. uh->len = htons(up->len);
  449. uh->check = 0;
  450. if (is_udplite) /* UDP-Lite */
  451. csum = udplite_csum_outgoing(sk, skb);
  452. else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */
  453. skb->ip_summed = CHECKSUM_NONE;
  454. goto send;
  455. } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
  456. udp4_hwcsum_outgoing(sk, skb, fl->fl4_src,fl->fl4_dst, up->len);
  457. goto send;
  458. } else /* `normal' UDP */
  459. csum = udp_csum_outgoing(sk, skb);
  460. /* add protocol-dependent pseudo-header */
  461. uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
  462. sk->sk_protocol, csum );
  463. if (uh->check == 0)
  464. uh->check = CSUM_MANGLED_0;
  465. send:
  466. err = ip_push_pending_frames(sk);
  467. out:
  468. up->len = 0;
  469. up->pending = 0;
  470. if (!err)
  471. UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
  472. return err;
  473. }
  474. int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  475. size_t len)
  476. {
  477. struct inet_sock *inet = inet_sk(sk);
  478. struct udp_sock *up = udp_sk(sk);
  479. int ulen = len;
  480. struct ipcm_cookie ipc;
  481. struct rtable *rt = NULL;
  482. int free = 0;
  483. int connected = 0;
  484. __be32 daddr, faddr, saddr;
  485. __be16 dport;
  486. u8 tos;
  487. int err, is_udplite = IS_UDPLITE(sk);
  488. int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
  489. int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
  490. if (len > 0xFFFF)
  491. return -EMSGSIZE;
  492. /*
  493. * Check the flags.
  494. */
  495. if (msg->msg_flags&MSG_OOB) /* Mirror BSD error message compatibility */
  496. return -EOPNOTSUPP;
  497. ipc.opt = NULL;
  498. if (up->pending) {
  499. /*
  500. * There are pending frames.
  501. * The socket lock must be held while it's corked.
  502. */
  503. lock_sock(sk);
  504. if (likely(up->pending)) {
  505. if (unlikely(up->pending != AF_INET)) {
  506. release_sock(sk);
  507. return -EINVAL;
  508. }
  509. goto do_append_data;
  510. }
  511. release_sock(sk);
  512. }
  513. ulen += sizeof(struct udphdr);
  514. /*
  515. * Get and verify the address.
  516. */
  517. if (msg->msg_name) {
  518. struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
  519. if (msg->msg_namelen < sizeof(*usin))
  520. return -EINVAL;
  521. if (usin->sin_family != AF_INET) {
  522. if (usin->sin_family != AF_UNSPEC)
  523. return -EAFNOSUPPORT;
  524. }
  525. daddr = usin->sin_addr.s_addr;
  526. dport = usin->sin_port;
  527. if (dport == 0)
  528. return -EINVAL;
  529. } else {
  530. if (sk->sk_state != TCP_ESTABLISHED)
  531. return -EDESTADDRREQ;
  532. daddr = inet->daddr;
  533. dport = inet->dport;
  534. /* Open fast path for connected socket.
  535. Route will not be used, if at least one option is set.
  536. */
  537. connected = 1;
  538. }
  539. ipc.addr = inet->saddr;
  540. ipc.oif = sk->sk_bound_dev_if;
  541. if (msg->msg_controllen) {
  542. err = ip_cmsg_send(sock_net(sk), msg, &ipc);
  543. if (err)
  544. return err;
  545. if (ipc.opt)
  546. free = 1;
  547. connected = 0;
  548. }
  549. if (!ipc.opt)
  550. ipc.opt = inet->opt;
  551. saddr = ipc.addr;
  552. ipc.addr = faddr = daddr;
  553. if (ipc.opt && ipc.opt->srr) {
  554. if (!daddr)
  555. return -EINVAL;
  556. faddr = ipc.opt->faddr;
  557. connected = 0;
  558. }
  559. tos = RT_TOS(inet->tos);
  560. if (sock_flag(sk, SOCK_LOCALROUTE) ||
  561. (msg->msg_flags & MSG_DONTROUTE) ||
  562. (ipc.opt && ipc.opt->is_strictroute)) {
  563. tos |= RTO_ONLINK;
  564. connected = 0;
  565. }
  566. if (ipv4_is_multicast(daddr)) {
  567. if (!ipc.oif)
  568. ipc.oif = inet->mc_index;
  569. if (!saddr)
  570. saddr = inet->mc_addr;
  571. connected = 0;
  572. }
  573. if (connected)
  574. rt = (struct rtable*)sk_dst_check(sk, 0);
  575. if (rt == NULL) {
  576. struct flowi fl = { .oif = ipc.oif,
  577. .nl_u = { .ip4_u =
  578. { .daddr = faddr,
  579. .saddr = saddr,
  580. .tos = tos } },
  581. .proto = sk->sk_protocol,
  582. .uli_u = { .ports =
  583. { .sport = inet->sport,
  584. .dport = dport } } };
  585. security_sk_classify_flow(sk, &fl);
  586. err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
  587. if (err) {
  588. if (err == -ENETUNREACH)
  589. IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
  590. goto out;
  591. }
  592. err = -EACCES;
  593. if ((rt->rt_flags & RTCF_BROADCAST) &&
  594. !sock_flag(sk, SOCK_BROADCAST))
  595. goto out;
  596. if (connected)
  597. sk_dst_set(sk, dst_clone(&rt->u.dst));
  598. }
  599. if (msg->msg_flags&MSG_CONFIRM)
  600. goto do_confirm;
  601. back_from_confirm:
  602. saddr = rt->rt_src;
  603. if (!ipc.addr)
  604. daddr = ipc.addr = rt->rt_dst;
  605. lock_sock(sk);
  606. if (unlikely(up->pending)) {
  607. /* The socket is already corked while preparing it. */
  608. /* ... which is an evident application bug. --ANK */
  609. release_sock(sk);
  610. LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
  611. err = -EINVAL;
  612. goto out;
  613. }
  614. /*
  615. * Now cork the socket to pend data.
  616. */
  617. inet->cork.fl.fl4_dst = daddr;
  618. inet->cork.fl.fl_ip_dport = dport;
  619. inet->cork.fl.fl4_src = saddr;
  620. inet->cork.fl.fl_ip_sport = inet->sport;
  621. up->pending = AF_INET;
  622. do_append_data:
  623. up->len += ulen;
  624. getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
  625. err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
  626. sizeof(struct udphdr), &ipc, rt,
  627. corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
  628. if (err)
  629. udp_flush_pending_frames(sk);
  630. else if (!corkreq)
  631. err = udp_push_pending_frames(sk);
  632. else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
  633. up->pending = 0;
  634. release_sock(sk);
  635. out:
  636. ip_rt_put(rt);
  637. if (free)
  638. kfree(ipc.opt);
  639. if (!err)
  640. return len;
  641. /*
  642. * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
  643. * ENOBUFS might not be good (it's not tunable per se), but otherwise
  644. * we don't have a good statistic (IpOutDiscards but it can be too many
  645. * things). We could add another new stat but at least for now that
  646. * seems like overkill.
  647. */
  648. if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
  649. UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
  650. }
  651. return err;
  652. do_confirm:
  653. dst_confirm(&rt->u.dst);
  654. if (!(msg->msg_flags&MSG_PROBE) || len)
  655. goto back_from_confirm;
  656. err = 0;
  657. goto out;
  658. }
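/*
 * Corking as seen from userspace: while UDP_CORK is set (or MSG_MORE is
 * passed), the sends above only append to one pending datagram via
 * ip_append_data(); the datagram is pushed out when the cork is removed.
 * Minimal sketch (illustrative only; hdr/body are placeholder buffers):
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, hdr, hdrlen, 0);
 *	send(fd, body, bodylen, 0);
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 */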
  659. int udp_sendpage(struct sock *sk, struct page *page, int offset,
  660. size_t size, int flags)
  661. {
  662. struct udp_sock *up = udp_sk(sk);
  663. int ret;
  664. if (!up->pending) {
  665. struct msghdr msg = { .msg_flags = flags|MSG_MORE };
  666. /* Call udp_sendmsg to specify destination address which
  667. * sendpage interface can't pass.
  668. * This will succeed only when the socket is connected.
  669. */
  670. ret = udp_sendmsg(NULL, sk, &msg, 0);
  671. if (ret < 0)
  672. return ret;
  673. }
  674. lock_sock(sk);
  675. if (unlikely(!up->pending)) {
  676. release_sock(sk);
  677. LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
  678. return -EINVAL;
  679. }
  680. ret = ip_append_page(sk, page, offset, size, flags);
  681. if (ret == -EOPNOTSUPP) {
  682. release_sock(sk);
  683. return sock_no_sendpage(sk->sk_socket, page, offset,
  684. size, flags);
  685. }
  686. if (ret < 0) {
  687. udp_flush_pending_frames(sk);
  688. goto out;
  689. }
  690. up->len += size;
  691. if (!(up->corkflag || (flags&MSG_MORE)))
  692. ret = udp_push_pending_frames(sk);
  693. if (!ret)
  694. ret = size;
  695. out:
  696. release_sock(sk);
  697. return ret;
  698. }
  699. /*
  700. * IOCTL requests applicable to the UDP protocol
  701. */
  702. int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
  703. {
  704. switch (cmd) {
  705. case SIOCOUTQ:
  706. {
  707. int amount = atomic_read(&sk->sk_wmem_alloc);
  708. return put_user(amount, (int __user *)arg);
  709. }
  710. case SIOCINQ:
  711. {
  712. struct sk_buff *skb;
  713. unsigned long amount;
  714. amount = 0;
  715. spin_lock_bh(&sk->sk_receive_queue.lock);
  716. skb = skb_peek(&sk->sk_receive_queue);
  717. if (skb != NULL) {
  718. /*
  719. * We will only return the amount
  720. * of this packet since that is all
  721. * that will be read.
  722. */
  723. amount = skb->len - sizeof(struct udphdr);
  724. }
  725. spin_unlock_bh(&sk->sk_receive_queue.lock);
  726. return put_user(amount, (int __user *)arg);
  727. }
  728. default:
  729. return -ENOIOCTLCMD;
  730. }
  731. return 0;
  732. }
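/*
 * From userspace SIOCINQ therefore reports the payload length of the next
 * queued datagram, not the total number of queued bytes, which is exactly
 * what the following receive call needs. Sketch (illustrative only;
 * read_one_datagram() is a placeholder helper):
 *
 *	int pending = 0;
 *
 *	if (ioctl(fd, SIOCINQ, &pending) == 0 && pending > 0)
 *		read_one_datagram(fd, pending);
 */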
  733. /*
  734. * This should be easy, if there is something there we
  735. * return it, otherwise we block.
  736. */
  737. int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  738. size_t len, int noblock, int flags, int *addr_len)
  739. {
  740. struct inet_sock *inet = inet_sk(sk);
  741. struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
  742. struct sk_buff *skb;
  743. unsigned int ulen, copied;
  744. int peeked;
  745. int err;
  746. int is_udplite = IS_UDPLITE(sk);
  747. /*
  748. * Check any passed addresses
  749. */
  750. if (addr_len)
  751. *addr_len=sizeof(*sin);
  752. if (flags & MSG_ERRQUEUE)
  753. return ip_recv_error(sk, msg, len);
  754. try_again:
  755. skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
  756. &peeked, &err);
  757. if (!skb)
  758. goto out;
  759. ulen = skb->len - sizeof(struct udphdr);
  760. copied = len;
  761. if (copied > ulen)
  762. copied = ulen;
  763. else if (copied < ulen)
  764. msg->msg_flags |= MSG_TRUNC;
  765. /*
  766. * If checksum is needed at all, try to do it while copying the
  767. * data. If the data is truncated, or if we only want a partial
  768. * coverage checksum (UDP-Lite), do it before the copy.
  769. */
  770. if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
  771. if (udp_lib_checksum_complete(skb))
  772. goto csum_copy_err;
  773. }
  774. if (skb_csum_unnecessary(skb))
  775. err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
  776. msg->msg_iov, copied );
  777. else {
  778. err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
  779. if (err == -EINVAL)
  780. goto csum_copy_err;
  781. }
  782. if (err)
  783. goto out_free;
  784. if (!peeked)
  785. UDP_INC_STATS_USER(UDP_MIB_INDATAGRAMS, is_udplite);
  786. sock_recv_timestamp(msg, sk, skb);
  787. /* Copy the address. */
  788. if (sin)
  789. {
  790. sin->sin_family = AF_INET;
  791. sin->sin_port = udp_hdr(skb)->source;
  792. sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
  793. memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
  794. }
  795. if (inet->cmsg_flags)
  796. ip_cmsg_recv(msg, skb);
  797. err = copied;
  798. if (flags & MSG_TRUNC)
  799. err = ulen;
  800. out_free:
  801. lock_sock(sk);
  802. skb_free_datagram(sk, skb);
  803. release_sock(sk);
  804. out:
  805. return err;
  806. csum_copy_err:
  807. lock_sock(sk);
  808. if (!skb_kill_datagram(sk, skb, flags))
  809. UDP_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
  810. release_sock(sk);
  811. if (noblock)
  812. return -EAGAIN;
  813. goto try_again;
  814. }
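/*
 * Note the MSG_TRUNC behaviour implemented above: when the user buffer is
 * smaller than the datagram, MSG_TRUNC is set in msg_flags, and a caller
 * that also passed MSG_TRUNC in flags gets the real datagram length back
 * rather than the truncated count. Userspace sketch for sizing a receive
 * buffer (illustrative only):
 *
 *	char tmp;
 *	ssize_t real_len = recv(fd, &tmp, 1, MSG_PEEK | MSG_TRUNC);
 *
 *	if (real_len > 0) {
 *		char *buf = malloc(real_len);
 *		recv(fd, buf, real_len, 0);
 *	}
 */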
  815. int udp_disconnect(struct sock *sk, int flags)
  816. {
  817. struct inet_sock *inet = inet_sk(sk);
  818. /*
  819. * 1003.1g - break association.
  820. */
  821. sk->sk_state = TCP_CLOSE;
  822. inet->daddr = 0;
  823. inet->dport = 0;
  824. sk->sk_bound_dev_if = 0;
  825. if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
  826. inet_reset_saddr(sk);
  827. if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
  828. sk->sk_prot->unhash(sk);
  829. inet->sport = 0;
  830. }
  831. sk_dst_reset(sk);
  832. return 0;
  833. }
  834. /* returns:
  835. * -1: error
  836. * 0: success
  837. * >0: "udp encap" protocol resubmission
  838. *
  839. * Note that in the success and error cases, the skb is assumed to
  840. * have either been requeued or freed.
  841. */
  842. int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
  843. {
  844. struct udp_sock *up = udp_sk(sk);
  845. int rc;
  846. int is_udplite = IS_UDPLITE(sk);
  847. /*
  848. * Charge it to the socket, dropping if the queue is full.
  849. */
  850. if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
  851. goto drop;
  852. nf_reset(skb);
  853. if (up->encap_type) {
  854. /*
  855. * This is an encapsulation socket so pass the skb to
  856. * the socket's udp_encap_rcv() hook. Otherwise, just
  857. * fall through and pass this up the UDP socket.
  858. * up->encap_rcv() returns the following value:
  859. * =0 if skb was successfully passed to the encap
  860. * handler or was discarded by it.
  861. * >0 if skb should be passed on to UDP.
  862. * <0 if skb should be resubmitted as proto -N
  863. */
  864. /* if we're overly short, let UDP handle it */
  865. if (skb->len > sizeof(struct udphdr) &&
  866. up->encap_rcv != NULL) {
  867. int ret;
  868. ret = (*up->encap_rcv)(sk, skb);
  869. if (ret <= 0) {
  870. UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS,
  871. is_udplite);
  872. return -ret;
  873. }
  874. }
  875. /* FALLTHROUGH -- it's a UDP Packet */
  876. }
  877. /*
  878. * UDP-Lite specific tests, ignored on UDP sockets
  879. */
  880. if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
  881. /*
  882. * MIB statistics other than incrementing the error count are
  883. * disabled for the following two types of errors: these depend
  884. * on the application settings, not on the functioning of the
  885. * protocol stack as such.
  886. *
  887. * RFC 3828 here recommends (sec 3.3): "There should also be a
  888. * way ... to ... at least let the receiving application block
  889. * delivery of packets with coverage values less than a value
  890. * provided by the application."
  891. */
  892. if (up->pcrlen == 0) { /* full coverage was set */
  893. LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
  894. "%d while full coverage %d requested\n",
  895. UDP_SKB_CB(skb)->cscov, skb->len);
  896. goto drop;
  897. }
  898. /* The next case involves violating the min. coverage requested
  899. * by the receiver. This is subtle: if receiver wants x and x is
  900. * greater than the buffersize/MTU then receiver will complain
  901. * that it wants x while sender emits packets of smaller size y.
  902. * Therefore the above ...()->partial_cov statement is essential.
  903. */
  904. if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
  905. LIMIT_NETDEBUG(KERN_WARNING
  906. "UDPLITE: coverage %d too small, need min %d\n",
  907. UDP_SKB_CB(skb)->cscov, up->pcrlen);
  908. goto drop;
  909. }
  910. }
  911. if (sk->sk_filter) {
  912. if (udp_lib_checksum_complete(skb))
  913. goto drop;
  914. }
  915. if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
  916. /* Note that an ENOMEM error is charged twice */
  917. if (rc == -ENOMEM)
  918. UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, is_udplite);
  919. goto drop;
  920. }
  921. return 0;
  922. drop:
  923. UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
  924. kfree_skb(skb);
  925. return -1;
  926. }
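/*
 * The encap_rcv() path above is what the UDP_ENCAP socket option enables:
 * an IKE daemon (ESP-in-UDP NAT traversal) or an L2TP daemon marks its
 * socket so that datagrams recognised by the encap handler are consumed
 * in-kernel, while everything else is delivered to the socket as normal
 * UDP. Sketch of how such a daemon opts in (illustrative only):
 *
 *	int type = UDP_ENCAP_ESPINUDP;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type));
 */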
  927. /*
  928. * Multicasts and broadcasts go to each listener.
  929. *
  930. * Note: called only from the BH handler context,
  931. * so we don't need to lock the hashes.
  932. */
  933. static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
  934. struct udphdr *uh,
  935. __be32 saddr, __be32 daddr,
  936. struct hlist_head udptable[])
  937. {
  938. struct sock *sk;
  939. int dif;
  940. read_lock(&udp_hash_lock);
  941. sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
  942. dif = skb->dev->ifindex;
  943. sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
  944. if (sk) {
  945. struct sock *sknext = NULL;
  946. do {
  947. struct sk_buff *skb1 = skb;
  948. sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
  949. uh->source, saddr, dif);
  950. if (sknext)
  951. skb1 = skb_clone(skb, GFP_ATOMIC);
  952. if (skb1) {
  953. int ret = 0;
  954. bh_lock_sock_nested(sk);
  955. if (!sock_owned_by_user(sk))
  956. ret = udp_queue_rcv_skb(sk, skb1);
  957. else
  958. sk_add_backlog(sk, skb1);
  959. bh_unlock_sock(sk);
  960. if (ret > 0)
  961. /* we should probably re-process instead
  962. * of dropping packets here. */
  963. kfree_skb(skb1);
  964. }
  965. sk = sknext;
  966. } while (sknext);
  967. } else
  968. kfree_skb(skb);
  969. read_unlock(&udp_hash_lock);
  970. return 0;
  971. }
  972. /* Initialize the UDP checksum. If this returns zero (success) and
  973. * skb->ip_summed is CHECKSUM_UNNECESSARY, no further checks are required.
  974. * Otherwise, checksum completion requires checksumming the packet body,
  975. * including the UDP header, and folding the result into skb->csum.
  976. */
  977. static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
  978. int proto)
  979. {
  980. const struct iphdr *iph;
  981. int err;
  982. UDP_SKB_CB(skb)->partial_cov = 0;
  983. UDP_SKB_CB(skb)->cscov = skb->len;
  984. if (proto == IPPROTO_UDPLITE) {
  985. err = udplite_checksum_init(skb, uh);
  986. if (err)
  987. return err;
  988. }
  989. iph = ip_hdr(skb);
  990. if (uh->check == 0) {
  991. skb->ip_summed = CHECKSUM_UNNECESSARY;
  992. } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
  993. if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
  994. proto, skb->csum))
  995. skb->ip_summed = CHECKSUM_UNNECESSARY;
  996. }
  997. if (!skb_csum_unnecessary(skb))
  998. skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
  999. skb->len, proto, 0);
  1000. /* Probably, we should checksum udp header (it should be in cache
  1001. * in any case) and data in tiny packets (< rx copybreak).
  1002. */
  1003. return 0;
  1004. }
  1005. /*
  1006. * All we need to do is get the socket, and then do a checksum.
  1007. */
  1008. int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
  1009. int proto)
  1010. {
  1011. struct sock *sk;
  1012. struct udphdr *uh = udp_hdr(skb);
  1013. unsigned short ulen;
  1014. struct rtable *rt = (struct rtable*)skb->dst;
  1015. __be32 saddr = ip_hdr(skb)->saddr;
  1016. __be32 daddr = ip_hdr(skb)->daddr;
  1017. /*
  1018. * Validate the packet.
  1019. */
  1020. if (!pskb_may_pull(skb, sizeof(struct udphdr)))
  1021. goto drop; /* No space for header. */
  1022. ulen = ntohs(uh->len);
  1023. if (ulen > skb->len)
  1024. goto short_packet;
  1025. if (proto == IPPROTO_UDP) {
  1026. /* UDP validates ulen. */
  1027. if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
  1028. goto short_packet;
  1029. uh = udp_hdr(skb);
  1030. }
  1031. if (udp4_csum_init(skb, uh, proto))
  1032. goto csum_error;
  1033. if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
  1034. return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);
  1035. sk = __udp4_lib_lookup(dev_net(skb->dev), saddr, uh->source, daddr,
  1036. uh->dest, inet_iif(skb), udptable);
  1037. if (sk != NULL) {
  1038. int ret = 0;
  1039. bh_lock_sock_nested(sk);
  1040. if (!sock_owned_by_user(sk))
  1041. ret = udp_queue_rcv_skb(sk, skb);
  1042. else
  1043. sk_add_backlog(sk, skb);
  1044. bh_unlock_sock(sk);
  1045. sock_put(sk);
  1046. /* a return value > 0 means to resubmit the input, but
  1047. * it wants the return to be -protocol, or 0
  1048. */
  1049. if (ret > 0)
  1050. return -ret;
  1051. return 0;
  1052. }
  1053. if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
  1054. goto drop;
  1055. nf_reset(skb);
  1056. /* No socket. Drop packet silently, if checksum is wrong */
  1057. if (udp_lib_checksum_complete(skb))
  1058. goto csum_error;
  1059. UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
  1060. icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
  1061. /*
  1062. * Hmm. We got a UDP packet to a port on which we
  1063. * don't want to listen. Ignore it.
  1064. */
  1065. kfree_skb(skb);
  1066. return 0;
  1067. short_packet:
  1068. LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From " NIPQUAD_FMT ":%u %d/%d to " NIPQUAD_FMT ":%u\n",
  1069. proto == IPPROTO_UDPLITE ? "-Lite" : "",
  1070. NIPQUAD(saddr),
  1071. ntohs(uh->source),
  1072. ulen,
  1073. skb->len,
  1074. NIPQUAD(daddr),
  1075. ntohs(uh->dest));
  1076. goto drop;
  1077. csum_error:
  1078. /*
  1079. * RFC1122: OK. Discards the bad packet silently (as far as
  1080. * the network is concerned, anyway) as per 4.1.3.4 (MUST).
  1081. */
  1082. LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From " NIPQUAD_FMT ":%u to " NIPQUAD_FMT ":%u ulen %d\n",
  1083. proto == IPPROTO_UDPLITE ? "-Lite" : "",
  1084. NIPQUAD(saddr),
  1085. ntohs(uh->source),
  1086. NIPQUAD(daddr),
  1087. ntohs(uh->dest),
  1088. ulen);
  1089. drop:
  1090. UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
  1091. kfree_skb(skb);
  1092. return 0;
  1093. }
  1094. int udp_rcv(struct sk_buff *skb)
  1095. {
  1096. return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
  1097. }
  1098. int udp_destroy_sock(struct sock *sk)
  1099. {
  1100. lock_sock(sk);
  1101. udp_flush_pending_frames(sk);
  1102. release_sock(sk);
  1103. return 0;
  1104. }
  1105. /*
  1106. * Socket option code for UDP
  1107. */
  1108. int udp_lib_setsockopt(struct sock *sk, int level, int optname,
  1109. char __user *optval, int optlen,
  1110. int (*push_pending_frames)(struct sock *))
  1111. {
  1112. struct udp_sock *up = udp_sk(sk);
  1113. int val;
  1114. int err = 0;
  1115. int is_udplite = IS_UDPLITE(sk);
  1116. if (optlen<sizeof(int))
  1117. return -EINVAL;
  1118. if (get_user(val, (int __user *)optval))
  1119. return -EFAULT;
  1120. switch (optname) {
  1121. case UDP_CORK:
  1122. if (val != 0) {
  1123. up->corkflag = 1;
  1124. } else {
  1125. up->corkflag = 0;
  1126. lock_sock(sk);
  1127. (*push_pending_frames)(sk);
  1128. release_sock(sk);
  1129. }
  1130. break;
  1131. case UDP_ENCAP:
  1132. switch (val) {
  1133. case 0:
  1134. case UDP_ENCAP_ESPINUDP:
  1135. case UDP_ENCAP_ESPINUDP_NON_IKE:
  1136. up->encap_rcv = xfrm4_udp_encap_rcv;
  1137. /* FALLTHROUGH */
  1138. case UDP_ENCAP_L2TPINUDP:
  1139. up->encap_type = val;
  1140. break;
  1141. default:
  1142. err = -ENOPROTOOPT;
  1143. break;
  1144. }
  1145. break;
  1146. /*
  1147. * UDP-Lite's partial checksum coverage (RFC 3828).
  1148. */
  1149. /* The sender sets actual checksum coverage length via this option.
  1150. * The case coverage > packet length is handled by send module. */
  1151. case UDPLITE_SEND_CSCOV:
  1152. if (!is_udplite) /* Disable the option on UDP sockets */
  1153. return -ENOPROTOOPT;
  1154. if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
  1155. val = 8;
  1156. up->pcslen = val;
  1157. up->pcflag |= UDPLITE_SEND_CC;
  1158. break;
  1159. /* The receiver specifies a minimum checksum coverage value. To make
  1160. * sense, this should be set to at least 8 (as done below). If zero is
  1161. * used, this again means full checksum coverage. */
  1162. case UDPLITE_RECV_CSCOV:
  1163. if (!is_udplite) /* Disable the option on UDP sockets */
  1164. return -ENOPROTOOPT;
  1165. if (val != 0 && val < 8) /* Avoid silly minimal values. */
  1166. val = 8;
  1167. up->pcrlen = val;
  1168. up->pcflag |= UDPLITE_RECV_CC;
  1169. break;
  1170. default:
  1171. err = -ENOPROTOOPT;
  1172. break;
  1173. }
  1174. return err;
  1175. }
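/*
 * UDP-Lite usage from userspace, tying the two CSCOV options above
 * together (illustrative sketch; coverage is counted from the start of
 * the UDP-Lite header, so 20 covers the 8-byte header plus the first 12
 * payload bytes):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cov = 20;
 *
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 */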
  1176. int udp_setsockopt(struct sock *sk, int level, int optname,
  1177. char __user *optval, int optlen)
  1178. {
  1179. if (level == SOL_UDP || level == SOL_UDPLITE)
  1180. return udp_lib_setsockopt(sk, level, optname, optval, optlen,
  1181. udp_push_pending_frames);
  1182. return ip_setsockopt(sk, level, optname, optval, optlen);
  1183. }
  1184. #ifdef CONFIG_COMPAT
  1185. int compat_udp_setsockopt(struct sock *sk, int level, int optname,
  1186. char __user *optval, int optlen)
  1187. {
  1188. if (level == SOL_UDP || level == SOL_UDPLITE)
  1189. return udp_lib_setsockopt(sk, level, optname, optval, optlen,
  1190. udp_push_pending_frames);
  1191. return compat_ip_setsockopt(sk, level, optname, optval, optlen);
  1192. }
  1193. #endif
  1194. int udp_lib_getsockopt(struct sock *sk, int level, int optname,
  1195. char __user *optval, int __user *optlen)
  1196. {
  1197. struct udp_sock *up = udp_sk(sk);
  1198. int val, len;
  1199. if (get_user(len,optlen))
  1200. return -EFAULT;
  1201. len = min_t(unsigned int, len, sizeof(int));
  1202. if (len < 0)
  1203. return -EINVAL;
  1204. switch (optname) {
  1205. case UDP_CORK:
  1206. val = up->corkflag;
  1207. break;
  1208. case UDP_ENCAP:
  1209. val = up->encap_type;
  1210. break;
  1211. /* The following two cannot be changed on UDP sockets; the return is
  1212. * always 0 (which corresponds to the full checksum coverage of UDP). */
  1213. case UDPLITE_SEND_CSCOV:
  1214. val = up->pcslen;
  1215. break;
  1216. case UDPLITE_RECV_CSCOV:
  1217. val = up->pcrlen;
  1218. break;
  1219. default:
  1220. return -ENOPROTOOPT;
  1221. }
  1222. if (put_user(len, optlen))
  1223. return -EFAULT;
  1224. if (copy_to_user(optval, &val,len))
  1225. return -EFAULT;
  1226. return 0;
  1227. }
  1228. int udp_getsockopt(struct sock *sk, int level, int optname,
  1229. char __user *optval, int __user *optlen)
  1230. {
  1231. if (level == SOL_UDP || level == SOL_UDPLITE)
  1232. return udp_lib_getsockopt(sk, level, optname, optval, optlen);
  1233. return ip_getsockopt(sk, level, optname, optval, optlen);
  1234. }
  1235. #ifdef CONFIG_COMPAT
  1236. int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  1237. char __user *optval, int __user *optlen)
  1238. {
  1239. if (level == SOL_UDP || level == SOL_UDPLITE)
  1240. return udp_lib_getsockopt(sk, level, optname, optval, optlen);
  1241. return compat_ip_getsockopt(sk, level, optname, optval, optlen);
  1242. }
  1243. #endif
  1244. /**
  1245. * udp_poll - wait for a UDP event.
  1246. * @file - file struct
  1247. * @sock - socket
  1248. * @wait - poll table
  1249. *
  1250. * This is the same as datagram poll, except for the special case of
  1251. * blocking sockets. If an application is using a blocking fd
  1252. * and a packet with a checksum error is in the queue,
  1253. * it could get a return from select indicating data available
  1254. * but then block when reading it. Add special case code
  1255. * to work around these arguably broken applications.
  1256. */
  1257. unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
  1258. {
  1259. unsigned int mask = datagram_poll(file, sock, wait);
  1260. struct sock *sk = sock->sk;
  1261. int is_lite = IS_UDPLITE(sk);
  1262. /* Check for false positives due to checksum errors */
  1263. if ( (mask & POLLRDNORM) &&
  1264. !(file->f_flags & O_NONBLOCK) &&
  1265. !(sk->sk_shutdown & RCV_SHUTDOWN)){
  1266. struct sk_buff_head *rcvq = &sk->sk_receive_queue;
  1267. struct sk_buff *skb;
  1268. spin_lock_bh(&rcvq->lock);
  1269. while ((skb = skb_peek(rcvq)) != NULL &&
  1270. udp_lib_checksum_complete(skb)) {
  1271. UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
  1272. __skb_unlink(skb, rcvq);
  1273. kfree_skb(skb);
  1274. }
  1275. spin_unlock_bh(&rcvq->lock);
  1276. /* nothing to see, move along */
  1277. if (skb == NULL)
  1278. mask &= ~(POLLIN | POLLRDNORM);
  1279. }
  1280. return mask;
  1281. }
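/*
 * Even with the scan above there is an inherent race (a corrupted datagram
 * can still arrive after the queue has been checked), so robust
 * applications combine poll/select with a non-blocking socket and treat
 * EAGAIN as a spurious wakeup. Sketch (illustrative only; buf and
 * process() are placeholders):
 *
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
 *	for (;;) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *		ssize_t n;
 *
 *		if (poll(&pfd, 1, -1) <= 0)
 *			continue;
 *		n = recv(fd, buf, sizeof(buf), 0);
 *		if (n < 0 && errno == EAGAIN)
 *			continue;
 *		process(buf, n);
 *	}
 */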
  1282. struct proto udp_prot = {
  1283. .name = "UDP",
  1284. .owner = THIS_MODULE,
  1285. .close = udp_lib_close,
  1286. .connect = ip4_datagram_connect,
  1287. .disconnect = udp_disconnect,
  1288. .ioctl = udp_ioctl,
  1289. .destroy = udp_destroy_sock,
  1290. .setsockopt = udp_setsockopt,
  1291. .getsockopt = udp_getsockopt,
  1292. .sendmsg = udp_sendmsg,
  1293. .recvmsg = udp_recvmsg,
  1294. .sendpage = udp_sendpage,
  1295. .backlog_rcv = udp_queue_rcv_skb,
  1296. .hash = udp_lib_hash,
  1297. .unhash = udp_lib_unhash,
  1298. .get_port = udp_v4_get_port,
  1299. .memory_allocated = &udp_memory_allocated,
  1300. .sysctl_mem = sysctl_udp_mem,
  1301. .sysctl_wmem = &sysctl_udp_wmem_min,
  1302. .sysctl_rmem = &sysctl_udp_rmem_min,
  1303. .obj_size = sizeof(struct udp_sock),
  1304. .h.udp_hash = udp_hash,
  1305. #ifdef CONFIG_COMPAT
  1306. .compat_setsockopt = compat_udp_setsockopt,
  1307. .compat_getsockopt = compat_udp_getsockopt,
  1308. #endif
  1309. };
  1310. /* ------------------------------------------------------------------------ */
  1311. #ifdef CONFIG_PROC_FS
  1312. static struct sock *udp_get_first(struct seq_file *seq)
  1313. {
  1314. struct sock *sk;
  1315. struct udp_iter_state *state = seq->private;
  1316. struct net *net = seq_file_net(seq);
  1317. for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
  1318. struct hlist_node *node;
  1319. sk_for_each(sk, node, state->hashtable + state->bucket) {
  1320. if (!net_eq(sock_net(sk), net))
  1321. continue;
  1322. if (sk->sk_family == state->family)
  1323. goto found;
  1324. }
  1325. }
  1326. sk = NULL;
  1327. found:
  1328. return sk;
  1329. }
  1330. static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
  1331. {
  1332. struct udp_iter_state *state = seq->private;
  1333. struct net *net = seq_file_net(seq);
  1334. do {
  1335. sk = sk_next(sk);
  1336. try_again:
  1337. ;
  1338. } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
  1339. if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
  1340. sk = sk_head(state->hashtable + state->bucket);
  1341. goto try_again;
  1342. }
  1343. return sk;
  1344. }
  1345. static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
  1346. {
  1347. struct sock *sk = udp_get_first(seq);
  1348. if (sk)
  1349. while (pos && (sk = udp_get_next(seq, sk)) != NULL)
  1350. --pos;
  1351. return pos ? NULL : sk;
  1352. }
  1353. static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
  1354. __acquires(udp_hash_lock)
  1355. {
  1356. read_lock(&udp_hash_lock);
  1357. return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
  1358. }
  1359. static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  1360. {
  1361. struct sock *sk;
  1362. if (v == SEQ_START_TOKEN)
  1363. sk = udp_get_idx(seq, 0);
  1364. else
  1365. sk = udp_get_next(seq, v);
  1366. ++*pos;
  1367. return sk;
  1368. }
  1369. static void udp_seq_stop(struct seq_file *seq, void *v)
  1370. __releases(udp_hash_lock)
  1371. {
  1372. read_unlock(&udp_hash_lock);
  1373. }
  1374. static int udp_seq_open(struct inode *inode, struct file *file)
  1375. {
  1376. struct udp_seq_afinfo *afinfo = PDE(inode)->data;
  1377. struct udp_iter_state *s;
  1378. int err;
  1379. err = seq_open_net(inode, file, &afinfo->seq_ops,
  1380. sizeof(struct udp_iter_state));
  1381. if (err < 0)
  1382. return err;
  1383. s = ((struct seq_file *)file->private_data)->private;
  1384. s->family = afinfo->family;
  1385. s->hashtable = afinfo->hashtable;
  1386. return err;
  1387. }
  1388. /* ------------------------------------------------------------------------ */
  1389. int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
  1390. {
  1391. struct proc_dir_entry *p;
  1392. int rc = 0;
  1393. afinfo->seq_fops.open = udp_seq_open;
  1394. afinfo->seq_fops.read = seq_read;
  1395. afinfo->seq_fops.llseek = seq_lseek;
  1396. afinfo->seq_fops.release = seq_release_net;
  1397. afinfo->seq_ops.start = udp_seq_start;
  1398. afinfo->seq_ops.next = udp_seq_next;
  1399. afinfo->seq_ops.stop = udp_seq_stop;
  1400. p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
  1401. &afinfo->seq_fops, afinfo);
  1402. if (!p)
  1403. rc = -ENOMEM;
  1404. return rc;
  1405. }
  1406. void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
  1407. {
  1408. proc_net_remove(net, afinfo->name);
  1409. }
  1410. /* ------------------------------------------------------------------------ */
  1411. static void udp4_format_sock(struct sock *sp, struct seq_file *f,
  1412. int bucket, int *len)
  1413. {
  1414. struct inet_sock *inet = inet_sk(sp);
  1415. __be32 dest = inet->daddr;
  1416. __be32 src = inet->rcv_saddr;
  1417. __u16 destp = ntohs(inet->dport);
  1418. __u16 srcp = ntohs(inet->sport);
  1419. seq_printf(f, "%4d: %08X:%04X %08X:%04X"
  1420. " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p%n",
  1421. bucket, src, srcp, dest, destp, sp->sk_state,
  1422. atomic_read(&sp->sk_wmem_alloc),
  1423. atomic_read(&sp->sk_rmem_alloc),
  1424. 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
  1425. atomic_read(&sp->sk_refcnt), sp, len);
  1426. }
  1427. int udp4_seq_show(struct seq_file *seq, void *v)
  1428. {
  1429. if (v == SEQ_START_TOKEN)
  1430. seq_printf(seq, "%-127s\n",
  1431. " sl local_address rem_address st tx_queue "
  1432. "rx_queue tr tm->when retrnsmt uid timeout "
  1433. "inode");
  1434. else {
  1435. struct udp_iter_state *state = seq->private;
  1436. int len;
  1437. udp4_format_sock(v, seq, state->bucket, &len);
  1438. seq_printf(seq, "%*s\n", 127 - len ,"");
  1439. }
  1440. return 0;
  1441. }
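/*
 * A line of /proc/net/udp produced by the code above looks like the
 * following (hypothetical sample: a socket bound to 127.0.0.1:53;
 * addresses and ports are hex, st 07 is TCP_CLOSE, the normal state for
 * UDP, and the two trailing fields are refcnt and the socket pointer):
 *
 *	 sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *	 53: 0100007F:0035 00000000:0000 07 00000000:00000000 00:00000000 00000000     0        0 4711 2 c1a2b3c4
 */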
  1442. /* ------------------------------------------------------------------------ */
  1443. static struct udp_seq_afinfo udp4_seq_afinfo = {
  1444. .name = "udp",
  1445. .family = AF_INET,
  1446. .hashtable = udp_hash,
  1447. .seq_fops = {
  1448. .owner = THIS_MODULE,
  1449. },
  1450. .seq_ops = {
  1451. .show = udp4_seq_show,
  1452. },
  1453. };
  1454. static int udp4_proc_init_net(struct net *net)
  1455. {
  1456. return udp_proc_register(net, &udp4_seq_afinfo);
  1457. }
  1458. static void udp4_proc_exit_net(struct net *net)
  1459. {
  1460. udp_proc_unregister(net, &udp4_seq_afinfo);
  1461. }
  1462. static struct pernet_operations udp4_net_ops = {
  1463. .init = udp4_proc_init_net,
  1464. .exit = udp4_proc_exit_net,
  1465. };
  1466. int __init udp4_proc_init(void)
  1467. {
  1468. return register_pernet_subsys(&udp4_net_ops);
  1469. }
  1470. void udp4_proc_exit(void)
  1471. {
  1472. unregister_pernet_subsys(&udp4_net_ops);
  1473. }
  1474. #endif /* CONFIG_PROC_FS */
  1475. void __init udp_init(void)
  1476. {
  1477. unsigned long limit;
  1478. /* Set the pressure threshold using the same strategy as TCP: it is a
  1479. * fraction of global memory that is up to 1/2 at 256 MB, decreasing
  1480. * toward zero as memory grows, with a floor of 128 pages.
  1481. */
  1482. limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
  1483. limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
  1484. limit = max(limit, 128UL);
  1485. sysctl_udp_mem[0] = limit / 4 * 3;
  1486. sysctl_udp_mem[1] = limit;
  1487. sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
  1488. sysctl_udp_rmem_min = SK_MEM_QUANTUM;
  1489. sysctl_udp_wmem_min = SK_MEM_QUANTUM;
  1490. }
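/*
 * Worked example of the sizing above, assuming 4 KB pages: on a 256 MB
 * machine nr_all_pages is 65536, so limit = min(65536, 1 << 16) >> 8 = 256,
 * and then limit = (256 * (65536 >> 8)) >> 1 = 32768 pages = 128 MB.
 * udp_mem[1] (the pressure threshold) is thus half of memory, udp_mem[0]
 * is 3/4 of that, and the hard limit udp_mem[2] is 1.5 times it.
 */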
  1491. EXPORT_SYMBOL(udp_disconnect);
  1492. EXPORT_SYMBOL(udp_hash);
  1493. EXPORT_SYMBOL(udp_hash_lock);
  1494. EXPORT_SYMBOL(udp_ioctl);
  1495. EXPORT_SYMBOL(udp_prot);
  1496. EXPORT_SYMBOL(udp_sendmsg);
  1497. EXPORT_SYMBOL(udp_lib_getsockopt);
  1498. EXPORT_SYMBOL(udp_lib_setsockopt);
  1499. EXPORT_SYMBOL(udp_poll);
  1500. EXPORT_SYMBOL(udp_lib_get_port);
  1501. #ifdef CONFIG_PROC_FS
  1502. EXPORT_SYMBOL(udp_proc_register);
  1503. EXPORT_SYMBOL(udp_proc_unregister);
  1504. #endif