/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Version:	$Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller
 *					and the PEEK bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>:	Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include "udp_impl.h"
/*
 *	SNMP MIB for the UDP layer
 */
DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;

struct hlist_head udp_hash[UDP_HTABLE_SIZE];
DEFINE_RWLOCK(udp_hash_lock);

static int udp_port_rover;
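
/*
 * Return nonzero if local port @num is already taken by some socket in
 * @udptable.  The caller must hold udp_hash_lock.
 */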
static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[])
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
		if (sk->sk_hash == num)
			return 1;
	return 0;
}
/**
 *  __udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @udptable:    hash list table, must be of UDP_HTABLE_SIZE
 *  @port_rover:  pointer to record of last unallocated port
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 */
int __udp_lib_get_port(struct sock *sk, unsigned short snum,
		       struct hlist_head udptable[], int *port_rover,
		       int (*saddr_comp)(const struct sock *sk1,
					 const struct sock *sk2))
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct sock *sk2;
	int error = 1;

	write_lock_bh(&udp_hash_lock);
	if (snum == 0) {
		int best_size_so_far, best, result, i;

		if (*port_rover > sysctl_local_port_range[1] ||
		    *port_rover < sysctl_local_port_range[0])
			*port_rover = sysctl_local_port_range[0];
		best_size_so_far = 32767;
		best = result = *port_rover;
		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
			int size;

			head = &udptable[result & (UDP_HTABLE_SIZE - 1)];
			if (hlist_empty(head)) {
				if (result > sysctl_local_port_range[1])
					result = sysctl_local_port_range[0] +
						((result - sysctl_local_port_range[0]) &
						 (UDP_HTABLE_SIZE - 1));
				goto gotit;
			}
			size = 0;
			sk_for_each(sk2, node, head) {
				if (++size >= best_size_so_far)
					goto next;
			}
			best_size_so_far = size;
			best = result;
		next:
			;
		}
		result = best;
		for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE;
		     i++, result += UDP_HTABLE_SIZE) {
			if (result > sysctl_local_port_range[1])
				result = sysctl_local_port_range[0]
					+ ((result - sysctl_local_port_range[0]) &
					   (UDP_HTABLE_SIZE - 1));
			if (!__udp_lib_lport_inuse(result, udptable))
				break;
		}
		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
			goto fail;
gotit:
		*port_rover = snum = result;
	} else {
		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];

		sk_for_each(sk2, node, head)
			if (sk2->sk_hash == snum &&
			    sk2 != sk &&
			    (!sk2->sk_reuse || !sk->sk_reuse) &&
			    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
			     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
			    (*saddr_comp)(sk, sk2))
				goto fail;
	}
	inet_sk(sk)->num = snum;
	sk->sk_hash = snum;
	if (sk_unhashed(sk)) {
		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
		sk_add_node(sk, head);
		sock_prot_inc_use(sk->sk_prot);
	}
	error = 0;
fail:
	write_unlock_bh(&udp_hash_lock);
	return error;
}
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*scmp)(const struct sock *, const struct sock *))
{
	return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp);
}
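
/*
 * Two IPv4 sockets conflict on a local port if either is bound to the
 * wildcard address or both are bound to the same local address, and the
 * other socket is not IPv6-only.
 */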
int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	return (!ipv6_only_sock(sk2) &&
		(!inet1->rcv_saddr || !inet2->rcv_saddr ||
		 inet1->rcv_saddr == inet2->rcv_saddr));
}
static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return udp_get_port(sk, snum, ipv4_rcv_saddr_equal);
}
/* UDP sockets are nearly always wildcarded out the wazoo, so it makes no
 * sense to try harder than this. -DaveM
 */
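
/*
 * Best-match socket lookup: each of rcv_saddr, daddr, dport and
 * bound_dev_if that is set on a candidate socket must match the packet
 * and adds 2 to its score; an AF_INET socket adds 1.  A score of 9 is a
 * fully specified match and ends the search early.
 */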
static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
				      __be32 daddr, __be16 dport,
				      int dif, struct hlist_head udptable[])
{
	struct sock *sk, *result = NULL;
	struct hlist_node *node;
	unsigned short hnum = ntohs(dport);
	int badness = -1;

	read_lock(&udp_hash_lock);
	sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
		struct inet_sock *inet = inet_sk(sk);

		if (sk->sk_hash == hnum && !ipv6_only_sock(sk)) {
			int score = (sk->sk_family == PF_INET ? 1 : 0);

			if (inet->rcv_saddr) {
				if (inet->rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (inet->daddr) {
				if (inet->daddr != saddr)
					continue;
				score += 2;
			}
			if (inet->dport) {
				if (inet->dport != sport)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 9) {
				result = sk;
				break;
			} else if (score > badness) {
				result = sk;
				badness = score;
			}
		}
	}
	if (result)
		sock_hold(result);
	read_unlock(&udp_hash_lock);
	return result;
}
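
/*
 * Starting at @sk, walk the hash chain and return the next socket that
 * should receive a multicast/broadcast datagram with the given local and
 * remote addresses, ports and incoming device, honouring per-socket
 * multicast source filters.
 */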
static inline struct sock *udp_v4_mcast_next(struct sock *sk,
					     __be16 loc_port, __be32 loc_addr,
					     __be16 rmt_port, __be32 rmt_addr,
					     int dif)
{
	struct hlist_node *node;
	struct sock *s = sk;
	unsigned short hnum = ntohs(loc_port);

	sk_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		if (s->sk_hash != hnum ||
		    (inet->daddr && inet->daddr != rmt_addr) ||
		    (inet->dport != rmt_port && inet->dport) ||
		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
		    ipv6_only_sock(s) ||
		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
			continue;
		if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
			continue;
		goto found;
	}
	s = NULL;
found:
	return s;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet.  We move
 * on past this.  Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */
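/*
 * Errors are passed to the application only if it enabled IP_RECVERR, or
 * if the error is hard and the socket is connected (RFC 1122, 4.1.3.3);
 * anything else is silently dropped below.
 */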
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[])
{
	struct inet_sock *inet;
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data + (iph->ihl << 2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;

	sk = __udp4_lib_lookup(iph->daddr, uh->dest, iph->saddr, uh->source,
			       skb->dev->ifindex, udptable);
	if (sk == NULL) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh + 1));
	}
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}
void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, udp_hash);
}
/*
 * Throw away all pending data and cancel the corking.  Socket is locked.
 */
static void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
/**
 * 	udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
 * 	@sk:	socket we are sending on
 * 	@skb:	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 */
static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 __be32 src, __be32 dst, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW checksumming cannot be used with two or more
		 * fragments on the socket, because the checksums of
		 * all the sk_buffs must be combined into one.
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

		skb->ip_summed = CHECKSUM_NONE;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
/*
 * Push out all pending data as one UDP datagram.  Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi *fl = &inet->cork.fl;
	struct sk_buff *skb;
	struct udphdr *uh;
	int err = 0;
	__wsum csum = 0;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl->fl_ip_sport;
	uh->dest = fl->fl_ip_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (up->pcflag)					 /* UDP-Lite          */
		csum = udplite_csum_outgoing(sk, skb);

	else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {	 /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
		udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
		goto send;

	} else						 /* `normal' UDP      */
		csum = udp_csum_outgoing(sk, skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_push_pending_frames(sk);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}
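
/*
 * Send a datagram, or append data to a corked socket.  The destination
 * comes from msg_name if given, otherwise from the connected address;
 * the routing output is cached on connected sockets when no options force
 * a fresh lookup.  Unless the socket is corked (UDP_CORK or MSG_MORE),
 * the appended data is pushed out as a single datagram before returning.
 */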
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8 tos;
	int err, is_udplite = up->pcflag;
	int corkreq = up->corkflag || msg->msg_flags & MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */
	if (msg->msg_flags & MSG_OOB)	/* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;

	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->daddr;
		dport = inet->dport;
		/* Open fast path for connected socket.
		   The route will not be used if at least one option is set.
		 */
		connected = 1;
	}
	ipc.addr = inet->saddr;

	ipc.oif = sk->sk_bound_dev_if;
	if (msg->msg_controllen) {
		err = ip_cmsg_send(msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt)
		ipc.opt = inet->opt;

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->faddr;
		connected = 0;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (MULTICAST(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	}

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (rt == NULL) {
		struct flowi fl = { .oif = ipc.oif,
				    .nl_u = { .ip4_u =
					      { .daddr = faddr,
						.saddr = saddr,
						.tos = tos } },
				    .proto = sk->sk_protocol,
				    .uli_u = { .ports =
					       { .sport = inet->sport,
						 .dport = dport } } };
		security_sk_classify_flow(sk, &fl);
		err = ip_route_output_flow(&rt, &fl, sk, 1);
		if (err) {
			if (err == -ENETUNREACH)
				IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->u.dst));
	}

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = rt->rt_src;
	if (!ipc.addr)
		daddr = ipc.addr = rt->rt_dst;

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	inet->cork.fl.fl4_dst = daddr;
	inet->cork.fl.fl_ip_dport = dport;
	inet->cork.fl.fl4_src = saddr;
	inet->cork.fl.fl_ip_sport = inet->sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
			     sizeof(struct udphdr), &ipc, rt,
			     corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err) {
		UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
		return len;
	}
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too
	 * many things).  We could add another new stat but at least for now
	 * that seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->u.dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
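
/*
 * udp_sendpage() relies on the corking machinery: if the socket is not
 * already corked, it first calls udp_sendmsg() with a zero length and
 * MSG_MORE to set up the route and cork state, which only succeeds on a
 * connected socket.
 */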
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags | MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(NULL, sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags & MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		struct sk_buff *skb;
		unsigned long amount;

		amount = 0;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount
			 * of this packet since that is all
			 * that will be read.
			 */
			amount = skb->len - sizeof(struct udphdr);
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
/*
 * 	This should be easy, if there is something there we
 * 	return it, otherwise we block.
 */
int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int err;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Check any passed addresses
	 */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

try_again:
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	ulen = skb->len - sizeof(struct udphdr);
	copied = len;
	if (copied > ulen)
		copied = ulen;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */
	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
		if (udp_lib_checksum_complete(skb))
			goto csum_copy_err;
	}

	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb,
						       sizeof(struct udphdr),
						       msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);

	skb_kill_datagram(sk, skb, flags);

	if (noblock)
		return -EAGAIN;
	goto try_again;
}
int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->daddr = 0;
	inet->dport = 0;
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook.  Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */
		int len;	/* signed, so the length check below works */

		/* if we're overly short, let UDP handle it */
		len = skb->len - sizeof(struct udphdr);
		if (len <= 0)
			goto udp;

		if (up->encap_rcv != NULL) {
			int ret;

			ret = (*up->encap_rcv)(sk, skb);
			if (ret <= 0) {
				UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

udp:
	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {		/* full coverage was set */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
				"%d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver.  This is subtle: if receiver wants x and x
		 * is greater than the buffersize/MTU then receiver will
		 * complain that it wants x while sender emits packets of
		 * smaller size y.  Therefore the above ...()->partial_cov
		 * statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING
				"UDPLITE: coverage %d too small, need min %d\n",
				UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (sk->sk_filter) {
		if (udp_lib_checksum_complete(skb))
			goto drop;
	}

	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag);
		goto drop;
	}

	UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
	return 0;

drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
	kfree_skb(skb);
	return -1;
}
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context,
 *	so we don't need to lock the hashes.
 */
static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct hlist_head udptable[])
{
	struct sock *sk;
	int dif;

	read_lock(&udp_hash_lock);
	sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
	dif = skb->dev->ifindex;
	sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
	if (sk) {
		struct sock *sknext = NULL;

		do {
			struct sk_buff *skb1 = skb;

			sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
						   uh->source, saddr, dif);
			if (sknext)
				skb1 = skb_clone(skb, GFP_ATOMIC);

			if (skb1) {
				int ret = udp_queue_rcv_skb(sk, skb1);
				if (ret > 0)
					/* we should probably re-process
					 * instead of dropping packets here. */
					kfree_skb(skb1);
			}
			sk = sknext;
		} while (sknext);
	} else
		kfree_skb(skb);
	read_unlock(&udp_hash_lock);
	return 0;
}
/*
 * Initialize UDP checksum state.  If this returns zero (success) and
 * skb->ip_summed is CHECKSUM_UNNECESSARY, no further checks are required.
 * Otherwise, completing the checksum later requires checksumming the
 * packet body, including the UDP header, and folding it into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	const struct iphdr *iph;
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	iph = ip_hdr(skb);
	if (uh->check == 0) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
				       proto, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					       skb->len, proto, 0);
	/* Probably, we should checksum udp header (it should be in cache
	 * in any case) and data in tiny packets (< rx copybreak).
	 */
	return 0;
}
/*
 *	All we need to do is get the socket, and then do a checksum.
 */
int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh = udp_hdr(skb);
	unsigned short ulen;
	struct rtable *rt = (struct rtable *)skb->dst;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;

	/*
	 *	Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable);

	sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest,
			       skb->dev->ifindex, udptable);

	if (sk != NULL) {
		int ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket.  Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	UDP_INC_STATS_BH(UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't want to listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       ulen,
		       skb->len,
		       NIPQUAD(daddr),
		       ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %u.%u.%u.%u:%u to %u.%u.%u.%u:%u ulen %d\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       NIPQUAD(saddr),
		       ntohs(uh->source),
		       NIPQUAD(daddr),
		       ntohs(uh->dest),
		       ulen);
drop:
	UDP_INC_STATS_BH(UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, udp_hash, IPPROTO_UDP);
}
int udp_destroy_sock(struct sock *sk)
{
	lock_sock(sk);
	udp_flush_pending_frames(sk);
	release_sock(sk);
	return 0;
}
/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val;
	int err = 0;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			(*push_pending_frames)(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!up->pcflag)	 /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value.  To make
	 * sense, this should be set to at least 8 (as done below).  If zero
	 * is used, this again means full checksum coverage. */
	case UDPLITE_RECV_CSCOV:
		if (!up->pcflag)	 /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
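
/*
 * For illustration, a minimal user-space sketch of enabling partial
 * checksum coverage on a UDP-Lite socket (assumes headers that define
 * IPPROTO_UDPLITE, SOL_UDPLITE and the UDPLITE_*_CSCOV option names; the
 * coverage value of 20 octets is arbitrary):
 *
 *	int fd  = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cov = 20;	// checksum only the first 20 octets per datagram
 *
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 */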
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	/* The following two cannot be changed on UDP sockets; the return
	 * value is always 0 (which corresponds to the full checksum
	 * coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 * 	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, it could get a return
 *	from select indicating data available, but then block when reading
 *	it.  Add special case code to work around these arguably broken
 *	applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	int is_lite = IS_UDPLITE(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) &&
	    !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
		struct sk_buff *skb;

		spin_lock_bh(&rcvq->lock);
		while ((skb = skb_peek(rcvq)) != NULL &&
		       udp_lib_checksum_complete(skb)) {
			UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite);
			__skb_unlink(skb, rcvq);
			kfree_skb(skb);
		}
		spin_unlock_bh(&rcvq->lock);

		/* nothing to see, move along */
		if (skb == NULL)
			mask &= ~(POLLIN | POLLRDNORM);
	}

	return mask;
}
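
/*
 * Note that the udp_lib_* helpers above are written to be shared with
 * UDP-Lite, which builds its own struct proto from the same functions.
 */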
struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.backlog_rcv	   = udp_queue_rcv_skb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.get_port	   = udp_v4_get_port,
	.obj_size	   = sizeof(struct udp_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
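
/*
 * seq_file iterator over the UDP hash table: udp_get_first()/udp_get_next()
 * walk the buckets, skipping sockets of a different address family, and
 * udp_seq_start() returns (void *)1 (i.e. SEQ_START_TOKEN) for position 0
 * so the show callback can emit the header line first.
 */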
static struct sock *udp_get_first(struct seq_file *seq)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;

	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
		struct hlist_node *node;
		sk_for_each(sk, node, state->hashtable + state->bucket) {
			if (sk->sk_family == state->family)
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && sk->sk_family != state->family);

	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
		sk = sk_head(state->hashtable + state->bucket);
		goto try_again;
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&udp_hash_lock);
	return *pos ? udp_get_idx(seq, *pos - 1) : (void *)1;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == (void *)1)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&udp_hash_lock);
}

static int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	s->family	 = afinfo->family;
	s->hashtable	 = afinfo->hashtable;
	s->seq_ops.start = udp_seq_start;
	s->seq_ops.next	 = udp_seq_next;
	s->seq_ops.show	 = afinfo->seq_show;
	s->seq_ops.stop	 = udp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;

	seq = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner	  = afinfo->owner;
	afinfo->seq_fops->open	  = udp_seq_open;
	afinfo->seq_fops->read	  = seq_read;
	afinfo->seq_fops->llseek  = seq_lseek;
	afinfo->seq_fops->release = seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void udp_proc_unregister(struct udp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}

/* ------------------------------------------------------------------------ */
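/*
 * Format one socket as a line of /proc/net/udp.  The TCP-specific columns
 * (tr, tm->when, retrnsmt, timeout) have no meaning for UDP and are
 * printed as zeros.
 */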
static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->daddr;
	__be32 src  = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp  = ntohs(inet->sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
		bucket, src, srcp, dest, destp, sp->sk_state,
		atomic_read(&sp->sk_wmem_alloc),
		atomic_read(&sp->sk_rmem_alloc),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp);
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
	else {
		char tmpbuf[129];
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, tmpbuf, state->bucket);
		seq_printf(seq, "%-127s\n", tmpbuf);
	}
	return 0;
}
/* ------------------------------------------------------------------------ */
static struct file_operations udp4_seq_fops;
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "udp",
	.family		= AF_INET,
	.hashtable	= udp_hash,
	.seq_show	= udp4_seq_show,
	.seq_fops	= &udp4_seq_fops,
};

int __init udp4_proc_init(void)
{
	return udp_proc_register(&udp4_seq_afinfo);
}

void udp4_proc_exit(void)
{
	udp_proc_unregister(&udp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
EXPORT_SYMBOL(udp_disconnect);
EXPORT_SYMBOL(udp_hash);
EXPORT_SYMBOL(udp_hash_lock);
EXPORT_SYMBOL(udp_ioctl);
EXPORT_SYMBOL(udp_get_port);
EXPORT_SYMBOL(udp_prot);
EXPORT_SYMBOL(udp_sendmsg);
EXPORT_SYMBOL(udp_lib_getsockopt);
EXPORT_SYMBOL(udp_lib_setsockopt);
EXPORT_SYMBOL(udp_poll);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);
#endif