sock.c

  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Generic socket support routines. Memory allocators, socket lock/release
  7. * handler for protocols to use and generic option handler.
  8. *
  9. *
  10. * Version: $Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
  11. *
  12. * Authors: Ross Biro
  13. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  14. * Florian La Roche, <flla@stud.uni-sb.de>
  15. * Alan Cox, <A.Cox@swansea.ac.uk>
  16. *
  17. * Fixes:
  18. * Alan Cox : Numerous verify_area() problems
  19. * Alan Cox : Connecting on a connecting socket
  20. * now returns an error for tcp.
  21. * Alan Cox : sock->protocol is set correctly.
  22. * and is not sometimes left as 0.
  23. * Alan Cox : connect handles icmp errors on a
  24. * connect properly. Unfortunately there
  25. * is a restart syscall nasty there. I
  26. * can't match BSD without hacking the C
  27. * library. Ideas urgently sought!
  28. * Alan Cox : Disallow bind() to addresses that are
  29. * not ours - especially broadcast ones!!
  30. * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
  31. * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
  32. * instead they leave that for the DESTROY timer.
  33. * Alan Cox : Clean up error flag in accept
  34. * Alan Cox : TCP ack handling is buggy, the DESTROY timer
  35. * was buggy. Put a remove_sock() in the handler
  36. * for memory when we hit 0. Also altered the timer
  37. * code. The ACK stuff can wait and needs major
  38. * TCP layer surgery.
  39. * Alan Cox : Fixed TCP ack bug, removed remove sock
  40. * and fixed timer/inet_bh race.
  41. * Alan Cox : Added zapped flag for TCP
  42. * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
  43. * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
  44. * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
  45. * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
  46. * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
  47. * Rick Sladkey : Relaxed UDP rules for matching packets.
  48. * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
  49. * Pauline Middelink : identd support
  50. * Alan Cox : Fixed connect() taking signals I think.
  51. * Alan Cox : SO_LINGER supported
  52. * Alan Cox : Error reporting fixes
  53. * Anonymous : inet_create tidied up (sk->reuse setting)
  54. * Alan Cox : inet sockets don't set sk->type!
  55. * Alan Cox : Split socket option code
  56. * Alan Cox : Callbacks
  57. * Alan Cox : Nagle flag for Charles & Johannes stuff
  58. * Alex : Removed restriction on inet fioctl
  59. * Alan Cox : Splitting INET from NET core
  60. * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
  61. * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
  62. * Alan Cox : Split IP from generic code
  63. * Alan Cox : New kfree_skbmem()
  64. * Alan Cox : Make SO_DEBUG superuser only.
  65. * Alan Cox : Allow anyone to clear SO_DEBUG
  66. * (compatibility fix)
  67. * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
  68. * Alan Cox : Allocator for a socket is settable.
  69. * Alan Cox : SO_ERROR includes soft errors.
  70. * Alan Cox : Allow NULL arguments on some SO_ opts
  71. * Alan Cox : Generic socket allocation to make hooks
  72. * easier (suggested by Craig Metz).
  73. * Michael Pall : SO_ERROR returns positive errno again
  74. * Steve Whitehouse: Added default destructor to free
  75. * protocol private data.
  76. * Steve Whitehouse: Added various other default routines
  77. * common to several socket families.
  78. * Chris Evans : Call suser() check last on F_SETOWN
  79. * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
  80. * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
  81. * Andi Kleen : Fix write_space callback
  82. * Chris Evans : Security fixes - signedness again
  83. * Arnaldo C. Melo : cleanups, use skb_queue_purge
  84. *
  85. * To Fix:
  86. *
  87. *
  88. * This program is free software; you can redistribute it and/or
  89. * modify it under the terms of the GNU General Public License
  90. * as published by the Free Software Foundation; either version
  91. * 2 of the License, or (at your option) any later version.
  92. */
  93. #include <linux/capability.h>
  94. #include <linux/errno.h>
  95. #include <linux/types.h>
  96. #include <linux/socket.h>
  97. #include <linux/in.h>
  98. #include <linux/kernel.h>
  99. #include <linux/module.h>
  100. #include <linux/proc_fs.h>
  101. #include <linux/seq_file.h>
  102. #include <linux/sched.h>
  103. #include <linux/timer.h>
  104. #include <linux/string.h>
  105. #include <linux/sockios.h>
  106. #include <linux/net.h>
  107. #include <linux/mm.h>
  108. #include <linux/slab.h>
  109. #include <linux/interrupt.h>
  110. #include <linux/poll.h>
  111. #include <linux/tcp.h>
  112. #include <linux/init.h>
  113. #include <linux/highmem.h>
  114. #include <asm/uaccess.h>
  115. #include <asm/system.h>
  116. #include <linux/netdevice.h>
  117. #include <net/protocol.h>
  118. #include <linux/skbuff.h>
  119. #include <net/net_namespace.h>
  120. #include <net/request_sock.h>
  121. #include <net/sock.h>
  122. #include <net/xfrm.h>
  123. #include <linux/ipsec.h>
  124. #include <linux/filter.h>
  125. #ifdef CONFIG_INET
  126. #include <net/tcp.h>
  127. #endif
  128. /*
  129. * Each address family might have different locking rules, so we have
  130. * one slock key per address family:
  131. */
  132. static struct lock_class_key af_family_keys[AF_MAX];
  133. static struct lock_class_key af_family_slock_keys[AF_MAX];
  134. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  135. /*
  136. * Make lock validator output more readable. (we pre-construct these
  137. * strings build-time, so that runtime initialization of socket
  138. * locks is fast):
  139. */
  140. static const char *af_family_key_strings[AF_MAX+1] = {
  141. "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
  142. "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
  143. "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
  144. "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
  145. "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
  146. "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
  147. "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
  148. "sk_lock-21" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
  149. "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
  150. "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
  151. "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
  152. "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX"
  153. };
  154. static const char *af_family_slock_key_strings[AF_MAX+1] = {
  155. "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
  156. "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
  157. "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
  158. "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
  159. "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
  160. "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
  161. "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
  162. "slock-21" , "slock-AF_SNA" , "slock-AF_IRDA" ,
  163. "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
  164. "slock-27" , "slock-28" , "slock-AF_CAN" ,
  165. "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
  166. "slock-AF_RXRPC" , "slock-AF_MAX"
  167. };
  168. static const char *af_family_clock_key_strings[AF_MAX+1] = {
  169. "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
  170. "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
  171. "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
  172. "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
  173. "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
  174. "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
  175. "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
  176. "clock-21" , "clock-AF_SNA" , "clock-AF_IRDA" ,
  177. "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
  178. "clock-27" , "clock-28" , "clock-29" ,
  179. "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
  180. "clock-AF_RXRPC" , "clock-AF_MAX"
  181. };
  182. #endif
  183. /*
  184. * sk_callback_lock locking rules are per-address-family,
  185. * so split the lock classes by using a per-AF key:
  186. */
  187. static struct lock_class_key af_callback_keys[AF_MAX];
  188. /* Take the size of the struct sk_buff overhead into account when
  189. * determining these values, since that overhead is not constant across
  190. * platforms. This keeps socket queueing behavior and performance
  191. * from depending upon such differences.
  192. */
  193. #define _SK_MEM_PACKETS 256
  194. #define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256)
  195. #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
  196. #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
  197. /* Run time adjustable parameters. */
  198. __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
  199. __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
  200. __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
  201. __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
  202. /* Maximal space eaten by iovec or ancillary data plus some space */
  203. int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
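/*
 * Worked example (a sketch only; sizeof(struct sk_buff) varies by
 * architecture and config): if sizeof(struct sk_buff) were 256 bytes,
 * then _SK_MEM_OVERHEAD = 256 + 256 = 512 and
 * SK_WMEM_MAX = SK_RMEM_MAX = 512 * 256 = 131072 bytes (128 KB),
 * i.e. room for roughly 256 full-overhead packets per socket by default.
 */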
  204. static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
  205. {
  206. struct timeval tv;
  207. if (optlen < sizeof(tv))
  208. return -EINVAL;
  209. if (copy_from_user(&tv, optval, sizeof(tv)))
  210. return -EFAULT;
  211. if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
  212. return -EDOM;
  213. if (tv.tv_sec < 0) {
  214. static int warned __read_mostly;
  215. *timeo_p = 0;
  216. if (warned < 10 && net_ratelimit()) {
  217. warned++;
  218. printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
  219. "tries to set negative timeout\n", current->comm, task_pid_nr(current));
  220. }
  221. return 0;
  222. }
  223. *timeo_p = MAX_SCHEDULE_TIMEOUT;
  224. if (tv.tv_sec == 0 && tv.tv_usec == 0)
  225. return 0;
  226. if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
  227. *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
  228. return 0;
  229. }
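/*
 * Illustrative userspace sketch (not part of this file): the timeval that
 * sock_set_timeout() parses is supplied via setsockopt(), e.g.
 *
 *   struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };  // 2.5 s
 *   if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *           perror("setsockopt(SO_RCVTIMEO)");
 *
 *   // tv.tv_sec == 0 && tv.tv_usec == 0 disables the timeout
 *   // (the timeout becomes MAX_SCHEDULE_TIMEOUT above).
 */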
  230. static void sock_warn_obsolete_bsdism(const char *name)
  231. {
  232. static int warned;
  233. static char warncomm[TASK_COMM_LEN];
  234. if (strcmp(warncomm, current->comm) && warned < 5) {
  235. strcpy(warncomm, current->comm);
  236. printk(KERN_WARNING "process `%s' is using obsolete "
  237. "%s SO_BSDCOMPAT\n", warncomm, name);
  238. warned++;
  239. }
  240. }
  241. static void sock_disable_timestamp(struct sock *sk)
  242. {
  243. if (sock_flag(sk, SOCK_TIMESTAMP)) {
  244. sock_reset_flag(sk, SOCK_TIMESTAMP);
  245. net_disable_timestamp();
  246. }
  247. }
  248. int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
  249. {
  250. int err = 0;
  251. int skb_len;
  252. /* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces the
  253. number of warnings when compiling with -W --ANK
  254. */
  255. if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
  256. (unsigned)sk->sk_rcvbuf) {
  257. err = -ENOMEM;
  258. goto out;
  259. }
  260. err = sk_filter(sk, skb);
  261. if (err)
  262. goto out;
  263. skb->dev = NULL;
  264. skb_set_owner_r(skb, sk);
  265. /* Cache the SKB length before we tack it onto the receive
  266. * queue. Once it is added it no longer belongs to us and
  267. * may be freed by other threads of control pulling packets
  268. * from the queue.
  269. */
  270. skb_len = skb->len;
  271. skb_queue_tail(&sk->sk_receive_queue, skb);
  272. if (!sock_flag(sk, SOCK_DEAD))
  273. sk->sk_data_ready(sk, skb_len);
  274. out:
  275. return err;
  276. }
  277. EXPORT_SYMBOL(sock_queue_rcv_skb);
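/*
 * Usage sketch (hypothetical protocol code, not from this file): a
 * protocol receive handler typically hands an skb to the socket like
 * this, and keeps ownership of the skb if sock_queue_rcv_skb() fails:
 *
 *   static int myproto_queue(struct sock *sk, struct sk_buff *skb)
 *   {
 *           if (sock_queue_rcv_skb(sk, skb) < 0) {
 *                   kfree_skb(skb);      // caller still owns skb on error
 *                   return NET_RX_DROP;
 *           }
 *           return NET_RX_SUCCESS;
 *   }
 */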
  278. int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
  279. {
  280. int rc = NET_RX_SUCCESS;
  281. if (sk_filter(sk, skb))
  282. goto discard_and_relse;
  283. skb->dev = NULL;
  284. if (nested)
  285. bh_lock_sock_nested(sk);
  286. else
  287. bh_lock_sock(sk);
  288. if (!sock_owned_by_user(sk)) {
  289. /*
  290. * trylock + unlock semantics:
  291. */
  292. mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
  293. rc = sk->sk_backlog_rcv(sk, skb);
  294. mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
  295. } else
  296. sk_add_backlog(sk, skb);
  297. bh_unlock_sock(sk);
  298. out:
  299. sock_put(sk);
  300. return rc;
  301. discard_and_relse:
  302. kfree_skb(skb);
  303. goto out;
  304. }
  305. EXPORT_SYMBOL(sk_receive_skb);
  306. struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
  307. {
  308. struct dst_entry *dst = sk->sk_dst_cache;
  309. if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
  310. sk->sk_dst_cache = NULL;
  311. dst_release(dst);
  312. return NULL;
  313. }
  314. return dst;
  315. }
  316. EXPORT_SYMBOL(__sk_dst_check);
  317. struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
  318. {
  319. struct dst_entry *dst = sk_dst_get(sk);
  320. if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
  321. sk_dst_reset(sk);
  322. dst_release(dst);
  323. return NULL;
  324. }
  325. return dst;
  326. }
  327. EXPORT_SYMBOL(sk_dst_check);
  328. static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
  329. {
  330. int ret = -ENOPROTOOPT;
  331. #ifdef CONFIG_NETDEVICES
  332. struct net *net = sk->sk_net;
  333. char devname[IFNAMSIZ];
  334. int index;
  335. /* Sorry... */
  336. ret = -EPERM;
  337. if (!capable(CAP_NET_RAW))
  338. goto out;
  339. ret = -EINVAL;
  340. if (optlen < 0)
  341. goto out;
  342. /* Bind this socket to a particular device like "eth0",
  343. * as specified in the passed interface name. If the
  344. * name is "" or the option length is zero the socket
  345. * is not bound.
  346. */
  347. if (optlen > IFNAMSIZ - 1)
  348. optlen = IFNAMSIZ - 1;
  349. memset(devname, 0, sizeof(devname));
  350. ret = -EFAULT;
  351. if (copy_from_user(devname, optval, optlen))
  352. goto out;
  353. if (devname[0] == '\0') {
  354. index = 0;
  355. } else {
  356. struct net_device *dev = dev_get_by_name(net, devname);
  357. ret = -ENODEV;
  358. if (!dev)
  359. goto out;
  360. index = dev->ifindex;
  361. dev_put(dev);
  362. }
  363. lock_sock(sk);
  364. sk->sk_bound_dev_if = index;
  365. sk_dst_reset(sk);
  366. release_sock(sk);
  367. ret = 0;
  368. out:
  369. #endif
  370. return ret;
  371. }
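/*
 * Illustrative userspace sketch (not part of this file): binding a socket
 * to a device requires CAP_NET_RAW, as checked above:
 *
 *   const char ifname[] = "eth0";
 *   if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *                  ifname, sizeof(ifname)) < 0)
 *           perror("setsockopt(SO_BINDTODEVICE)");
 *
 *   // Passing an empty string (or zero length) unbinds the socket.
 */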
  372. static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
  373. {
  374. if (valbool)
  375. sock_set_flag(sk, bit);
  376. else
  377. sock_reset_flag(sk, bit);
  378. }
  379. /*
  380. * This is meant for all protocols to use and covers goings on
  381. * at the socket level. Everything here is generic.
  382. */
  383. int sock_setsockopt(struct socket *sock, int level, int optname,
  384. char __user *optval, int optlen)
  385. {
  386. struct sock *sk=sock->sk;
  387. int val;
  388. int valbool;
  389. struct linger ling;
  390. int ret = 0;
  391. /*
  392. * Options without arguments
  393. */
  394. #ifdef SO_DONTLINGER /* Compatibility item... */
  395. if (optname == SO_DONTLINGER) {
  396. lock_sock(sk);
  397. sock_reset_flag(sk, SOCK_LINGER);
  398. release_sock(sk);
  399. return 0;
  400. }
  401. #endif
  402. if (optname == SO_BINDTODEVICE)
  403. return sock_bindtodevice(sk, optval, optlen);
  404. if (optlen < sizeof(int))
  405. return -EINVAL;
  406. if (get_user(val, (int __user *)optval))
  407. return -EFAULT;
  408. valbool = val?1:0;
  409. lock_sock(sk);
  410. switch(optname) {
  411. case SO_DEBUG:
  412. if (val && !capable(CAP_NET_ADMIN)) {
  413. ret = -EACCES;
  414. } else
  415. sock_valbool_flag(sk, SOCK_DBG, valbool);
  416. break;
  417. case SO_REUSEADDR:
  418. sk->sk_reuse = valbool;
  419. break;
  420. case SO_TYPE:
  421. case SO_ERROR:
  422. ret = -ENOPROTOOPT;
  423. break;
  424. case SO_DONTROUTE:
  425. sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
  426. break;
  427. case SO_BROADCAST:
  428. sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
  429. break;
  430. case SO_SNDBUF:
  431. /* Don't error on this; BSD doesn't, and if you think
  432. about it this is right. Otherwise apps would have to
  433. play 'guess the biggest size' games. RCVBUF/SNDBUF
  434. are treated in BSD as hints. */
  435. if (val > sysctl_wmem_max)
  436. val = sysctl_wmem_max;
  437. set_sndbuf:
  438. sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
  439. if ((val * 2) < SOCK_MIN_SNDBUF)
  440. sk->sk_sndbuf = SOCK_MIN_SNDBUF;
  441. else
  442. sk->sk_sndbuf = val * 2;
  443. /*
  444. * Wake up sending tasks if we
  445. * upped the value.
  446. */
  447. sk->sk_write_space(sk);
  448. break;
  449. case SO_SNDBUFFORCE:
  450. if (!capable(CAP_NET_ADMIN)) {
  451. ret = -EPERM;
  452. break;
  453. }
  454. goto set_sndbuf;
  455. case SO_RCVBUF:
  456. /* Don't error on this; BSD doesn't, and if you think
  457. about it this is right. Otherwise apps would have to
  458. play 'guess the biggest size' games. RCVBUF/SNDBUF
  459. are treated in BSD as hints. */
  460. if (val > sysctl_rmem_max)
  461. val = sysctl_rmem_max;
  462. set_rcvbuf:
  463. sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
  464. /*
  465. * We double it on the way in to account for
  466. * "struct sk_buff" etc. overhead. Applications
  467. * assume that the SO_RCVBUF setting they make will
  468. * allow that much actual data to be received on that
  469. * socket.
  470. *
  471. * Applications are unaware that "struct sk_buff" and
  472. * other overheads allocate from the receive buffer
  473. * during socket buffer allocation.
  474. *
  475. * And after considering the possible alternatives,
  476. * returning the value we actually used in getsockopt
  477. * is the most desirable behavior.
  478. */
  479. if ((val * 2) < SOCK_MIN_RCVBUF)
  480. sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
  481. else
  482. sk->sk_rcvbuf = val * 2;
  483. break;
  484. case SO_RCVBUFFORCE:
  485. if (!capable(CAP_NET_ADMIN)) {
  486. ret = -EPERM;
  487. break;
  488. }
  489. goto set_rcvbuf;
  490. case SO_KEEPALIVE:
  491. #ifdef CONFIG_INET
  492. if (sk->sk_protocol == IPPROTO_TCP)
  493. tcp_set_keepalive(sk, valbool);
  494. #endif
  495. sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
  496. break;
  497. case SO_OOBINLINE:
  498. sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
  499. break;
  500. case SO_NO_CHECK:
  501. sk->sk_no_check = valbool;
  502. break;
  503. case SO_PRIORITY:
  504. if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
  505. sk->sk_priority = val;
  506. else
  507. ret = -EPERM;
  508. break;
  509. case SO_LINGER:
  510. if (optlen < sizeof(ling)) {
  511. ret = -EINVAL; /* 1003.1g */
  512. break;
  513. }
  514. if (copy_from_user(&ling,optval,sizeof(ling))) {
  515. ret = -EFAULT;
  516. break;
  517. }
  518. if (!ling.l_onoff)
  519. sock_reset_flag(sk, SOCK_LINGER);
  520. else {
  521. #if (BITS_PER_LONG == 32)
  522. if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
  523. sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
  524. else
  525. #endif
  526. sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
  527. sock_set_flag(sk, SOCK_LINGER);
  528. }
  529. break;
  530. case SO_BSDCOMPAT:
  531. sock_warn_obsolete_bsdism("setsockopt");
  532. break;
  533. case SO_PASSCRED:
  534. if (valbool)
  535. set_bit(SOCK_PASSCRED, &sock->flags);
  536. else
  537. clear_bit(SOCK_PASSCRED, &sock->flags);
  538. break;
  539. case SO_TIMESTAMP:
  540. case SO_TIMESTAMPNS:
  541. if (valbool) {
  542. if (optname == SO_TIMESTAMP)
  543. sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
  544. else
  545. sock_set_flag(sk, SOCK_RCVTSTAMPNS);
  546. sock_set_flag(sk, SOCK_RCVTSTAMP);
  547. sock_enable_timestamp(sk);
  548. } else {
  549. sock_reset_flag(sk, SOCK_RCVTSTAMP);
  550. sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
  551. }
  552. break;
  553. case SO_RCVLOWAT:
  554. if (val < 0)
  555. val = INT_MAX;
  556. sk->sk_rcvlowat = val ? : 1;
  557. break;
  558. case SO_RCVTIMEO:
  559. ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
  560. break;
  561. case SO_SNDTIMEO:
  562. ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
  563. break;
  564. case SO_ATTACH_FILTER:
  565. ret = -EINVAL;
  566. if (optlen == sizeof(struct sock_fprog)) {
  567. struct sock_fprog fprog;
  568. ret = -EFAULT;
  569. if (copy_from_user(&fprog, optval, sizeof(fprog)))
  570. break;
  571. ret = sk_attach_filter(&fprog, sk);
  572. }
  573. break;
  574. case SO_DETACH_FILTER:
  575. ret = sk_detach_filter(sk);
  576. break;
  577. case SO_PASSSEC:
  578. if (valbool)
  579. set_bit(SOCK_PASSSEC, &sock->flags);
  580. else
  581. clear_bit(SOCK_PASSSEC, &sock->flags);
  582. break;
  583. /* We implement SO_SNDLOWAT etc. to
  584. not be settable (1003.1g 5.3). */
  585. default:
  586. ret = -ENOPROTOOPT;
  587. break;
  588. }
  589. release_sock(sk);
  590. return ret;
  591. }
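/*
 * Illustrative userspace sketch (not part of this file): because the
 * SO_RCVBUF case above first caps the request at sysctl_rmem_max and then
 * doubles it to cover struct sk_buff overhead, reading the option back
 * returns roughly twice what was asked for:
 *
 *   int req = 65536, got;
 *   socklen_t len = sizeof(got);
 *   setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
 *   getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
 *   // got is typically 131072 (2 * 65536), assuming rmem_max allows it.
 */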
  592. int sock_getsockopt(struct socket *sock, int level, int optname,
  593. char __user *optval, int __user *optlen)
  594. {
  595. struct sock *sk = sock->sk;
  596. union {
  597. int val;
  598. struct linger ling;
  599. struct timeval tm;
  600. } v;
  601. unsigned int lv = sizeof(int);
  602. int len;
  603. if (get_user(len, optlen))
  604. return -EFAULT;
  605. if (len < 0)
  606. return -EINVAL;
  607. switch(optname) {
  608. case SO_DEBUG:
  609. v.val = sock_flag(sk, SOCK_DBG);
  610. break;
  611. case SO_DONTROUTE:
  612. v.val = sock_flag(sk, SOCK_LOCALROUTE);
  613. break;
  614. case SO_BROADCAST:
  615. v.val = !!sock_flag(sk, SOCK_BROADCAST);
  616. break;
  617. case SO_SNDBUF:
  618. v.val = sk->sk_sndbuf;
  619. break;
  620. case SO_RCVBUF:
  621. v.val = sk->sk_rcvbuf;
  622. break;
  623. case SO_REUSEADDR:
  624. v.val = sk->sk_reuse;
  625. break;
  626. case SO_KEEPALIVE:
  627. v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
  628. break;
  629. case SO_TYPE:
  630. v.val = sk->sk_type;
  631. break;
  632. case SO_ERROR:
  633. v.val = -sock_error(sk);
  634. if (v.val==0)
  635. v.val = xchg(&sk->sk_err_soft, 0);
  636. break;
  637. case SO_OOBINLINE:
  638. v.val = !!sock_flag(sk, SOCK_URGINLINE);
  639. break;
  640. case SO_NO_CHECK:
  641. v.val = sk->sk_no_check;
  642. break;
  643. case SO_PRIORITY:
  644. v.val = sk->sk_priority;
  645. break;
  646. case SO_LINGER:
  647. lv = sizeof(v.ling);
  648. v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
  649. v.ling.l_linger = sk->sk_lingertime / HZ;
  650. break;
  651. case SO_BSDCOMPAT:
  652. sock_warn_obsolete_bsdism("getsockopt");
  653. break;
  654. case SO_TIMESTAMP:
  655. v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
  656. !sock_flag(sk, SOCK_RCVTSTAMPNS);
  657. break;
  658. case SO_TIMESTAMPNS:
  659. v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
  660. break;
  661. case SO_RCVTIMEO:
  662. lv=sizeof(struct timeval);
  663. if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
  664. v.tm.tv_sec = 0;
  665. v.tm.tv_usec = 0;
  666. } else {
  667. v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
  668. v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
  669. }
  670. break;
  671. case SO_SNDTIMEO:
  672. lv=sizeof(struct timeval);
  673. if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
  674. v.tm.tv_sec = 0;
  675. v.tm.tv_usec = 0;
  676. } else {
  677. v.tm.tv_sec = sk->sk_sndtimeo / HZ;
  678. v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
  679. }
  680. break;
  681. case SO_RCVLOWAT:
  682. v.val = sk->sk_rcvlowat;
  683. break;
  684. case SO_SNDLOWAT:
  685. v.val=1;
  686. break;
  687. case SO_PASSCRED:
  688. v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
  689. break;
  690. case SO_PEERCRED:
  691. if (len > sizeof(sk->sk_peercred))
  692. len = sizeof(sk->sk_peercred);
  693. if (copy_to_user(optval, &sk->sk_peercred, len))
  694. return -EFAULT;
  695. goto lenout;
  696. case SO_PEERNAME:
  697. {
  698. char address[128];
  699. if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
  700. return -ENOTCONN;
  701. if (lv < len)
  702. return -EINVAL;
  703. if (copy_to_user(optval, address, len))
  704. return -EFAULT;
  705. goto lenout;
  706. }
  707. /* Dubious BSD thing... Probably nobody even uses it, but
  708. * the UNIX standard wants it for whatever reason... -DaveM
  709. */
  710. case SO_ACCEPTCONN:
  711. v.val = sk->sk_state == TCP_LISTEN;
  712. break;
  713. case SO_PASSSEC:
  714. v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
  715. break;
  716. case SO_PEERSEC:
  717. return security_socket_getpeersec_stream(sock, optval, optlen, len);
  718. default:
  719. return -ENOPROTOOPT;
  720. }
  721. if (len > lv)
  722. len = lv;
  723. if (copy_to_user(optval, &v, len))
  724. return -EFAULT;
  725. lenout:
  726. if (put_user(len, optlen))
  727. return -EFAULT;
  728. return 0;
  729. }
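/*
 * Illustrative userspace sketch (not part of this file): SO_ERROR is the
 * usual way to collect the result of a non-blocking connect() once the
 * socket becomes writable; reading it also clears the pending error:
 *
 *   int err = 0;
 *   socklen_t len = sizeof(err);
 *   if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err != 0)
 *           fprintf(stderr, "connect failed: %s\n", strerror(err));
 */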
  730. /*
  731. * Initialize an sk_lock.
  732. *
  733. * (We also register the sk_lock with the lock validator.)
  734. */
  735. static inline void sock_lock_init(struct sock *sk)
  736. {
  737. sock_lock_init_class_and_name(sk,
  738. af_family_slock_key_strings[sk->sk_family],
  739. af_family_slock_keys + sk->sk_family,
  740. af_family_key_strings[sk->sk_family],
  741. af_family_keys + sk->sk_family);
  742. }
  743. static void sock_copy(struct sock *nsk, const struct sock *osk)
  744. {
  745. #ifdef CONFIG_SECURITY_NETWORK
  746. void *sptr = nsk->sk_security;
  747. #endif
  748. memcpy(nsk, osk, osk->sk_prot->obj_size);
  749. #ifdef CONFIG_SECURITY_NETWORK
  750. nsk->sk_security = sptr;
  751. security_sk_clone(osk, nsk);
  752. #endif
  753. }
  754. static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
  755. int family)
  756. {
  757. struct sock *sk;
  758. struct kmem_cache *slab;
  759. slab = prot->slab;
  760. if (slab != NULL)
  761. sk = kmem_cache_alloc(slab, priority);
  762. else
  763. sk = kmalloc(prot->obj_size, priority);
  764. if (sk != NULL) {
  765. if (security_sk_alloc(sk, family, priority))
  766. goto out_free;
  767. if (!try_module_get(prot->owner))
  768. goto out_free_sec;
  769. }
  770. return sk;
  771. out_free_sec:
  772. security_sk_free(sk);
  773. out_free:
  774. if (slab != NULL)
  775. kmem_cache_free(slab, sk);
  776. else
  777. kfree(sk);
  778. return NULL;
  779. }
  780. static void sk_prot_free(struct proto *prot, struct sock *sk)
  781. {
  782. struct kmem_cache *slab;
  783. struct module *owner;
  784. owner = prot->owner;
  785. slab = prot->slab;
  786. security_sk_free(sk);
  787. if (slab != NULL)
  788. kmem_cache_free(slab, sk);
  789. else
  790. kfree(sk);
  791. module_put(owner);
  792. }
  793. /**
  794. * sk_alloc - All socket objects are allocated here
  795. * @net: the applicable net namespace
  796. * @family: protocol family
  797. * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
  798. * @prot: struct proto associated with this new sock instance
  800. */
  801. struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
  802. struct proto *prot)
  803. {
  804. struct sock *sk;
  805. sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
  806. if (sk) {
  807. sk->sk_family = family;
  808. /*
  809. * See comment in struct sock definition to understand
  810. * why we need sk_prot_creator -acme
  811. */
  812. sk->sk_prot = sk->sk_prot_creator = prot;
  813. sock_lock_init(sk);
  814. sk->sk_net = get_net(net);
  815. }
  816. return sk;
  817. }
  818. void sk_free(struct sock *sk)
  819. {
  820. struct sk_filter *filter;
  821. if (sk->sk_destruct)
  822. sk->sk_destruct(sk);
  823. filter = rcu_dereference(sk->sk_filter);
  824. if (filter) {
  825. sk_filter_uncharge(sk, filter);
  826. rcu_assign_pointer(sk->sk_filter, NULL);
  827. }
  828. sock_disable_timestamp(sk);
  829. if (atomic_read(&sk->sk_omem_alloc))
  830. printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
  831. __FUNCTION__, atomic_read(&sk->sk_omem_alloc));
  832. put_net(sk->sk_net);
  833. sk_prot_free(sk->sk_prot_creator, sk);
  834. }
  835. struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
  836. {
  837. struct sock *newsk;
  838. newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
  839. if (newsk != NULL) {
  840. struct sk_filter *filter;
  841. sock_copy(newsk, sk);
  842. /* SANITY */
  843. get_net(newsk->sk_net);
  844. sk_node_init(&newsk->sk_node);
  845. sock_lock_init(newsk);
  846. bh_lock_sock(newsk);
  847. newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
  848. atomic_set(&newsk->sk_rmem_alloc, 0);
  849. atomic_set(&newsk->sk_wmem_alloc, 0);
  850. atomic_set(&newsk->sk_omem_alloc, 0);
  851. skb_queue_head_init(&newsk->sk_receive_queue);
  852. skb_queue_head_init(&newsk->sk_write_queue);
  853. #ifdef CONFIG_NET_DMA
  854. skb_queue_head_init(&newsk->sk_async_wait_queue);
  855. #endif
  856. rwlock_init(&newsk->sk_dst_lock);
  857. rwlock_init(&newsk->sk_callback_lock);
  858. lockdep_set_class_and_name(&newsk->sk_callback_lock,
  859. af_callback_keys + newsk->sk_family,
  860. af_family_clock_key_strings[newsk->sk_family]);
  861. newsk->sk_dst_cache = NULL;
  862. newsk->sk_wmem_queued = 0;
  863. newsk->sk_forward_alloc = 0;
  864. newsk->sk_send_head = NULL;
  865. newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
  866. sock_reset_flag(newsk, SOCK_DONE);
  867. skb_queue_head_init(&newsk->sk_error_queue);
  868. filter = newsk->sk_filter;
  869. if (filter != NULL)
  870. sk_filter_charge(newsk, filter);
  871. if (unlikely(xfrm_sk_clone_policy(newsk))) {
  872. /* It is still a raw copy of the parent, so invalidate
  873. * the destructor and do a plain sk_free() */
  874. newsk->sk_destruct = NULL;
  875. sk_free(newsk);
  876. newsk = NULL;
  877. goto out;
  878. }
  879. newsk->sk_err = 0;
  880. newsk->sk_priority = 0;
  881. atomic_set(&newsk->sk_refcnt, 2);
  882. /*
  883. * Increment the counter in the same struct proto as the master
  884. * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
  885. * is the same as sk->sk_prot->socks, as this field was copied
  886. * with memcpy).
  887. *
  888. * This _changes_ the previous behaviour, where
  889. * tcp_create_openreq_child always incremented the
  890. * equivalent of tcp_prot->socks (inet_sock_nr), so this has
  891. * to be taken into account in all callers. -acme
  892. */
  893. sk_refcnt_debug_inc(newsk);
  894. newsk->sk_socket = NULL;
  895. newsk->sk_sleep = NULL;
  896. if (newsk->sk_prot->sockets_allocated)
  897. atomic_inc(newsk->sk_prot->sockets_allocated);
  898. }
  899. out:
  900. return newsk;
  901. }
  902. EXPORT_SYMBOL_GPL(sk_clone);
  903. void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
  904. {
  905. __sk_dst_set(sk, dst);
  906. sk->sk_route_caps = dst->dev->features;
  907. if (sk->sk_route_caps & NETIF_F_GSO)
  908. sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
  909. if (sk_can_gso(sk)) {
  910. if (dst->header_len)
  911. sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
  912. else
  913. sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
  914. }
  915. }
  916. EXPORT_SYMBOL_GPL(sk_setup_caps);
  917. void __init sk_init(void)
  918. {
  919. if (num_physpages <= 4096) {
  920. sysctl_wmem_max = 32767;
  921. sysctl_rmem_max = 32767;
  922. sysctl_wmem_default = 32767;
  923. sysctl_rmem_default = 32767;
  924. } else if (num_physpages >= 131072) {
  925. sysctl_wmem_max = 131071;
  926. sysctl_rmem_max = 131071;
  927. }
  928. }
  929. /*
  930. * Simple resource managers for sockets.
  931. */
  932. /*
  933. * Write buffer destructor automatically called from kfree_skb.
  934. */
  935. void sock_wfree(struct sk_buff *skb)
  936. {
  937. struct sock *sk = skb->sk;
  938. /* In case it might be waiting for more memory. */
  939. atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
  940. if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
  941. sk->sk_write_space(sk);
  942. sock_put(sk);
  943. }
  944. /*
  945. * Read buffer destructor automatically called from kfree_skb.
  946. */
  947. void sock_rfree(struct sk_buff *skb)
  948. {
  949. struct sock *sk = skb->sk;
  950. atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
  951. }
  952. int sock_i_uid(struct sock *sk)
  953. {
  954. int uid;
  955. read_lock(&sk->sk_callback_lock);
  956. uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
  957. read_unlock(&sk->sk_callback_lock);
  958. return uid;
  959. }
  960. unsigned long sock_i_ino(struct sock *sk)
  961. {
  962. unsigned long ino;
  963. read_lock(&sk->sk_callback_lock);
  964. ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
  965. read_unlock(&sk->sk_callback_lock);
  966. return ino;
  967. }
  968. /*
  969. * Allocate a skb from the socket's send buffer.
  970. */
  971. struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
  972. gfp_t priority)
  973. {
  974. if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
  975. struct sk_buff * skb = alloc_skb(size, priority);
  976. if (skb) {
  977. skb_set_owner_w(skb, sk);
  978. return skb;
  979. }
  980. }
  981. return NULL;
  982. }
  983. /*
  984. * Allocate a skb from the socket's receive buffer.
  985. */
  986. struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
  987. gfp_t priority)
  988. {
  989. if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
  990. struct sk_buff *skb = alloc_skb(size, priority);
  991. if (skb) {
  992. skb_set_owner_r(skb, sk);
  993. return skb;
  994. }
  995. }
  996. return NULL;
  997. }
  998. /*
  999. * Allocate a memory block from the socket's option memory buffer.
  1000. */
  1001. void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
  1002. {
  1003. if ((unsigned)size <= sysctl_optmem_max &&
  1004. atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
  1005. void *mem;
  1006. /* First do the add, to avoid the race if kmalloc
  1007. * might sleep.
  1008. */
  1009. atomic_add(size, &sk->sk_omem_alloc);
  1010. mem = kmalloc(size, priority);
  1011. if (mem)
  1012. return mem;
  1013. atomic_sub(size, &sk->sk_omem_alloc);
  1014. }
  1015. return NULL;
  1016. }
  1017. /*
  1018. * Free an option memory block.
  1019. */
  1020. void sock_kfree_s(struct sock *sk, void *mem, int size)
  1021. {
  1022. kfree(mem);
  1023. atomic_sub(size, &sk->sk_omem_alloc);
  1024. }
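/*
 * Usage sketch (hypothetical, not from this file): option memory is
 * charged against the socket, so allocations and frees must be paired
 * with the same size:
 *
 *   struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *   if (!opt)
 *           return -ENOBUFS;
 *   // ... use opt ...
 *   sock_kfree_s(sk, opt, sizeof(*opt));
 */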
  1025. /* This is almost wait_for_tcp_memory minus release_sock/lock_sock.
  1026. I think these locks should be removed for datagram sockets.
  1027. */
  1028. static long sock_wait_for_wmem(struct sock * sk, long timeo)
  1029. {
  1030. DEFINE_WAIT(wait);
  1031. clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
  1032. for (;;) {
  1033. if (!timeo)
  1034. break;
  1035. if (signal_pending(current))
  1036. break;
  1037. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  1038. prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
  1039. if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
  1040. break;
  1041. if (sk->sk_shutdown & SEND_SHUTDOWN)
  1042. break;
  1043. if (sk->sk_err)
  1044. break;
  1045. timeo = schedule_timeout(timeo);
  1046. }
  1047. finish_wait(sk->sk_sleep, &wait);
  1048. return timeo;
  1049. }
  1050. /*
  1051. * Generic send/receive buffer handlers
  1052. */
  1053. static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
  1054. unsigned long header_len,
  1055. unsigned long data_len,
  1056. int noblock, int *errcode)
  1057. {
  1058. struct sk_buff *skb;
  1059. gfp_t gfp_mask;
  1060. long timeo;
  1061. int err;
  1062. gfp_mask = sk->sk_allocation;
  1063. if (gfp_mask & __GFP_WAIT)
  1064. gfp_mask |= __GFP_REPEAT;
  1065. timeo = sock_sndtimeo(sk, noblock);
  1066. while (1) {
  1067. err = sock_error(sk);
  1068. if (err != 0)
  1069. goto failure;
  1070. err = -EPIPE;
  1071. if (sk->sk_shutdown & SEND_SHUTDOWN)
  1072. goto failure;
  1073. if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
  1074. skb = alloc_skb(header_len, gfp_mask);
  1075. if (skb) {
  1076. int npages;
  1077. int i;
  1078. /* No pages, we're done... */
  1079. if (!data_len)
  1080. break;
  1081. npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
  1082. skb->truesize += data_len;
  1083. skb_shinfo(skb)->nr_frags = npages;
  1084. for (i = 0; i < npages; i++) {
  1085. struct page *page;
  1086. skb_frag_t *frag;
  1087. page = alloc_pages(sk->sk_allocation, 0);
  1088. if (!page) {
  1089. err = -ENOBUFS;
  1090. skb_shinfo(skb)->nr_frags = i;
  1091. kfree_skb(skb);
  1092. goto failure;
  1093. }
  1094. frag = &skb_shinfo(skb)->frags[i];
  1095. frag->page = page;
  1096. frag->page_offset = 0;
  1097. frag->size = (data_len >= PAGE_SIZE ?
  1098. PAGE_SIZE :
  1099. data_len);
  1100. data_len -= PAGE_SIZE;
  1101. }
  1102. /* Full success... */
  1103. break;
  1104. }
  1105. err = -ENOBUFS;
  1106. goto failure;
  1107. }
  1108. set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
  1109. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  1110. err = -EAGAIN;
  1111. if (!timeo)
  1112. goto failure;
  1113. if (signal_pending(current))
  1114. goto interrupted;
  1115. timeo = sock_wait_for_wmem(sk, timeo);
  1116. }
  1117. skb_set_owner_w(skb, sk);
  1118. return skb;
  1119. interrupted:
  1120. err = sock_intr_errno(timeo);
  1121. failure:
  1122. *errcode = err;
  1123. return NULL;
  1124. }
  1125. struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
  1126. int noblock, int *errcode)
  1127. {
  1128. return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
  1129. }
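/*
 * Usage sketch (hypothetical datagram sendmsg path, not from this file):
 *
 *   skb = sock_alloc_send_skb(sk, len + hdr_room,        // hdr_room: hypothetical
 *                             msg->msg_flags & MSG_DONTWAIT, &err);
 *   if (!skb)
 *           goto out_err;   // err already holds -EAGAIN, -EPIPE, ...
 *   // copy user data into skb, build headers, then queue/transmit it
 */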
  1130. static void __lock_sock(struct sock *sk)
  1131. {
  1132. DEFINE_WAIT(wait);
  1133. for (;;) {
  1134. prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
  1135. TASK_UNINTERRUPTIBLE);
  1136. spin_unlock_bh(&sk->sk_lock.slock);
  1137. schedule();
  1138. spin_lock_bh(&sk->sk_lock.slock);
  1139. if (!sock_owned_by_user(sk))
  1140. break;
  1141. }
  1142. finish_wait(&sk->sk_lock.wq, &wait);
  1143. }
  1144. static void __release_sock(struct sock *sk)
  1145. {
  1146. struct sk_buff *skb = sk->sk_backlog.head;
  1147. do {
  1148. sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
  1149. bh_unlock_sock(sk);
  1150. do {
  1151. struct sk_buff *next = skb->next;
  1152. skb->next = NULL;
  1153. sk->sk_backlog_rcv(sk, skb);
  1154. /*
  1155. * We are in process context here with softirqs
  1156. * disabled, use cond_resched_softirq() to preempt.
  1157. * This is safe to do because we've taken the backlog
  1158. * queue private:
  1159. */
  1160. cond_resched_softirq();
  1161. skb = next;
  1162. } while (skb != NULL);
  1163. bh_lock_sock(sk);
  1164. } while ((skb = sk->sk_backlog.head) != NULL);
  1165. }
  1166. /**
  1167. * sk_wait_data - wait for data to arrive at sk_receive_queue
  1168. * @sk: sock to wait on
  1169. * @timeo: for how long
  1170. *
  1171. * Now socket state including sk->sk_err is changed only under lock,
  1172. * hence we may omit checks after joining wait queue.
  1173. * We check receive queue before schedule() only as optimization;
  1174. * it is very likely that release_sock() added new data.
  1175. */
  1176. int sk_wait_data(struct sock *sk, long *timeo)
  1177. {
  1178. int rc;
  1179. DEFINE_WAIT(wait);
  1180. prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
  1181. set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
  1182. rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
  1183. clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
  1184. finish_wait(sk->sk_sleep, &wait);
  1185. return rc;
  1186. }
  1187. EXPORT_SYMBOL(sk_wait_data);
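/*
 * Usage sketch (hypothetical, not from this file): a blocking recvmsg
 * implementation typically loops on sk_wait_data() with the socket lock
 * held (sk_wait_event drops and retakes it around schedule()):
 *
 *   lock_sock(sk);
 *   while (skb_queue_empty(&sk->sk_receive_queue)) {
 *           if (!timeo) { err = -EAGAIN; goto out; }
 *           if (signal_pending(current)) { err = sock_intr_errno(timeo); goto out; }
 *           sk_wait_data(sk, &timeo);
 *   }
 *   // dequeue and copy an skb here
 *   out:
 *   release_sock(sk);
 */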
  1188. /*
  1189. * Set of default routines for initialising struct proto_ops when
  1190. * the protocol does not support a particular function. In certain
  1191. * cases where it makes no sense for a protocol to have a "do nothing"
  1192. * function, some default processing is provided.
  1193. */
  1194. int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
  1195. {
  1196. return -EOPNOTSUPP;
  1197. }
  1198. int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
  1199. int len, int flags)
  1200. {
  1201. return -EOPNOTSUPP;
  1202. }
  1203. int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
  1204. {
  1205. return -EOPNOTSUPP;
  1206. }
  1207. int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
  1208. {
  1209. return -EOPNOTSUPP;
  1210. }
  1211. int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
  1212. int *len, int peer)
  1213. {
  1214. return -EOPNOTSUPP;
  1215. }
  1216. unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
  1217. {
  1218. return 0;
  1219. }
  1220. int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
  1221. {
  1222. return -EOPNOTSUPP;
  1223. }
  1224. int sock_no_listen(struct socket *sock, int backlog)
  1225. {
  1226. return -EOPNOTSUPP;
  1227. }
  1228. int sock_no_shutdown(struct socket *sock, int how)
  1229. {
  1230. return -EOPNOTSUPP;
  1231. }
  1232. int sock_no_setsockopt(struct socket *sock, int level, int optname,
  1233. char __user *optval, int optlen)
  1234. {
  1235. return -EOPNOTSUPP;
  1236. }
  1237. int sock_no_getsockopt(struct socket *sock, int level, int optname,
  1238. char __user *optval, int __user *optlen)
  1239. {
  1240. return -EOPNOTSUPP;
  1241. }
  1242. int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
  1243. size_t len)
  1244. {
  1245. return -EOPNOTSUPP;
  1246. }
  1247. int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
  1248. size_t len, int flags)
  1249. {
  1250. return -EOPNOTSUPP;
  1251. }
  1252. int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
  1253. {
  1254. /* Mirror missing mmap method error code */
  1255. return -ENODEV;
  1256. }
  1257. ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
  1258. {
  1259. ssize_t res;
  1260. struct msghdr msg = {.msg_flags = flags};
  1261. struct kvec iov;
  1262. char *kaddr = kmap(page);
  1263. iov.iov_base = kaddr + offset;
  1264. iov.iov_len = size;
  1265. res = kernel_sendmsg(sock, &msg, &iov, 1, size);
  1266. kunmap(page);
  1267. return res;
  1268. }
  1269. /*
  1270. * Default Socket Callbacks
  1271. */
  1272. static void sock_def_wakeup(struct sock *sk)
  1273. {
  1274. read_lock(&sk->sk_callback_lock);
  1275. if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
  1276. wake_up_interruptible_all(sk->sk_sleep);
  1277. read_unlock(&sk->sk_callback_lock);
  1278. }
  1279. static void sock_def_error_report(struct sock *sk)
  1280. {
  1281. read_lock(&sk->sk_callback_lock);
  1282. if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
  1283. wake_up_interruptible(sk->sk_sleep);
  1284. sk_wake_async(sk,0,POLL_ERR);
  1285. read_unlock(&sk->sk_callback_lock);
  1286. }
  1287. static void sock_def_readable(struct sock *sk, int len)
  1288. {
  1289. read_lock(&sk->sk_callback_lock);
  1290. if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
  1291. wake_up_interruptible(sk->sk_sleep);
  1292. sk_wake_async(sk,1,POLL_IN);
  1293. read_unlock(&sk->sk_callback_lock);
  1294. }
  1295. static void sock_def_write_space(struct sock *sk)
  1296. {
  1297. read_lock(&sk->sk_callback_lock);
  1298. /* Do not wake up a writer until he can make "significant"
  1299. * progress. --DaveM
  1300. */
  1301. if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
  1302. if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
  1303. wake_up_interruptible(sk->sk_sleep);
  1304. /* Should agree with poll, otherwise some programs break */
  1305. if (sock_writeable(sk))
  1306. sk_wake_async(sk, 2, POLL_OUT);
  1307. }
  1308. read_unlock(&sk->sk_callback_lock);
  1309. }
  1310. static void sock_def_destruct(struct sock *sk)
  1311. {
  1312. kfree(sk->sk_protinfo);
  1313. }
  1314. void sk_send_sigurg(struct sock *sk)
  1315. {
  1316. if (sk->sk_socket && sk->sk_socket->file)
  1317. if (send_sigurg(&sk->sk_socket->file->f_owner))
  1318. sk_wake_async(sk, 3, POLL_PRI);
  1319. }
  1320. void sk_reset_timer(struct sock *sk, struct timer_list* timer,
  1321. unsigned long expires)
  1322. {
  1323. if (!mod_timer(timer, expires))
  1324. sock_hold(sk);
  1325. }
  1326. EXPORT_SYMBOL(sk_reset_timer);
  1327. void sk_stop_timer(struct sock *sk, struct timer_list* timer)
  1328. {
  1329. if (timer_pending(timer) && del_timer(timer))
  1330. __sock_put(sk);
  1331. }
  1332. EXPORT_SYMBOL(sk_stop_timer);
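/*
 * Usage sketch (hypothetical, not from this file): protocol timers hold a
 * socket reference while pending, so arm and stop them through these
 * helpers rather than mod_timer()/del_timer() directly:
 *
 *   sk_reset_timer(sk, &my_timer, jiffies + HZ);  // sock_hold() if newly armed
 *   // ...
 *   sk_stop_timer(sk, &my_timer);                 // __sock_put() if it was pending
 */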
  1333. void sock_init_data(struct socket *sock, struct sock *sk)
  1334. {
  1335. skb_queue_head_init(&sk->sk_receive_queue);
  1336. skb_queue_head_init(&sk->sk_write_queue);
  1337. skb_queue_head_init(&sk->sk_error_queue);
  1338. #ifdef CONFIG_NET_DMA
  1339. skb_queue_head_init(&sk->sk_async_wait_queue);
  1340. #endif
  1341. sk->sk_send_head = NULL;
  1342. init_timer(&sk->sk_timer);
  1343. sk->sk_allocation = GFP_KERNEL;
  1344. sk->sk_rcvbuf = sysctl_rmem_default;
  1345. sk->sk_sndbuf = sysctl_wmem_default;
  1346. sk->sk_state = TCP_CLOSE;
  1347. sk->sk_socket = sock;
  1348. sock_set_flag(sk, SOCK_ZAPPED);
  1349. if (sock) {
  1350. sk->sk_type = sock->type;
  1351. sk->sk_sleep = &sock->wait;
  1352. sock->sk = sk;
  1353. } else
  1354. sk->sk_sleep = NULL;
  1355. rwlock_init(&sk->sk_dst_lock);
  1356. rwlock_init(&sk->sk_callback_lock);
  1357. lockdep_set_class_and_name(&sk->sk_callback_lock,
  1358. af_callback_keys + sk->sk_family,
  1359. af_family_clock_key_strings[sk->sk_family]);
  1360. sk->sk_state_change = sock_def_wakeup;
  1361. sk->sk_data_ready = sock_def_readable;
  1362. sk->sk_write_space = sock_def_write_space;
  1363. sk->sk_error_report = sock_def_error_report;
  1364. sk->sk_destruct = sock_def_destruct;
  1365. sk->sk_sndmsg_page = NULL;
  1366. sk->sk_sndmsg_off = 0;
  1367. sk->sk_peercred.pid = 0;
  1368. sk->sk_peercred.uid = -1;
  1369. sk->sk_peercred.gid = -1;
  1370. sk->sk_write_pending = 0;
  1371. sk->sk_rcvlowat = 1;
  1372. sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
  1373. sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
  1374. sk->sk_stamp = ktime_set(-1L, -1L);
  1375. atomic_set(&sk->sk_refcnt, 1);
  1376. atomic_set(&sk->sk_drops, 0);
  1377. }
  1378. void fastcall lock_sock_nested(struct sock *sk, int subclass)
  1379. {
  1380. might_sleep();
  1381. spin_lock_bh(&sk->sk_lock.slock);
  1382. if (sk->sk_lock.owned)
  1383. __lock_sock(sk);
  1384. sk->sk_lock.owned = 1;
  1385. spin_unlock(&sk->sk_lock.slock);
  1386. /*
  1387. * The sk_lock has mutex_lock() semantics here:
  1388. */
  1389. mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
  1390. local_bh_enable();
  1391. }
  1392. EXPORT_SYMBOL(lock_sock_nested);
  1393. void fastcall release_sock(struct sock *sk)
  1394. {
  1395. /*
  1396. * The sk_lock has mutex_unlock() semantics:
  1397. */
  1398. mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
  1399. spin_lock_bh(&sk->sk_lock.slock);
  1400. if (sk->sk_backlog.tail)
  1401. __release_sock(sk);
  1402. sk->sk_lock.owned = 0;
  1403. if (waitqueue_active(&sk->sk_lock.wq))
  1404. wake_up(&sk->sk_lock.wq);
  1405. spin_unlock_bh(&sk->sk_lock.slock);
  1406. }
  1407. EXPORT_SYMBOL(release_sock);
  1408. int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
  1409. {
  1410. struct timeval tv;
  1411. if (!sock_flag(sk, SOCK_TIMESTAMP))
  1412. sock_enable_timestamp(sk);
  1413. tv = ktime_to_timeval(sk->sk_stamp);
  1414. if (tv.tv_sec == -1)
  1415. return -ENOENT;
  1416. if (tv.tv_sec == 0) {
  1417. sk->sk_stamp = ktime_get_real();
  1418. tv = ktime_to_timeval(sk->sk_stamp);
  1419. }
  1420. return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
  1421. }
  1422. EXPORT_SYMBOL(sock_get_timestamp);
  1423. int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
  1424. {
  1425. struct timespec ts;
  1426. if (!sock_flag(sk, SOCK_TIMESTAMP))
  1427. sock_enable_timestamp(sk);
  1428. ts = ktime_to_timespec(sk->sk_stamp);
  1429. if (ts.tv_sec == -1)
  1430. return -ENOENT;
  1431. if (ts.tv_sec == 0) {
  1432. sk->sk_stamp = ktime_get_real();
  1433. ts = ktime_to_timespec(sk->sk_stamp);
  1434. }
  1435. return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
  1436. }
  1437. EXPORT_SYMBOL(sock_get_timestampns);
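/*
 * Illustrative userspace sketch (not part of this file): these helpers
 * back the SIOCGSTAMP/SIOCGSTAMPNS ioctls, which report when the most
 * recently received packet was timestamped:
 *
 *   struct timeval tv;
 *   recv(fd, buf, sizeof(buf), 0);
 *   if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
 *           printf("packet received at %ld.%06ld\n",
 *                  (long)tv.tv_sec, (long)tv.tv_usec);
 */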
  1438. void sock_enable_timestamp(struct sock *sk)
  1439. {
  1440. if (!sock_flag(sk, SOCK_TIMESTAMP)) {
  1441. sock_set_flag(sk, SOCK_TIMESTAMP);
  1442. net_enable_timestamp();
  1443. }
  1444. }
  1445. /*
  1446. * Get a socket option on a socket.
  1447. *
  1448. * FIX: POSIX 1003.1g is very ambiguous here. It states that
  1449. * asynchronous errors should be reported by getsockopt. We assume
  1450. * this means if you specify SO_ERROR (otherwise what's the point of it).
  1451. */
  1452. int sock_common_getsockopt(struct socket *sock, int level, int optname,
  1453. char __user *optval, int __user *optlen)
  1454. {
  1455. struct sock *sk = sock->sk;
  1456. return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
  1457. }
  1458. EXPORT_SYMBOL(sock_common_getsockopt);
#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

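/*
 * Illustrative wiring sketch (assumed protocol, not from this file):
 * address families without per-socket quirks simply point their
 * proto_ops at the common helpers above, so the struct proto hooks of
 * the underlying protocol are what actually run.
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family		= PF_EXAMPLE,
 *		.owner		= THIS_MODULE,
 *		.setsockopt	= sock_common_setsockopt,
 *		.getsockopt	= sock_common_getsockopt,
 *		.recvmsg	= sock_common_recvmsg,
 *		...
 *	};
 */
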
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network stack still does.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight because another
	 * CPU's receive path did its hash table lookup before we unhashed
	 * the socket. Those packets will reach the receive queue and be
	 * purged by the socket destructor.
	 *
	 * We may also still have packets pending on the receive queue and,
	 * probably, our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

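/*
 * Illustrative sketch (assumed protocol, not from this file): a protocol
 * with no special teardown of its own can route its close path straight
 * to this helper from its struct proto.
 *
 *	static void example_close(struct sock *sk, long timeout)
 *	{
 *		sk_common_release(sk);
 *	}
 *
 *	static struct proto example_proto = {
 *		...
 *		.close	= example_close,
 *		...
 *	};
 */
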
static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_SMP
/*
 * Define default functions to keep track of in-use sockets per protocol.
 * Note that frequently used protocols supply dedicated functions for a
 * speed increase (see DEFINE_PROTO_INUSE/REF_PROTO_INUSE); an illustrative
 * sketch follows this block.
 */
static void inuse_add(struct proto *prot, int inc)
{
	per_cpu_ptr(prot->inuse_ptr, smp_processor_id())[0] += inc;
}

static int inuse_get(const struct proto *prot)
{
	int res = 0, cpu;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(prot->inuse_ptr, cpu)[0];
	return res;
}

static int inuse_init(struct proto *prot)
{
	if (!prot->inuse_getval || !prot->inuse_add) {
		prot->inuse_ptr = alloc_percpu(int);
		if (prot->inuse_ptr == NULL)
			return -ENOBUFS;

		prot->inuse_getval = inuse_get;
		prot->inuse_add = inuse_add;
	}
	return 0;
}

static void inuse_fini(struct proto *prot)
{
	if (prot->inuse_ptr != NULL) {
		free_percpu(prot->inuse_ptr);
		prot->inuse_ptr = NULL;
		prot->inuse_getval = NULL;
		prot->inuse_add = NULL;
	}
}
#else
static inline int inuse_init(struct proto *prot)
{
	return 0;
}

static inline void inuse_fini(struct proto *prot)
{
}
#endif

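/*
 * Illustrative sketch (assumed, not from this file): a heavily used
 * protocol can avoid the indirection above by supplying its own per-cpu
 * counters before proto_register(); inuse_init() only installs the
 * generic inuse_add()/inuse_get() pair when neither hook is set.
 *
 *	static DEFINE_PER_CPU(int, example_inuse);
 *
 *	static void example_inuse_add(struct proto *prot, int inc)
 *	{
 *		__get_cpu_var(example_inuse) += inc;
 *	}
 *
 *	static int example_inuse_getval(const struct proto *prot)
 *	{
 *		int res = 0, cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			res += per_cpu(example_inuse, cpu);
 *		return res;
 *	}
 *
 *	static struct proto example_proto = {
 *		...
 *		.inuse_getval	= example_inuse_getval,
 *		.inuse_add	= example_inuse_add,
 *	};
 */
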
int proto_register(struct proto *prot, int alloc_slab)
{
	char *request_sock_slab_name = NULL;
	char *timewait_sock_slab_name;

	if (inuse_init(prot))
		goto out;

	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					       SLAB_HWCACHE_ALIGN, NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out_free_inuse;
		}

		if (prot->rsk_prot != NULL) {
			static const char mask[] = "request_sock_%s";

			request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (request_sock_slab_name == NULL)
				goto out_free_sock_slab;

			sprintf(request_sock_slab_name, mask, prot->name);
			prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			static const char mask[] = "tw_sock_%s";

			timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (timewait_sock_slab_name == NULL)
				goto out_free_request_sock_slab;

			sprintf(timewait_sock_slab_name, mask, prot->name);
			prot->twsk_prot->twsk_slab =
				kmem_cache_create(timewait_sock_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0, SLAB_HWCACHE_ALIGN,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	write_unlock(&proto_list_lock);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(timewait_sock_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	kfree(request_sock_slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out_free_inuse:
	inuse_fini(prot);
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	inuse_fini(prot);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		const char *name = kmem_cache_name(prot->rsk_prot->slab);

		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);

		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

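/*
 * Illustrative module sketch (assumed, not from this file): a protocol
 * module registers its struct proto on load, passing alloc_slab so a
 * sock cache of obj_size bytes is created, and unregisters on unload.
 *
 *	static struct proto example_proto = {
 *		.name		= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct example_sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_proto, 1);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		proto_unregister(&example_proto);
 *	}
 */
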
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&proto_list_lock);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1,
		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &proto_seq_ops);
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proto_init(void)
{
	/* register /proc/net/protocols */
	return proc_net_fops_create(&init_net, "protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0;
}

subsys_initcall(proto_init);

#endif /* PROC_FS */

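/*
 * Userspace view (illustrative sketch, not from this file): the table
 * registered above is an ordinary proc file; the header row printed by
 * proto_seq_show() names the columns emitted by proto_seq_printf().
 *
 *	FILE *f = fopen("/proc/net/protocols", "r");
 *	char line[256];
 *
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);
 *	if (f)
 *		fclose(f);
 */
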
EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sk_free);
EXPORT_SYMBOL(sk_send_sigurg);
EXPORT_SYMBOL(sock_alloc_send_skb);
EXPORT_SYMBOL(sock_init_data);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_no_accept);
EXPORT_SYMBOL(sock_no_bind);
EXPORT_SYMBOL(sock_no_connect);
EXPORT_SYMBOL(sock_no_getname);
EXPORT_SYMBOL(sock_no_getsockopt);
EXPORT_SYMBOL(sock_no_ioctl);
EXPORT_SYMBOL(sock_no_listen);
EXPORT_SYMBOL(sock_no_mmap);
EXPORT_SYMBOL(sock_no_poll);
EXPORT_SYMBOL(sock_no_recvmsg);
EXPORT_SYMBOL(sock_no_sendmsg);
EXPORT_SYMBOL(sock_no_sendpage);
EXPORT_SYMBOL(sock_no_setsockopt);
EXPORT_SYMBOL(sock_no_shutdown);
EXPORT_SYMBOL(sock_no_socketpair);
EXPORT_SYMBOL(sock_rfree);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max);