/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Version:	$Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif
/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-21"       , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-29"          ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX"
};
static const char *af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-21"       , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-29"          ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_MAX"
};
static const char *af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-21"       , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-29"          ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_MAX"
};
#endif
/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
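
/*
 * Editorial note (not in the original source): as a worked example of the
 * sizing above, on a hypothetical platform where sizeof(struct sk_buff) is
 * 256 bytes, _SK_MEM_OVERHEAD is 256 + 256 = 512 bytes per packet, so
 * SK_WMEM_MAX and SK_RMEM_MAX come out to 512 * 256 = 131072 bytes: enough
 * for 256 queued packets, whatever the platform's actual sk_buff size.
 */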

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, current->pid);
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];

	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk)
{
	if (sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_reset_flag(sk, SOCK_TIMESTAMP);
		net_disable_timestamp();
	}
}
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_filter(sk, skb);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
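
/*
 * Editorial sketch (not part of the original file): a minimal datagram
 * protocol receive handler built on sock_queue_rcv_skb(). The names
 * myproto_rcv() and myproto_lookup_sock() are hypothetical. Note that on
 * failure (-ENOMEM when the receive buffer is full, or a socket-filter
 * error) the caller still owns the skb and must free it:
 *
 *	static int myproto_rcv(struct sk_buff *skb)
 *	{
 *		struct sock *sk = myproto_lookup_sock(skb);
 *
 *		if (sk == NULL) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		if (sock_queue_rcv_skb(sk, skb) < 0)
 *			kfree_skb(skb);
 *		sock_put(sk);
 *		return NET_RX_SUCCESS;
 *	}
 */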

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk->sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->sk_dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->sk_dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sk->sk_net;
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	if (devname[0] == '\0') {
		index = 0;
	} else {
		struct net_device *dev = dev_get_by_name(net, devname);

		ret = -ENODEV;
		if (!dev)
			goto out;

		index = dev->ifindex;
		dev_put(dev);
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
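
/*
 * Editorial sketch (not part of the original file): how user space reaches
 * sock_bindtodevice() above. A process with CAP_NET_RAW ties the socket to
 * one interface; an empty name (or option length zero) removes the binding:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       "eth0", strlen("eth0")) < 0)
 *		perror("SO_BINDTODEVICE");	(EPERM without CAP_NET_RAW)
 */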

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct sk_filter *filter;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		lock_sock(sk);
		sock_reset_flag(sk, SOCK_LINGER);
		release_sock(sk);
		return 0;
	}
#endif

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN)) {
			ret = -EACCES;
		} else if (valbool)
			sock_set_flag(sk, SOCK_DBG);
		else
			sock_reset_flag(sk, SOCK_DBG);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		if (valbool)
			sock_set_flag(sk, SOCK_LOCALROUTE);
		else
			sock_reset_flag(sk, SOCK_LOCALROUTE);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;
	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		rcu_read_lock_bh();
		filter = rcu_dereference(sk->sk_filter);
		if (filter) {
			rcu_assign_pointer(sk->sk_filter, NULL);
			sk_filter_release(sk, filter);
			rcu_read_unlock_bh();
			break;
		}
		rcu_read_unlock_bh();
		ret = -ENONET;
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
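
/*
 * Editorial sketch (not part of the original file): the doubling performed
 * for SO_SNDBUF/SO_RCVBUF above is visible from user space, because
 * getsockopt() reports the value actually used:
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *
 * out is now 131072 (2 * val, assuming val did not exceed sysctl_rmem_max),
 * the extra headroom covering struct sk_buff and related overhead.
 */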

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	unsigned int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@zero_it: if we should zero the newly allocated sock
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int zero_it)
{
	struct sock *sk = NULL;
	struct kmem_cache *slab = prot->slab;

	if (slab != NULL)
		sk = kmem_cache_alloc(slab, priority);
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk) {
		if (zero_it) {
			memset(sk, 0, prot->obj_size);
			sk->sk_family = family;
			/*
			 * See comment in struct sock definition to understand
			 * why we need sk_prot_creator -acme
			 */
			sk->sk_prot = sk->sk_prot_creator = prot;
			sock_lock_init(sk);
			sk->sk_net = get_net(net);
		}

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free;
	}
	return sk;

out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

void sk_free(struct sock *sk)
{
	struct sk_filter *filter;
	struct module *owner = sk->sk_prot_creator->owner;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		sk_filter_release(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));

	security_sk_free(sk);
	put_net(sk->sk_net);
	if (sk->sk_prot_creator->slab != NULL)
		kmem_cache_free(sk->sk_prot_creator->slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk = sk_alloc(sk->sk_net, sk->sk_family, priority, sk->sk_prot, 0);

	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		newsk->sk_socket = NULL;
		newsk->sk_sleep = NULL;

		if (newsk->sk_prot->sockets_allocated)
			atomic_inc(newsk->sk_prot->sockets_allocated);
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	if (sk_can_gso(sk)) {
		if (dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		else
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
		sk->sk_write_space(sk);
	sock_put(sk);
}

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);

		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);

		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;

		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
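
/*
 * Editorial sketch (not part of the original file): sock_kmalloc() and
 * sock_kfree_s() must be used as a pair with the same size, since the
 * accounting in sk_omem_alloc is charged on allocation and credited back
 * on free. A hypothetical option handler:
 *
 *	void *buf = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *
 *	if (buf == NULL)
 *		return -ENOBUFS;	(would exceed sysctl_optmem_max)
 *	...
 *	sock_kfree_s(sk, buf, optlen);
 */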

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}

/*
 *	Generic send/receive buffer handlers
 */

static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
					    unsigned long header_len,
					    unsigned long data_len,
					    int noblock, int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
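
/*
 * Editorial sketch (not part of the original file): typical use of
 * sock_alloc_send_skb() in a datagram sendmsg() path. The call blocks,
 * subject to the socket's send timeout, until sk_wmem_alloc drops below
 * sk_sndbuf, so the caller only handles the error code. "reserve" stands
 * for whatever header room the protocol needs:
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (skb == NULL)
 *		goto out;	(err is -EAGAIN, -EPIPE, a signal errno, ...)
 */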

static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk->sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
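
/*
 * Editorial sketch (not part of the original file): the usual shape of a
 * protocol's recvmsg() wait loop around sk_wait_data(), entered with the
 * socket locked. sk_wait_event() releases and retakes the lock while
 * sleeping, which lets the backlog deliver new data:
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (sk->sk_err || !timeo)
 *			break;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */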

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
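
/*
 * Editorial sketch (not part of the original file): a connectionless
 * address family would wire the sock_no_*() stubs above into its
 * proto_ops for each operation it does not support. AF_MYPROTO and
 * myproto_ops are hypothetical:
 *
 *	static const struct proto_ops myproto_ops = {
 *		.family		= AF_MYPROTO,
 *		.owner		= THIS_MODULE,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.socketpair	= sock_no_socketpair,
 *		.mmap		= sock_no_mmap,
 *		...
 *	};
 */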

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_error_report(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 0, POLL_ERR);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 1, POLL_IN);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* Do not wake up a writer until he can make "significant"
	 * progress. --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, 2, POLL_OUT);
	}

	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, 3, POLL_PRI);
}

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
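
/*
 * Editorial sketch (not part of the original file): sk_reset_timer() takes
 * a reference on the socket when it arms a previously idle timer, and
 * sk_stop_timer() drops it again. A timer handler that is allowed to fire
 * must therefore drop the reference itself on its way out; the handler
 * name here is hypothetical:
 *
 *	static void myproto_timer(unsigned long data)
 *	{
 *		struct sock *sk = (struct sock *)data;
 *		...
 *		sock_put(sk);	(pairs with the hold from sk_reset_timer())
 *	}
 */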

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk->sk_socket = sock;

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_sleep = &sock->wait;
		sock->sk = sk;
	} else
		sk->sk_sleep = NULL;

	rwlock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_sndmsg_page = NULL;
	sk->sk_sndmsg_off = 0;

	sk->sk_peercred.pid = 0;
	sk->sk_peercred.uid = -1;
	sk->sk_peercred.gid = -1;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, -1L);

	atomic_set(&sk->sk_refcnt, 1);
}

void fastcall lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void fastcall release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_set_flag(sk, SOCK_TIMESTAMP);
		net_enable_timestamp();
	}
}
EXPORT_SYMBOL(sock_enable_timestamp);
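
/*
 * Editorial sketch (not part of the original file): sock_get_timestamp()
 * above is what backs the traditional SIOCGSTAMP ioctl, which user space
 * uses to fetch the receive time of the last packet:
 *
 *	struct timeval tv;
 *
 *	recv(fd, buf, sizeof(buf), 0);
 *	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
 *		printf("rx at %ld.%06ld\n", tv.tv_sec, tv.tv_usec);
 */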

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);
  1463. /*
  1464. * Set socket options on an inet socket.
  1465. */
  1466. int sock_common_setsockopt(struct socket *sock, int level, int optname,
  1467. char __user *optval, int optlen)
  1468. {
  1469. struct sock *sk = sock->sk;
  1470. return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
  1471. }
  1472. EXPORT_SYMBOL(sock_common_setsockopt);
  1473. #ifdef CONFIG_COMPAT
  1474. int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
  1475. char __user *optval, int optlen)
  1476. {
  1477. struct sock *sk = sock->sk;
  1478. if (sk->sk_prot->compat_setsockopt != NULL)
  1479. return sk->sk_prot->compat_setsockopt(sk, level, optname,
  1480. optval, optlen);
  1481. return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
  1482. }
  1483. EXPORT_SYMBOL(compat_sock_common_setsockopt);
  1484. #endif
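
/*
 * Illustrative sketch (editor's addition): address families reuse the
 * helpers above by wiring them into their struct proto_ops, so the
 * per-protocol struct proto callbacks are reached through a common path.
 * The ops table below is hypothetical (modeled on the inet stream ops);
 * the remaining handlers are omitted.
 *
 *	static const struct proto_ops example_stream_ops = {
 *		.family	    = PF_INET,
 *		.owner	    = THIS_MODULE,
 *		.setsockopt = sock_common_setsockopt,
 *		.getsockopt = sock_common_getsockopt,
 *		.recvmsg    = sock_common_recvmsg,
 *	};
 */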
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network stack still does.
	 * Step one, detach it from networking:
	 *
	 * A. Remove it from the hash tables.
	 */
	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight because another
	 * CPU runs a receiver and did the hash table lookup before we
	 * unhashed the socket. They will reach the receive queue and will
	 * be purged by the socket destructor.
	 *
	 * Also, we may still have packets pending on the receive queue
	 * and, probably, our own packets waiting in device queues.
	 * sock_destroy will drain the receive queue, but transmitted
	 * packets will delay socket destruction until the last reference
	 * is released.
	 */
	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
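
/*
 * Illustrative sketch (editor's addition): a datagram protocol's close
 * handler can simply end in sk_common_release(), relying on the
 * destroy/unhash/orphan/put sequence above. Hypothetical example:
 *
 *	static void example_close(struct sock *sk, long timeout)
 *	{
 *		sk_common_release(sk);
 *	}
 */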
static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

int proto_register(struct proto *prot, int alloc_slab)
{
	char *request_sock_slab_name = NULL;
	char *timewait_sock_slab_name;
	int rc = -ENOBUFS;

	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					       SLAB_HWCACHE_ALIGN, NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			static const char mask[] = "request_sock_%s";

			request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (request_sock_slab_name == NULL)
				goto out_free_sock_slab;

			sprintf(request_sock_slab_name, mask, prot->name);
			prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			static const char mask[] = "tw_sock_%s";

			timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);

			if (timewait_sock_slab_name == NULL)
				goto out_free_request_sock_slab;

			sprintf(timewait_sock_slab_name, mask, prot->name);
			prot->twsk_prot->twsk_slab =
				kmem_cache_create(timewait_sock_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0, SLAB_HWCACHE_ALIGN,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	write_unlock(&proto_list_lock);
	rc = 0;
out:
	return rc;
out_free_timewait_sock_slab_name:
	kfree(timewait_sock_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	kfree(request_sock_slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
	goto out;
}
EXPORT_SYMBOL(proto_register);
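
/*
 * Illustrative sketch (editor's addition): a protocol module registers
 * its struct proto at init time and unregisters it on exit; passing
 * alloc_slab == 1 asks proto_register() to create the sock slab caches
 * above. The proto instance below is hypothetical.
 *
 *	static struct proto example_prot = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_prot, 1);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		proto_unregister(&example_prot);
 *	}
 */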
void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		const char *name = kmem_cache_name(prot->rsk_prot->slab);

		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);

		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&proto_list_lock);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1,
		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start	= proto_seq_start,
	.next	= proto_seq_next,
	.stop	= proto_seq_stop,
	.show	= proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &proto_seq_ops);
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proto_init(void)
{
	/* register /proc/net/protocols */
	return proc_net_fops_create(&init_net, "protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0;
}

subsys_initcall(proto_init);

#endif /* PROC_FS */
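
/*
 * Illustrative note (editor's addition): with CONFIG_PROC_FS enabled,
 * reading /proc/net/protocols prints the header row emitted by
 * proto_seq_show() above, followed by one line per registered protocol;
 * the trailing y/n columns report which struct proto methods are
 * implemented, in the order listed in proto_seq_printf().
 */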
EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sk_free);
EXPORT_SYMBOL(sk_send_sigurg);
EXPORT_SYMBOL(sock_alloc_send_skb);
EXPORT_SYMBOL(sock_init_data);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_no_accept);
EXPORT_SYMBOL(sock_no_bind);
EXPORT_SYMBOL(sock_no_connect);
EXPORT_SYMBOL(sock_no_getname);
EXPORT_SYMBOL(sock_no_getsockopt);
EXPORT_SYMBOL(sock_no_ioctl);
EXPORT_SYMBOL(sock_no_listen);
EXPORT_SYMBOL(sock_no_mmap);
EXPORT_SYMBOL(sock_no_poll);
EXPORT_SYMBOL(sock_no_recvmsg);
EXPORT_SYMBOL(sock_no_sendmsg);
EXPORT_SYMBOL(sock_no_sendpage);
EXPORT_SYMBOL(sock_no_setsockopt);
EXPORT_SYMBOL(sock_no_shutdown);
EXPORT_SYMBOL(sock_no_socketpair);
EXPORT_SYMBOL(sock_rfree);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_rmem_max);
EXPORT_SYMBOL(sysctl_wmem_max);
#endif