sock.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly,
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
	"sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
	"slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
	"clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
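
/*
 * For illustration only (these numbers are build-dependent, so treat
 * them as an assumption rather than a guarantee): on a typical x86_64
 * build SKB_TRUESIZE(256) works out to 832 bytes, giving
 *
 *	SK_WMEM_MAX = SK_RMEM_MAX = 832 * 256 = 212992 bytes (~208 KiB)
 *
 * which is the familiar default that sysctl reports for
 * net.core.rmem_default/wmem_default on such systems.
 */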
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations, there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down, but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
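
/*
 * Example (an illustrative sketch, not code in this file): a transport
 * that carries swap traffic, e.g. a hypothetical swap-over-network
 * backend, would bracket the lifetime of its kernel socket like this.
 * "my_swap_sk" and the backend hooks are assumed names.
 *
 *	static void my_backend_start(struct sock *my_swap_sk)
 *	{
 *		sk_set_memalloc(my_swap_sk);	// may now dip into reserves
 *	}
 *
 *	static void my_backend_stop(struct sock *my_swap_sk)
 *	{
 *		sk_clear_memalloc(my_swap_sk);	// only at teardown, see above
 *	}
 */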
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
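
/*
 * User-space view of the conversion above (a minimal sketch, error
 * handling elided): a zero timeval means "block forever", anything else
 * is rounded up to whole jiffies.
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	// ~2.5s receive timeout; a timed-out recv() fails with EAGAIN
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */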
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from the rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
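
/*
 * Example (an illustrative sketch): how a datagram protocol's input path
 * typically hands an skb to the owning socket.  "my_proto_rcv" is an
 * assumed name, not a function in this file.
 *
 *	static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sock_queue_rcv_skb(sk, skb);
 *
 *		if (err < 0)
 *			kfree_skb(skb);	// on failure the skb is still ours
 *		return err;
 *	}
 */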
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);
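
/*
 * Example (an illustrative sketch): a transmit path typically validates
 * the cached route and falls back to a fresh lookup when the check
 * fails.  "my_route_output" is an assumed helper, not a real API, and
 * the cookie argument is protocol-specific (0 here for simplicity).
 *
 *	struct dst_entry *dst = sk_dst_check(sk, 0);
 *
 *	if (!dst) {
 *		dst = my_route_output(sk);	// re-route...
 *		if (!IS_ERR(dst))
 *			sk_dst_set(sk, dst);	// ...and cache again
 *	}
 */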
static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}
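
/*
 * User-space view (a minimal sketch; requires CAP_NET_RAW per the check
 * above, error handling elided):
 *
 *	const char ifname[] = "eth0";
 *
 *	// bind traffic to eth0; an empty string unbinds again
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, sizeof(ifname));
 */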
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
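
/*
 * User-space view of the SO_RCVBUF doubling described above (a minimal
 * sketch, error handling elided): the kernel stores twice the requested
 * value to cover sk_buff overhead, and getsockopt() reports the value
 * actually stored.
 *
 *	int req = 65536, got;
 *	socklen_t len = sizeof(got);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
 *	// got == 131072, assuming req was within net.core.rmem_max
 */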
void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;

	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
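
/*
 * User-space view of SO_ERROR (a minimal sketch): the classic use is
 * harvesting the result of a non-blocking connect() once poll() reports
 * the socket writable.  As implemented above, the error is returned
 * positive and cleared by the read.
 *
 *	int err = 0;
 *	socklen_t len = sizeof(err);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *	if (err != 0)
 *		errno = err;	// connect() failed with this errno
 */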
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	classid = task_cls_classid(current);
	if (classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
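
/*
 * Example (an illustrative sketch): a protocol family's create hook
 * typically pairs sk_alloc() with sock_init_data().  "PF_MYFAM" and
 * "my_proto" are assumed names used only for this example.
 *
 *	static int my_create(struct net *net, struct socket *sock, int protocol)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_MYFAM, GFP_KERNEL, &my_proto);
 *		if (!sk)
 *			return -ENOBUFS;
 *		sock_init_data(sock, sk);	// queues, callbacks, defaults
 *		return 0;
 *	}
 */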
static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc, which tells us whether
	 * some packets are still in some tx queue.
	 * If the count is nonzero, sock_wfree() will call __sk_free(sk) later.
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping namespace
 * is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and after
 * that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		sock_update_memcg(newsk);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and do a plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		sk_update_clone(sk, newsk);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);
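
/*
 * Example (an illustrative sketch of the locking contract only; TCP's
 * minisock code is the real caller):
 *
 *	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *
 *	if (newsk) {
 *		// ... protocol-specific setup of newsk ...
 *		bh_unlock_sock(newsk);	// caller owns the bh lock
 *	}
 */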
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);
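
/*
 * Illustrative sketch (not part of the original file): sk_setup_caps() is
 * typically called once a connect path has resolved a route, so the
 * socket's offload capabilities follow the output device. The function
 * name and the TCP/IPv4 GSO type below are hypothetical.
 */
static void example_connect_done(struct sock *sk, struct dst_entry *dst)
{
	sk_setup_caps(sk, dst);			/* derive caps from dst->dev */
	sk->sk_gso_type = SKB_GSO_TCPV4;	/* hypothetical protocol choice */
}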

/*
 *	Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);
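
/*
 * Illustrative sketch (not part of the original file): how an skb ends up
 * with sock_wfree() as its destructor. skb_set_owner_w() charges the skb's
 * truesize to sk_wmem_alloc, so a later kfree_skb() of that skb runs
 * sock_wfree() and may complete a deferred __sk_free(). The helper name
 * is hypothetical.
 */
static struct sk_buff *example_charged_skb(struct sock *sk, unsigned int size)
{
	struct sk_buff *skb = alloc_skb(size, sk->sk_allocation);

	if (skb)
		skb_set_owner_w(skb, sk);	/* destructor = sock_wfree */
	return skb;
}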

void skb_orphan_partial(struct sk_buff *skb)
{
	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
	 * so we do not completely orphan the skb, but transfer all
	 * accounted bytes but one, to avoid unexpected reorders.
	 */
	if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
	    || skb->destructor == tcp_wfree
#endif
		) {
		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
		skb->truesize = 1;
	} else {
		skb_orphan(skb);
	}
}
EXPORT_SYMBOL(skb_orphan_partial);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

void sock_edemux(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

#ifdef CONFIG_INET
	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_put(inet_twsk(sk));
	else
#endif
		sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
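
/*
 * Illustrative sketch (not part of the original file): a setsockopt-style
 * helper pairing sock_kmalloc() with sock_kfree_s(). The helper name is
 * hypothetical; the point is that the same size passed to sock_kmalloc()
 * must be handed back to sock_kfree_s() so sk_omem_alloc stays balanced.
 */
static int example_copy_opt(struct sock *sk, char __user *optval, int optlen)
{
	void *buf = sock_kmalloc(sk, optlen, GFP_KERNEL);

	if (buf == NULL)
		return -ENOBUFS;
	if (copy_from_user(buf, optval, optlen)) {
		sock_kfree_s(sk, buf, optlen);
		return -EFAULT;
	}
	/* ... install the option, then release the charge ... */
	sock_kfree_s(sk, buf, optlen);
	return 0;
}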

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}

/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{
	struct sk_buff *skb = NULL;
	unsigned long chunk;
	gfp_t gfp_mask;
	long timeo;
	int err;
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	struct page *page;
	int i;

	err = -EMSGSIZE;
	if (npages > MAX_SKB_FRAGS)
		goto failure;

	timeo = sock_sndtimeo(sk, noblock);
	while (!skb) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			err = -EAGAIN;
			if (!timeo)
				goto failure;
			if (signal_pending(current))
				goto interrupted;
			timeo = sock_wait_for_wmem(sk, timeo);
			continue;
		}

		err = -ENOBUFS;
		gfp_mask = sk->sk_allocation;
		if (gfp_mask & __GFP_WAIT)
			gfp_mask |= __GFP_REPEAT;

		skb = alloc_skb(header_len, gfp_mask);
		if (!skb)
			goto failure;

		skb->truesize += data_len;

		for (i = 0; npages > 0; i++) {
			int order = max_page_order;

			while (order) {
				if (npages >= 1 << order) {
					page = alloc_pages(sk->sk_allocation |
							   __GFP_COMP |
							   __GFP_NOWARN,
							   order);
					if (page)
						goto fill_page;
				}
				order--;
			}
			page = alloc_page(sk->sk_allocation);
			if (!page)
				goto failure;
fill_page:
			chunk = min_t(unsigned long, data_len,
				      PAGE_SIZE << order);
			skb_fill_page_desc(skb, i, page, 0, chunk);
			data_len -= chunk;
			npages -= 1 << order;
		}
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	kfree_skb(skb);
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
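
/*
 * Illustrative sketch (not part of the original file): the typical
 * datagram sendmsg pattern around sock_alloc_send_skb(). Reserving
 * sk->sk_prot->max_header and the payload copy are only indicated; the
 * helper name is hypothetical, and *err reports the failure reason.
 */
static struct sk_buff *example_build_skb(struct sock *sk, size_t len,
					 int noblock, int *err)
{
	struct sk_buff *skb;

	skb = sock_alloc_send_skb(sk, len + sk->sk_prot->max_header,
				  noblock, err);
	if (skb == NULL)
		return NULL;		/* *err already holds the reason */
	skb_reserve(skb, sk->sk_prot->max_header);
	/* ... copy the payload into skb_put(skb, len) ... */
	return skb;
}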

/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	int order;

	if (pfrag->page) {
		if (atomic_read(&pfrag->page->_count) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset < pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	/* We restrict high order allocations to users that can afford to wait */
	order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;

	do {
		gfp_t gfp = sk->sk_allocation;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN;
		pfrag->page = alloc_pages(gfp, order);
		if (likely(pfrag->page)) {
			pfrag->offset = 0;
			pfrag->size = PAGE_SIZE << order;
			return true;
		}
	} while (--order >= 0);

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
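
/*
 * Illustrative sketch (not part of the original file): the refill/copy
 * pattern sk_page_frag_refill() serves, using the sk_page_frag() helper
 * from net/sock.h. Only the fragment bookkeeping is shown; the helper
 * name is hypothetical.
 */
static int example_append_to_frag(struct sock *sk, const char *data, int len)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int copy;

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;		/* we are under memory pressure */

	copy = min_t(int, len, pfrag->size - pfrag->offset);
	memcpy(page_address(pfrag->page) + pfrag->offset, data, copy);
	pfrag->offset += copy;
	return copy;
}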

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we can not loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
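
/*
 * Illustrative sketch (not part of the original file): a recvmsg-style
 * loop blocking in sk_wait_data() with the socket lock held, which is the
 * locking assumption the comment above relies on. Shutdown and error
 * handling are elided; the helper name is hypothetical.
 */
static struct sk_buff *example_wait_for_skb(struct sock *sk, int noblock)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) == NULL) {
		if (!timeo || signal_pending(current))
			break;
		sk_wait_data(sk, &timeo);
	}
	return skb;
}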

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;
	int parent_status = UNDER_LIMIT;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

	allocated = sk_memory_allocated_add(sk, amt, &parent_status);

	/* Under limit. */
	if (parent_status == UNDER_LIMIT &&
			allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. (we or our parents) */
	if ((parent_status > SOFT_LIMIT) ||
			allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit (we or our parents) */
	if ((parent_status == OVER_LIMIT) ||
			(allocated > sk_prot_mem_limits(sk, 2)))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;

	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;

	sk_memory_allocated_sub(sk, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);
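
/*
 * Illustrative sketch (not part of the original file), modeled loosely on
 * the sk_rmem_schedule() helper in net/sock.h: protocols normally consult
 * sk_forward_alloc first and only fall back to __sk_mem_schedule(), passing
 * SK_MEM_RECV (or SK_MEM_SEND) and the skb's truesize.
 */
static int example_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_has_account(sk))	/* protocol does no memory accounting */
		return 1;
	return sk->sk_forward_alloc >= skb->truesize ||
	       __sk_mem_schedule(sk, skb->truesize, SK_MEM_RECV);
}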

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	sk_memory_allocated_sub(sk,
				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
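
/*
 * Illustrative sketch (not part of the original file): a minimal proto_ops
 * table for a hypothetical protocol, filled almost entirely with the
 * sock_no_*() stubs above. Only .release is protocol specific here, and
 * the trivial example_release() and PF_UNSPEC family are hypothetical.
 */
static int example_release(struct socket *sock)
{
	return 0;	/* a real protocol would detach and free its sock */
}

static const struct proto_ops example_proto_ops = {
	.family		= PF_UNSPEC,		/* hypothetical family */
	.owner		= THIS_MODULE,
	.release	= example_release,
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= sock_no_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= sock_no_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};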

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
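
/*
 * Illustrative sketch (not part of the original file): the reference
 * discipline around sk_reset_timer()/sk_stop_timer(). Arming takes a hold
 * on the sock when the timer was not already pending, so the handler (or a
 * matching sk_stop_timer()) must drop it. Names and the ten second period
 * are hypothetical.
 */
static void example_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	/* ... per-protocol timer work ... */
	sock_put(sk);		/* drop the hold taken by sk_reset_timer() */
}

static void example_arm_timer(struct sock *sk)
{
	setup_timer(&sk->sk_timer, example_timer_handler, (unsigned long)sk);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + 10 * HZ);
}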

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head	=	NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type	=	sock->type;
		sk->sk_wq	=	sock->wq;
		sock->sk	=	sk;
	} else
		sk->sk_wq	=	NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

	sk->sk_frag.page	=	NULL;
	sk->sk_frag.offset	=	0;
	sk->sk_peek_off		=	-1;

	sk->sk_peer_pid		=	NULL;
	sk->sk_peer_cred	=	NULL;
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

#ifdef CONFIG_NET_RX_BUSY_POLL
	sk->sk_napi_id		=	0;
	sk->sk_ll_usec		=	sysctl_net_busy_read;
#endif

	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
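
/*
 * Illustrative sketch (not part of the original file): the usual ordering
 * in a protocol's create hook, where sock_init_data() runs right after
 * sk_alloc() so the queues, timer and default callbacks above are in place
 * before protocol-private fields are set. The names and the PF_UNSPEC
 * family are hypothetical.
 */
static int example_create(struct net *net, struct socket *sock,
			  struct proto *prot, int protocol)
{
	struct sock *sk = sk_alloc(net, PF_UNSPEC, GFP_KERNEL, prot);

	if (sk == NULL)
		return -ENOBUFS;
	sock_init_data(sock, sk);	/* queues, sk_timer, sock_def_* hooks */
	sk->sk_protocol = protocol;
	return 0;
}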

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 * Returns false if the fast path is taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path is taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note : We must disable BH
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
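
/*
 * Illustrative sketch (not part of the original file): the intended
 * pairing of lock_sock_fast() with unlock_sock_fast(), which takes the
 * bool returned above so it knows whether to drop the spinlock or the
 * mutex-style lock. Peeking at the receive queue length stands in for a
 * "very small section"; the helper name is hypothetical.
 */
static int example_peek_queue_len(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	int len = skb_queue_len(&sk->sk_receive_queue);

	unlock_sock_fast(sk, slow);
	return len;
}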

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	int copied, err;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the net still does.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and be purged by the
	 * socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and,
	 * probably, our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
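
/*
 * Illustrative sketch (not part of the original file): registering a
 * protocol with its own sock slab. "example_sock" and the proto fields
 * are hypothetical; a real module pairs this with proto_unregister() on
 * exit.
 */
struct example_sock {
	struct sock sk;
	/* ... protocol-private state ... */
};

static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
};

static int __init example_proto_init(void)
{
	return proto_register(&example_proto, 1);	/* alloc_slab = 1 */
}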

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */