  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Definitions for the AF_INET socket handler.
  7. *
  8. * Version: @(#)sock.h 1.0.4 05/13/93
  9. *
  10. * Authors: Ross Biro
  11. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12. * Corey Minyard <wf-rch!minyard@relay.EU.net>
  13. * Florian La Roche <flla@stud.uni-sb.de>
  14. *
  15. * Fixes:
  16. * Alan Cox : Volatiles in skbuff pointers. See
  17. * skbuff comments. May be overdone,
  18. * better to prove they can be removed
  19. * than the reverse.
  20. * Alan Cox : Added a zapped field for tcp to note
  21. * a socket is reset and must stay shut up
  22. * Alan Cox : New fields for options
  23. * Pauline Middelink : identd support
  24. * Alan Cox : Eliminate low level recv/recvfrom
  25. * David S. Miller : New socket lookup architecture.
  26. * Steve Whitehouse: Default routines for sock_ops
  27. * Arnaldo C. Melo : removed net_pinfo, tp_pinfo and made
  28. * protinfo be just a void pointer, as the
  29. * protocol specific parts were moved to
  30. * respective headers and ipv4/v6, etc now
  31. * use private slabcaches for its socks
  32. * Pedro Hortas : New flags field for socket options
  33. *
  34. *
  35. * This program is free software; you can redistribute it and/or
  36. * modify it under the terms of the GNU General Public License
  37. * as published by the Free Software Foundation; either version
  38. * 2 of the License, or (at your option) any later version.
  39. */
  40. #ifndef _SOCK_H
  41. #define _SOCK_H
  42. #include <linux/kernel.h>
  43. #include <linux/list.h>
  44. #include <linux/list_nulls.h>
  45. #include <linux/timer.h>
  46. #include <linux/cache.h>
  47. #include <linux/module.h>
  48. #include <linux/lockdep.h>
  49. #include <linux/netdevice.h>
  50. #include <linux/skbuff.h> /* struct sk_buff */
  51. #include <linux/mm.h>
  52. #include <linux/security.h>
  53. #include <linux/filter.h>
  54. #include <linux/rculist_nulls.h>
  55. #include <linux/poll.h>
  56. #include <asm/atomic.h>
  57. #include <net/dst.h>
  58. #include <net/checksum.h>
  59. /*
  60. * This structure really needs to be cleaned up.
  61. * Most of it is for TCP, and not used by any of
  62. * the other protocols.
  63. */
  64. /* Define this to get the SOCK_DBG debugging facility. */
  65. #define SOCK_DEBUGGING
  66. #ifdef SOCK_DEBUGGING
  67. #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
  68. printk(KERN_DEBUG msg); } while (0)
  69. #else
  70. /* Validate arguments and do nothing */
  71. static inline void __attribute__ ((format (printf, 2, 3)))
  72. SOCK_DEBUG(struct sock *sk, const char *msg, ...)
  73. {
  74. }
  75. #endif
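/*
 * A minimal usage sketch (hypothetical call site; the tcp_sock "tp" and
 * its rcv_nxt field are assumed here purely for illustration). The
 * message is printed only when SOCK_DEBUGGING is defined and the socket
 * has the SOCK_DBG flag set, e.g. via setsockopt(SO_DEBUG):
 *
 *     SOCK_DEBUG(sk, "rcv_nxt advanced to %u\n", tp->rcv_nxt);
 */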
  76. /* This is the per-socket lock. The spinlock provides a synchronization
  77. * between user contexts and software interrupt processing, whereas the
  78. * mini-semaphore synchronizes multiple users amongst themselves.
  79. */
  80. typedef struct {
  81. spinlock_t slock;
  82. int owned;
  83. wait_queue_head_t wq;
  84. /*
  85. * We express the mutex-alike socket_lock semantics
  86. * to the lock validator by explicitly managing
  87. * the slock as a lock variant (in addition to
  88. * the slock itself):
  89. */
  90. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  91. struct lockdep_map dep_map;
  92. #endif
  93. } socket_lock_t;
  94. struct sock;
  95. struct proto;
  96. struct net;
  97. /**
  98. * struct sock_common - minimal network layer representation of sockets
  99. * @skc_node: main hash linkage for various protocol lookup tables
  100. * @skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
  101. * @skc_refcnt: reference count
  102. * @skc_hash: hash value used with various protocol lookup tables
  103. * @skc_family: network address family
  104. * @skc_state: Connection state
  105. * @skc_reuse: %SO_REUSEADDR setting
  106. * @skc_bound_dev_if: bound device index if != 0
  107. * @skc_bind_node: bind hash linkage for various protocol lookup tables
  108. * @skc_prot: protocol handlers inside a network family
  109. * @skc_net: reference to the network namespace of this socket
  110. *
  111. * This is the minimal network layer representation of sockets, the header
  112. * for struct sock and struct inet_timewait_sock.
  113. */
  114. struct sock_common {
  115. /*
  116. * first fields are not copied in sock_copy()
  117. */
  118. union {
  119. struct hlist_node skc_node;
  120. struct hlist_nulls_node skc_nulls_node;
  121. };
  122. atomic_t skc_refcnt;
  123. unsigned int skc_hash;
  124. unsigned short skc_family;
  125. volatile unsigned char skc_state;
  126. unsigned char skc_reuse;
  127. int skc_bound_dev_if;
  128. struct hlist_node skc_bind_node;
  129. struct proto *skc_prot;
  130. #ifdef CONFIG_NET_NS
  131. struct net *skc_net;
  132. #endif
  133. };
  134. /**
  135. * struct sock - network layer representation of sockets
  136. * @__sk_common: shared layout with inet_timewait_sock
  137. * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  138. * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  139. * @sk_lock: synchronizer
  140. * @sk_rcvbuf: size of receive buffer in bytes
  141. * @sk_sleep: sock wait queue
  142. * @sk_dst_cache: destination cache
  143. * @sk_dst_lock: destination cache lock
  144. * @sk_policy: flow policy
  145. * @sk_rmem_alloc: receive queue bytes committed
  146. * @sk_receive_queue: incoming packets
  147. * @sk_wmem_alloc: transmit queue bytes committed
  148. * @sk_write_queue: Packet sending queue
  149. * @sk_async_wait_queue: DMA copied packets
  150. * @sk_omem_alloc: "o" is "option" or "other"
  151. * @sk_wmem_queued: persistent queue size
  152. * @sk_forward_alloc: space allocated forward
  153. * @sk_allocation: allocation mode
  154. * @sk_sndbuf: size of send buffer in bytes
  155. * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  156. * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
  157. * @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
  158. * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  159. * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  160. * @sk_gso_max_size: Maximum GSO segment size to build
  161. * @sk_lingertime: %SO_LINGER l_linger setting
  162. * @sk_backlog: always used with the per-socket spinlock held
  163. * @sk_callback_lock: used with the callbacks in the end of this struct
  164. * @sk_error_queue: rarely used
  165. * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
  166. * IPV6_ADDRFORM for instance)
  167. * @sk_err: last error
  168. * @sk_err_soft: errors that don't cause failure but are the cause of a
  169. * persistent failure not just 'timed out'
  170. * @sk_drops: raw/udp drops counter
  171. * @sk_ack_backlog: current listen backlog
  172. * @sk_max_ack_backlog: listen backlog set in listen()
  173. * @sk_priority: %SO_PRIORITY setting
  174. * @sk_type: socket type (%SOCK_STREAM, etc)
  175. * @sk_protocol: which protocol this socket belongs in this network family
  176. * @sk_peercred: %SO_PEERCRED setting
  177. * @sk_rcvlowat: %SO_RCVLOWAT setting
  178. * @sk_rcvtimeo: %SO_RCVTIMEO setting
  179. * @sk_sndtimeo: %SO_SNDTIMEO setting
  180. * @sk_filter: socket filtering instructions
  181. * @sk_protinfo: private area, net family specific, when not using slab
  182. * @sk_timer: sock cleanup timer
  183. * @sk_stamp: time stamp of last packet received
  184. * @sk_socket: Identd and reporting IO signals
  185. * @sk_user_data: RPC layer private data
  186. * @sk_sndmsg_page: cached page for sendmsg
  187. * @sk_sndmsg_off: cached offset for sendmsg
  188. * @sk_send_head: front of stuff to transmit
  189. * @sk_security: used by security modules
  190. * @sk_mark: generic packet mark
  191. * @sk_write_pending: a write to stream socket waits to start
  192. * @sk_state_change: callback to indicate change in the state of the sock
  193. * @sk_data_ready: callback to indicate there is data to be processed
  194. * @sk_write_space: callback to indicate there is buffer sending space available
  195. * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  196. * @sk_backlog_rcv: callback to process the backlog
  197. * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
  198. */
  199. struct sock {
  200. /*
  201. * Now struct inet_timewait_sock also uses sock_common, so please just
  202. * don't add anything before this first member (__sk_common) --acme
  203. */
  204. struct sock_common __sk_common;
  205. #define sk_node __sk_common.skc_node
  206. #define sk_nulls_node __sk_common.skc_nulls_node
  207. #define sk_refcnt __sk_common.skc_refcnt
  208. #define sk_copy_start __sk_common.skc_hash
  209. #define sk_hash __sk_common.skc_hash
  210. #define sk_family __sk_common.skc_family
  211. #define sk_state __sk_common.skc_state
  212. #define sk_reuse __sk_common.skc_reuse
  213. #define sk_bound_dev_if __sk_common.skc_bound_dev_if
  214. #define sk_bind_node __sk_common.skc_bind_node
  215. #define sk_prot __sk_common.skc_prot
  216. #define sk_net __sk_common.skc_net
  217. kmemcheck_bitfield_begin(flags);
  218. unsigned char sk_shutdown : 2,
  219. sk_no_check : 2,
  220. sk_userlocks : 4;
  221. kmemcheck_bitfield_end(flags);
  222. unsigned char sk_protocol;
  223. unsigned short sk_type;
  224. int sk_rcvbuf;
  225. socket_lock_t sk_lock;
  226. /*
  227. * The backlog queue is special, it is always used with
  228. * the per-socket spinlock held and requires low latency
  229. * access. Therefore we special-case its implementation.
  230. */
  231. struct {
  232. struct sk_buff *head;
  233. struct sk_buff *tail;
  234. } sk_backlog;
  235. wait_queue_head_t *sk_sleep;
  236. struct dst_entry *sk_dst_cache;
  237. #ifdef CONFIG_XFRM
  238. struct xfrm_policy *sk_policy[2];
  239. #endif
  240. rwlock_t sk_dst_lock;
  241. atomic_t sk_rmem_alloc;
  242. atomic_t sk_wmem_alloc;
  243. atomic_t sk_omem_alloc;
  244. int sk_sndbuf;
  245. struct sk_buff_head sk_receive_queue;
  246. struct sk_buff_head sk_write_queue;
  247. #ifdef CONFIG_NET_DMA
  248. struct sk_buff_head sk_async_wait_queue;
  249. #endif
  250. int sk_wmem_queued;
  251. int sk_forward_alloc;
  252. gfp_t sk_allocation;
  253. int sk_route_caps;
  254. int sk_gso_type;
  255. unsigned int sk_gso_max_size;
  256. int sk_rcvlowat;
  257. unsigned long sk_flags;
  258. unsigned long sk_lingertime;
  259. struct sk_buff_head sk_error_queue;
  260. struct proto *sk_prot_creator;
  261. rwlock_t sk_callback_lock;
  262. int sk_err,
  263. sk_err_soft;
  264. atomic_t sk_drops;
  265. unsigned short sk_ack_backlog;
  266. unsigned short sk_max_ack_backlog;
  267. __u32 sk_priority;
  268. struct ucred sk_peercred;
  269. long sk_rcvtimeo;
  270. long sk_sndtimeo;
  271. struct sk_filter *sk_filter;
  272. void *sk_protinfo;
  273. struct timer_list sk_timer;
  274. ktime_t sk_stamp;
  275. struct socket *sk_socket;
  276. void *sk_user_data;
  277. struct page *sk_sndmsg_page;
  278. struct sk_buff *sk_send_head;
  279. __u32 sk_sndmsg_off;
  280. int sk_write_pending;
  281. #ifdef CONFIG_SECURITY
  282. void *sk_security;
  283. #endif
  284. __u32 sk_mark;
  285. /* XXX 4 bytes hole on 64 bit */
  286. void (*sk_state_change)(struct sock *sk);
  287. void (*sk_data_ready)(struct sock *sk, int bytes);
  288. void (*sk_write_space)(struct sock *sk);
  289. void (*sk_error_report)(struct sock *sk);
  290. int (*sk_backlog_rcv)(struct sock *sk,
  291. struct sk_buff *skb);
  292. void (*sk_destruct)(struct sock *sk);
  293. };
  294. /*
  295. * Hashed lists helper routines
  296. */
  297. static inline struct sock *__sk_head(const struct hlist_head *head)
  298. {
  299. return hlist_entry(head->first, struct sock, sk_node);
  300. }
  301. static inline struct sock *sk_head(const struct hlist_head *head)
  302. {
  303. return hlist_empty(head) ? NULL : __sk_head(head);
  304. }
  305. static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
  306. {
  307. return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
  308. }
  309. static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
  310. {
  311. return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
  312. }
  313. static inline struct sock *sk_next(const struct sock *sk)
  314. {
  315. return sk->sk_node.next ?
  316. hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
  317. }
  318. static inline struct sock *sk_nulls_next(const struct sock *sk)
  319. {
  320. return (!is_a_nulls(sk->sk_nulls_node.next)) ?
  321. hlist_nulls_entry(sk->sk_nulls_node.next,
  322. struct sock, sk_nulls_node) :
  323. NULL;
  324. }
  325. static inline int sk_unhashed(const struct sock *sk)
  326. {
  327. return hlist_unhashed(&sk->sk_node);
  328. }
  329. static inline int sk_hashed(const struct sock *sk)
  330. {
  331. return !sk_unhashed(sk);
  332. }
  333. static __inline__ void sk_node_init(struct hlist_node *node)
  334. {
  335. node->pprev = NULL;
  336. }
  337. static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
  338. {
  339. node->pprev = NULL;
  340. }
  341. static __inline__ void __sk_del_node(struct sock *sk)
  342. {
  343. __hlist_del(&sk->sk_node);
  344. }
  345. static __inline__ int __sk_del_node_init(struct sock *sk)
  346. {
  347. if (sk_hashed(sk)) {
  348. __sk_del_node(sk);
  349. sk_node_init(&sk->sk_node);
  350. return 1;
  351. }
  352. return 0;
  353. }
  354. /* Grab socket reference count. This operation is valid only
  355. when sk has ALREADY been grabbed, e.g. it has been found in a hash
  356. table or a list and the lookup was made under a lock that prevents
  357. hash table modifications.
  358. */
  359. static inline void sock_hold(struct sock *sk)
  360. {
  361. atomic_inc(&sk->sk_refcnt);
  362. }
  363. /* Release a socket reference in a context where the refcount cannot
  364. hit zero, e.g. in the context of any socketcall.
  365. */
  366. static inline void __sock_put(struct sock *sk)
  367. {
  368. atomic_dec(&sk->sk_refcnt);
  369. }
  370. static __inline__ int sk_del_node_init(struct sock *sk)
  371. {
  372. int rc = __sk_del_node_init(sk);
  373. if (rc) {
  374. /* paranoid for a while -acme */
  375. WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
  376. __sock_put(sk);
  377. }
  378. return rc;
  379. }
  380. static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
  381. {
  382. if (sk_hashed(sk)) {
  383. hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
  384. return 1;
  385. }
  386. return 0;
  387. }
  388. static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
  389. {
  390. int rc = __sk_nulls_del_node_init_rcu(sk);
  391. if (rc) {
  392. /* paranoid for a while -acme */
  393. WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
  394. __sock_put(sk);
  395. }
  396. return rc;
  397. }
  398. static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
  399. {
  400. hlist_add_head(&sk->sk_node, list);
  401. }
  402. static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
  403. {
  404. sock_hold(sk);
  405. __sk_add_node(sk, list);
  406. }
  407. static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
  408. {
  409. hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
  410. }
  411. static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
  412. {
  413. sock_hold(sk);
  414. __sk_nulls_add_node_rcu(sk, list);
  415. }
  416. static __inline__ void __sk_del_bind_node(struct sock *sk)
  417. {
  418. __hlist_del(&sk->sk_bind_node);
  419. }
  420. static __inline__ void sk_add_bind_node(struct sock *sk,
  421. struct hlist_head *list)
  422. {
  423. hlist_add_head(&sk->sk_bind_node, list);
  424. }
  425. #define sk_for_each(__sk, node, list) \
  426. hlist_for_each_entry(__sk, node, list, sk_node)
  427. #define sk_nulls_for_each(__sk, node, list) \
  428. hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
  429. #define sk_nulls_for_each_rcu(__sk, node, list) \
  430. hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
  431. #define sk_for_each_from(__sk, node) \
  432. if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
  433. hlist_for_each_entry_from(__sk, node, sk_node)
  434. #define sk_nulls_for_each_from(__sk, node) \
  435. if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
  436. hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
  437. #define sk_for_each_continue(__sk, node) \
  438. if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
  439. hlist_for_each_entry_continue(__sk, node, sk_node)
  440. #define sk_for_each_safe(__sk, node, tmp, list) \
  441. hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
  442. #define sk_for_each_bound(__sk, node, list) \
  443. hlist_for_each_entry(__sk, node, list, sk_bind_node)
  444. /* Sock flags */
  445. enum sock_flags {
  446. SOCK_DEAD,
  447. SOCK_DONE,
  448. SOCK_URGINLINE,
  449. SOCK_KEEPOPEN,
  450. SOCK_LINGER,
  451. SOCK_DESTROY,
  452. SOCK_BROADCAST,
  453. SOCK_TIMESTAMP,
  454. SOCK_ZAPPED,
  455. SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
  456. SOCK_DBG, /* %SO_DEBUG setting */
  457. SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
  458. SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
  459. SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
  460. SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
  461. SOCK_TIMESTAMPING_TX_HARDWARE, /* %SOF_TIMESTAMPING_TX_HARDWARE */
  462. SOCK_TIMESTAMPING_TX_SOFTWARE, /* %SOF_TIMESTAMPING_TX_SOFTWARE */
  463. SOCK_TIMESTAMPING_RX_HARDWARE, /* %SOF_TIMESTAMPING_RX_HARDWARE */
  464. SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
  465. SOCK_TIMESTAMPING_SOFTWARE, /* %SOF_TIMESTAMPING_SOFTWARE */
  466. SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
  467. SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
  468. SOCK_FASYNC, /* fasync() active */
  469. };
  470. static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
  471. {
  472. nsk->sk_flags = osk->sk_flags;
  473. }
  474. static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
  475. {
  476. __set_bit(flag, &sk->sk_flags);
  477. }
  478. static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
  479. {
  480. __clear_bit(flag, &sk->sk_flags);
  481. }
  482. static inline int sock_flag(struct sock *sk, enum sock_flags flag)
  483. {
  484. return test_bit(flag, &sk->sk_flags);
  485. }
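/*
 * A small usage sketch (hypothetical caller): sock_set_flag() and
 * sock_reset_flag() use the non-atomic __set_bit()/__clear_bit(), so
 * callers are expected to own the socket lock or otherwise serialize:
 *
 *     sock_set_flag(sk, SOCK_DONE);
 *     if (!sock_flag(sk, SOCK_DEAD))
 *         sk->sk_state_change(sk);
 */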
  486. static inline void sk_acceptq_removed(struct sock *sk)
  487. {
  488. sk->sk_ack_backlog--;
  489. }
  490. static inline void sk_acceptq_added(struct sock *sk)
  491. {
  492. sk->sk_ack_backlog++;
  493. }
  494. static inline int sk_acceptq_is_full(struct sock *sk)
  495. {
  496. return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
  497. }
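/*
 * Listen-path sketch (cf. how TCP's connection-request handling uses
 * this check; the drop label is illustrative):
 *
 *     if (sk_acceptq_is_full(sk))
 *         goto drop;
 */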
  498. /*
  499. * Compute minimal free write space needed to queue new packets.
  500. */
  501. static inline int sk_stream_min_wspace(struct sock *sk)
  502. {
  503. return sk->sk_wmem_queued >> 1;
  504. }
  505. static inline int sk_stream_wspace(struct sock *sk)
  506. {
  507. return sk->sk_sndbuf - sk->sk_wmem_queued;
  508. }
  509. extern void sk_stream_write_space(struct sock *sk);
  510. static inline int sk_stream_memory_free(struct sock *sk)
  511. {
  512. return sk->sk_wmem_queued < sk->sk_sndbuf;
  513. }
  514. /* The per-socket spinlock must be held here. */
  515. static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  516. {
  517. if (!sk->sk_backlog.tail) {
  518. sk->sk_backlog.head = sk->sk_backlog.tail = skb;
  519. } else {
  520. sk->sk_backlog.tail->next = skb;
  521. sk->sk_backlog.tail = skb;
  522. }
  523. skb->next = NULL;
  524. }
  525. static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
  526. {
  527. return sk->sk_backlog_rcv(sk, skb);
  528. }
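/*
 * Receive-path sketch (how callers such as sk_receive_skb() combine
 * these helpers with the bh_lock_sock()/sock_owned_by_user() macros
 * defined later in this header): a packet arriving in BH context is
 * processed directly if the socket is unowned, otherwise it is parked
 * on the backlog and drained by release_sock():
 *
 *     bh_lock_sock(sk);
 *     if (!sock_owned_by_user(sk))
 *         rc = sk_backlog_rcv(sk, skb);
 *     else
 *         sk_add_backlog(sk, skb);
 *     bh_unlock_sock(sk);
 */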
  529. #define sk_wait_event(__sk, __timeo, __condition) \
  530. ({ int __rc; \
  531. release_sock(__sk); \
  532. __rc = __condition; \
  533. if (!__rc) { \
  534. *(__timeo) = schedule_timeout(*(__timeo)); \
  535. } \
  536. lock_sock(__sk); \
  537. __rc = __condition; \
  538. __rc; \
  539. })
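/*
 * Caller sketch (cf. sk_stream_wait_memory()): the socket lock must be
 * held on entry; sk_wait_event() releases it around schedule_timeout()
 * and re-acquires it before re-testing the condition:
 *
 *     DEFINE_WAIT(wait);
 *
 *     prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *     sk_wait_event(sk, &timeo, sk_stream_memory_free(sk));
 *     finish_wait(sk->sk_sleep, &wait);
 */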
  540. extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
  541. extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
  542. extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
  543. extern int sk_stream_error(struct sock *sk, int flags, int err);
  544. extern void sk_stream_kill_queues(struct sock *sk);
  545. extern int sk_wait_data(struct sock *sk, long *timeo);
  546. struct request_sock_ops;
  547. struct timewait_sock_ops;
  548. struct inet_hashinfo;
  549. struct raw_hashinfo;
  550. /* Networking protocol blocks we attach to sockets.
  551. * socket layer -> transport layer interface
  552. * transport -> network interface is defined by struct inet_proto
  553. */
  554. struct proto {
  555. void (*close)(struct sock *sk,
  556. long timeout);
  557. int (*connect)(struct sock *sk,
  558. struct sockaddr *uaddr,
  559. int addr_len);
  560. int (*disconnect)(struct sock *sk, int flags);
  561. struct sock * (*accept) (struct sock *sk, int flags, int *err);
  562. int (*ioctl)(struct sock *sk, int cmd,
  563. unsigned long arg);
  564. int (*init)(struct sock *sk);
  565. void (*destroy)(struct sock *sk);
  566. void (*shutdown)(struct sock *sk, int how);
  567. int (*setsockopt)(struct sock *sk, int level,
  568. int optname, char __user *optval,
  569. unsigned int optlen);
  570. int (*getsockopt)(struct sock *sk, int level,
  571. int optname, char __user *optval,
  572. int __user *option);
  573. #ifdef CONFIG_COMPAT
  574. int (*compat_setsockopt)(struct sock *sk,
  575. int level,
  576. int optname, char __user *optval,
  577. unsigned int optlen);
  578. int (*compat_getsockopt)(struct sock *sk,
  579. int level,
  580. int optname, char __user *optval,
  581. int __user *option);
  582. #endif
  583. int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
  584. struct msghdr *msg, size_t len);
  585. int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
  586. struct msghdr *msg,
  587. size_t len, int noblock, int flags,
  588. int *addr_len);
  589. int (*sendpage)(struct sock *sk, struct page *page,
  590. int offset, size_t size, int flags);
  591. int (*bind)(struct sock *sk,
  592. struct sockaddr *uaddr, int addr_len);
  593. int (*backlog_rcv) (struct sock *sk,
  594. struct sk_buff *skb);
  595. /* Keeping track of sk's, looking them up, and port selection methods. */
  596. void (*hash)(struct sock *sk);
  597. void (*unhash)(struct sock *sk);
  598. int (*get_port)(struct sock *sk, unsigned short snum);
  599. /* Keeping track of sockets in use */
  600. #ifdef CONFIG_PROC_FS
  601. unsigned int inuse_idx;
  602. #endif
  603. /* Memory pressure */
  604. void (*enter_memory_pressure)(struct sock *sk);
  605. atomic_t *memory_allocated; /* Current allocated memory. */
  606. struct percpu_counter *sockets_allocated; /* Current number of sockets. */
  607. /*
  608. * Pressure flag: try to collapse.
  609. * Technical note: it is used by multiple contexts non atomically.
  610. * All of __sk_mem_schedule() is of this nature: accounting
  611. * is strict, actions are advisory and have some latency.
  612. */
  613. int *memory_pressure;
  614. int *sysctl_mem;
  615. int *sysctl_wmem;
  616. int *sysctl_rmem;
  617. int max_header;
  618. struct kmem_cache *slab;
  619. unsigned int obj_size;
  620. int slab_flags;
  621. struct percpu_counter *orphan_count;
  622. struct request_sock_ops *rsk_prot;
  623. struct timewait_sock_ops *twsk_prot;
  624. union {
  625. struct inet_hashinfo *hashinfo;
  626. struct udp_table *udp_table;
  627. struct raw_hashinfo *raw_hash;
  628. } h;
  629. struct module *owner;
  630. char name[32];
  631. struct list_head node;
  632. #ifdef SOCK_REFCNT_DEBUG
  633. atomic_t socks;
  634. #endif
  635. };
  636. extern int proto_register(struct proto *prot, int alloc_slab);
  637. extern void proto_unregister(struct proto *prot);
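/*
 * Registration sketch (hypothetical protocol "foo"; struct foo_sock is
 * assumed to embed struct sock as its first member). Passing 1 for
 * alloc_slab asks the core to create a kmem cache of obj_size bytes
 * for this protocol's sockets:
 *
 *     static struct proto foo_proto = {
 *         .name     = "FOO",
 *         .owner    = THIS_MODULE,
 *         .obj_size = sizeof(struct foo_sock),
 *     };
 *
 *     err = proto_register(&foo_proto, 1);
 */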
  638. #ifdef SOCK_REFCNT_DEBUG
  639. static inline void sk_refcnt_debug_inc(struct sock *sk)
  640. {
  641. atomic_inc(&sk->sk_prot->socks);
  642. }
  643. static inline void sk_refcnt_debug_dec(struct sock *sk)
  644. {
  645. atomic_dec(&sk->sk_prot->socks);
  646. printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
  647. sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
  648. }
  649. static inline void sk_refcnt_debug_release(const struct sock *sk)
  650. {
  651. if (atomic_read(&sk->sk_refcnt) != 1)
  652. printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
  653. sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
  654. }
  655. #else /* SOCK_REFCNT_DEBUG */
  656. #define sk_refcnt_debug_inc(sk) do { } while (0)
  657. #define sk_refcnt_debug_dec(sk) do { } while (0)
  658. #define sk_refcnt_debug_release(sk) do { } while (0)
  659. #endif /* SOCK_REFCNT_DEBUG */
  660. #ifdef CONFIG_PROC_FS
  661. /* Called with local bh disabled */
  662. extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
  663. extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
  664. #else
  665. static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
  666. int inc)
  667. {
  668. }
  669. #endif
  670. /* With per-bucket locks this operation is not atomic, so that
  671. * this version is not worse.
  672. */
  673. static inline void __sk_prot_rehash(struct sock *sk)
  674. {
  675. sk->sk_prot->unhash(sk);
  676. sk->sk_prot->hash(sk);
  677. }
  678. /* About 10 seconds */
  679. #define SOCK_DESTROY_TIME (10*HZ)
  680. /* Ports 0-1023 can't be bound to unless you are superuser */
  681. #define PROT_SOCK 1024
  682. #define SHUTDOWN_MASK 3
  683. #define RCV_SHUTDOWN 1
  684. #define SEND_SHUTDOWN 2
  685. #define SOCK_SNDBUF_LOCK 1
  686. #define SOCK_RCVBUF_LOCK 2
  687. #define SOCK_BINDADDR_LOCK 4
  688. #define SOCK_BINDPORT_LOCK 8
  689. /* sock_iocb: used to kick off async processing of socket ios */
  690. struct sock_iocb {
  691. struct list_head list;
  692. int flags;
  693. int size;
  694. struct socket *sock;
  695. struct sock *sk;
  696. struct scm_cookie *scm;
  697. struct msghdr *msg, async_msg;
  698. struct kiocb *kiocb;
  699. };
  700. static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
  701. {
  702. return (struct sock_iocb *)iocb->private;
  703. }
  704. static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
  705. {
  706. return si->kiocb;
  707. }
  708. struct socket_alloc {
  709. struct socket socket;
  710. struct inode vfs_inode;
  711. };
  712. static inline struct socket *SOCKET_I(struct inode *inode)
  713. {
  714. return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
  715. }
  716. static inline struct inode *SOCK_INODE(struct socket *socket)
  717. {
  718. return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
  719. }
  720. /*
  721. * Functions for memory accounting
  722. */
  723. extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
  724. extern void __sk_mem_reclaim(struct sock *sk);
  725. #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
  726. #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
  727. #define SK_MEM_SEND 0
  728. #define SK_MEM_RECV 1
  729. static inline int sk_mem_pages(int amt)
  730. {
  731. return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
  732. }
  733. static inline int sk_has_account(struct sock *sk)
  734. {
  735. /* return true if protocol supports memory accounting */
  736. return !!sk->sk_prot->memory_allocated;
  737. }
  738. static inline int sk_wmem_schedule(struct sock *sk, int size)
  739. {
  740. if (!sk_has_account(sk))
  741. return 1;
  742. return size <= sk->sk_forward_alloc ||
  743. __sk_mem_schedule(sk, size, SK_MEM_SEND);
  744. }
  745. static inline int sk_rmem_schedule(struct sock *sk, int size)
  746. {
  747. if (!sk_has_account(sk))
  748. return 1;
  749. return size <= sk->sk_forward_alloc ||
  750. __sk_mem_schedule(sk, size, SK_MEM_RECV);
  751. }
  752. static inline void sk_mem_reclaim(struct sock *sk)
  753. {
  754. if (!sk_has_account(sk))
  755. return;
  756. if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
  757. __sk_mem_reclaim(sk);
  758. }
  759. static inline void sk_mem_reclaim_partial(struct sock *sk)
  760. {
  761. if (!sk_has_account(sk))
  762. return;
  763. if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
  764. __sk_mem_reclaim(sk);
  765. }
  766. static inline void sk_mem_charge(struct sock *sk, int size)
  767. {
  768. if (!sk_has_account(sk))
  769. return;
  770. sk->sk_forward_alloc -= size;
  771. }
  772. static inline void sk_mem_uncharge(struct sock *sk, int size)
  773. {
  774. if (!sk_has_account(sk))
  775. return;
  776. sk->sk_forward_alloc += size;
  777. }
  778. static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
  779. {
  780. sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
  781. sk->sk_wmem_queued -= skb->truesize;
  782. sk_mem_uncharge(sk, skb->truesize);
  783. __kfree_skb(skb);
  784. }
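/*
 * Send-path accounting sketch (assumes a protocol for which
 * sk_has_account() is true; the error handling is illustrative). Note
 * that sk_mem_pages() rounds up to whole quanta, e.g. with 4096-byte
 * pages sk_mem_pages(6000) == 2. Schedule the space first, then charge
 * it once the skb is queued:
 *
 *     if (!sk_wmem_schedule(sk, skb->truesize))
 *         return -ENOBUFS;
 *     skb_set_owner_w(skb, sk);
 *     sk->sk_wmem_queued += skb->truesize;
 *     sk_mem_charge(sk, skb->truesize);
 */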
  785. /* Used by processes to "lock" a socket state, so that
  786. * interrupts and bottom half handlers won't change it
  787. * from under us. It essentially blocks any incoming
  788. * packets, so that we won't get any new data or any
  789. * packets that change the state of the socket.
  790. *
  791. * While locked, BH processing will add new packets to
  792. * the backlog queue. This queue is processed by the
  793. * owner of the socket lock right before it is released.
  794. *
  795. * Since ~2.3.5 it is also an exclusive sleep lock serializing
  796. * accesses from user process context.
  797. */
  798. #define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
  799. /*
  800. * Macro so as to not evaluate some arguments when
  801. * lockdep is not enabled.
  802. *
  803. * Mark both the sk_lock and the sk_lock.slock as a
  804. * per-address-family lock class.
  805. */
  806. #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
  807. do { \
  808. sk->sk_lock.owned = 0; \
  809. init_waitqueue_head(&sk->sk_lock.wq); \
  810. spin_lock_init(&(sk)->sk_lock.slock); \
  811. debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
  812. sizeof((sk)->sk_lock)); \
  813. lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
  814. (skey), (sname)); \
  815. lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
  816. } while (0)
  817. extern void lock_sock_nested(struct sock *sk, int subclass);
  818. static inline void lock_sock(struct sock *sk)
  819. {
  820. lock_sock_nested(sk, 0);
  821. }
  822. extern void release_sock(struct sock *sk);
  823. /* BH context may only use the following locking interface. */
  824. #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
  825. #define bh_lock_sock_nested(__sk) \
  826. spin_lock_nested(&((__sk)->sk_lock.slock), \
  827. SINGLE_DEPTH_NESTING)
  828. #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
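/*
 * Locking sketch: process context takes the full socket lock and may
 * sleep; softirq context may only use the spinlock half:
 *
 *     lock_sock(sk);         (process context)
 *     ... update socket state ...
 *     release_sock(sk);      (also drains the backlog)
 *
 *     bh_lock_sock(sk);      (BH context, never sleeps)
 *     ...
 *     bh_unlock_sock(sk);
 */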
  829. extern struct sock *sk_alloc(struct net *net, int family,
  830. gfp_t priority,
  831. struct proto *prot);
  832. extern void sk_free(struct sock *sk);
  833. extern void sk_release_kernel(struct sock *sk);
  834. extern struct sock *sk_clone(const struct sock *sk,
  835. const gfp_t priority);
  836. extern struct sk_buff *sock_wmalloc(struct sock *sk,
  837. unsigned long size, int force,
  838. gfp_t priority);
  839. extern struct sk_buff *sock_rmalloc(struct sock *sk,
  840. unsigned long size, int force,
  841. gfp_t priority);
  842. extern void sock_wfree(struct sk_buff *skb);
  843. extern void sock_rfree(struct sk_buff *skb);
  844. extern int sock_setsockopt(struct socket *sock, int level,
  845. int op, char __user *optval,
  846. unsigned int optlen);
  847. extern int sock_getsockopt(struct socket *sock, int level,
  848. int op, char __user *optval,
  849. int __user *optlen);
  850. extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
  851. unsigned long size,
  852. int noblock,
  853. int *errcode);
  854. extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
  855. unsigned long header_len,
  856. unsigned long data_len,
  857. int noblock,
  858. int *errcode);
  859. extern void *sock_kmalloc(struct sock *sk, int size,
  860. gfp_t priority);
  861. extern void sock_kfree_s(struct sock *sk, void *mem, int size);
  862. extern void sk_send_sigurg(struct sock *sk);
  863. /*
  864. * Functions to fill in entries in struct proto_ops when a protocol
  865. * does not implement a particular function.
  866. */
  867. extern int sock_no_bind(struct socket *,
  868. struct sockaddr *, int);
  869. extern int sock_no_connect(struct socket *,
  870. struct sockaddr *, int, int);
  871. extern int sock_no_socketpair(struct socket *,
  872. struct socket *);
  873. extern int sock_no_accept(struct socket *,
  874. struct socket *, int);
  875. extern int sock_no_getname(struct socket *,
  876. struct sockaddr *, int *, int);
  877. extern unsigned int sock_no_poll(struct file *, struct socket *,
  878. struct poll_table_struct *);
  879. extern int sock_no_ioctl(struct socket *, unsigned int,
  880. unsigned long);
  881. extern int sock_no_listen(struct socket *, int);
  882. extern int sock_no_shutdown(struct socket *, int);
  883. extern int sock_no_getsockopt(struct socket *, int , int,
  884. char __user *, int __user *);
  885. extern int sock_no_setsockopt(struct socket *, int, int,
  886. char __user *, unsigned int);
  887. extern int sock_no_sendmsg(struct kiocb *, struct socket *,
  888. struct msghdr *, size_t);
  889. extern int sock_no_recvmsg(struct kiocb *, struct socket *,
  890. struct msghdr *, size_t, int);
  891. extern int sock_no_mmap(struct file *file,
  892. struct socket *sock,
  893. struct vm_area_struct *vma);
  894. extern ssize_t sock_no_sendpage(struct socket *sock,
  895. struct page *page,
  896. int offset, size_t size,
  897. int flags);
  898. /*
  899. * Functions to fill in entries in struct proto_ops when a protocol
  900. * uses the inet style.
  901. */
  902. extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
  903. char __user *optval, int __user *optlen);
  904. extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
  905. struct msghdr *msg, size_t size, int flags);
  906. extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
  907. char __user *optval, unsigned int optlen);
  908. extern int compat_sock_common_getsockopt(struct socket *sock, int level,
  909. int optname, char __user *optval, int __user *optlen);
  910. extern int compat_sock_common_setsockopt(struct socket *sock, int level,
  911. int optname, char __user *optval, unsigned int optlen);
  912. extern void sk_common_release(struct sock *sk);
  913. /*
  914. * Default socket callbacks and setup code
  915. */
  916. /* Initialise core socket variables */
  917. extern void sock_init_data(struct socket *sock, struct sock *sk);
  918. /**
  919. * sk_filter_release: Release a socket filter
  920. * @fp: filter to remove
  921. *
  922. * Remove a filter from a socket and release its resources.
  923. */
  924. static inline void sk_filter_release(struct sk_filter *fp)
  925. {
  926. if (atomic_dec_and_test(&fp->refcnt))
  927. kfree(fp);
  928. }
  929. static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
  930. {
  931. unsigned int size = sk_filter_len(fp);
  932. atomic_sub(size, &sk->sk_omem_alloc);
  933. sk_filter_release(fp);
  934. }
  935. static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
  936. {
  937. atomic_inc(&fp->refcnt);
  938. atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
  939. }
  940. /*
  941. * Socket reference counting postulates.
  942. *
  943. * * Each user of a socket SHOULD hold a reference count.
  944. * * Each access point to a socket (a hash table bucket, a reference from a
  945. * list, a running timer, an skb in flight) MUST hold a reference count.
  946. * * When the reference count hits 0, it will never increase again.
  947. * * When the reference count hits 0, no references from outside exist to
  948. * this socket and the current process on the current CPU
  949. * is the last user and may/should destroy this socket.
  950. * * sk_free is called from any context: process, BH, IRQ. When
  951. * it is called, the socket has no references from outside -> sk_free
  952. * may release descendant resources allocated by the socket, but
  953. * by the time it is called, the socket is NOT referenced by any
  954. * hash tables, lists etc.
  955. * * Packets delivered from outside (from the network or from another process)
  956. * and enqueued on receive/error queues SHOULD NOT grab a reference count
  957. * while they sit in a queue. Otherwise packets will leak, when a
  958. * socket is looked up by one CPU while unhashing is done by another CPU.
  959. * This is true for udp/raw, netlink (leaks to the receive and error queues)
  960. * and tcp (leaks to the backlog). The packet socket does all its processing
  961. * inside BR_NETPROTO_LOCK, so it does not have this race condition. UNIX
  962. * sockets use a separate SMP lock, so they are not prone to it either.
  963. */
  964. /* Ungrab socket and destroy it, if it was the last reference. */
  965. static inline void sock_put(struct sock *sk)
  966. {
  967. if (atomic_dec_and_test(&sk->sk_refcnt))
  968. sk_free(sk);
  969. }
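/*
 * Lookup sketch following the postulates above (the bucket lock and
 * lookup helper are hypothetical): take a private reference while the
 * lookup lock is held, and drop it with sock_put() when done:
 *
 *     read_lock(&bucket_lock);
 *     sk = bucket_lookup(...);
 *     if (sk)
 *         sock_hold(sk);
 *     read_unlock(&bucket_lock);
 *     ...
 *     if (sk)
 *         sock_put(sk);
 */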
  970. extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
  971. const int nested);
  972. static inline void sk_set_socket(struct sock *sk, struct socket *sock)
  973. {
  974. sk->sk_socket = sock;
  975. }
  976. /* Detach socket from process context.
  977. * Announce socket dead, detach it from wait queue and inode.
  978. * Note that the parent inode holds a reference count on this struct sock;
  979. * we do not release it in this function, because the protocol
  980. * probably wants some additional cleanups, or even to continue
  981. * working with this socket (TCP does).
  982. */
  983. static inline void sock_orphan(struct sock *sk)
  984. {
  985. write_lock_bh(&sk->sk_callback_lock);
  986. sock_set_flag(sk, SOCK_DEAD);
  987. sk_set_socket(sk, NULL);
  988. sk->sk_sleep = NULL;
  989. write_unlock_bh(&sk->sk_callback_lock);
  990. }
  991. static inline void sock_graft(struct sock *sk, struct socket *parent)
  992. {
  993. write_lock_bh(&sk->sk_callback_lock);
  994. sk->sk_sleep = &parent->wait;
  995. parent->sk = sk;
  996. sk_set_socket(sk, parent);
  997. security_sock_graft(sk, parent);
  998. write_unlock_bh(&sk->sk_callback_lock);
  999. }
  1000. extern int sock_i_uid(struct sock *sk);
  1001. extern unsigned long sock_i_ino(struct sock *sk);
  1002. static inline struct dst_entry *
  1003. __sk_dst_get(struct sock *sk)
  1004. {
  1005. return sk->sk_dst_cache;
  1006. }
  1007. static inline struct dst_entry *
  1008. sk_dst_get(struct sock *sk)
  1009. {
  1010. struct dst_entry *dst;
  1011. read_lock(&sk->sk_dst_lock);
  1012. dst = sk->sk_dst_cache;
  1013. if (dst)
  1014. dst_hold(dst);
  1015. read_unlock(&sk->sk_dst_lock);
  1016. return dst;
  1017. }
  1018. static inline void
  1019. __sk_dst_set(struct sock *sk, struct dst_entry *dst)
  1020. {
  1021. struct dst_entry *old_dst;
  1022. old_dst = sk->sk_dst_cache;
  1023. sk->sk_dst_cache = dst;
  1024. dst_release(old_dst);
  1025. }
  1026. static inline void
  1027. sk_dst_set(struct sock *sk, struct dst_entry *dst)
  1028. {
  1029. write_lock(&sk->sk_dst_lock);
  1030. __sk_dst_set(sk, dst);
  1031. write_unlock(&sk->sk_dst_lock);
  1032. }
  1033. static inline void
  1034. __sk_dst_reset(struct sock *sk)
  1035. {
  1036. struct dst_entry *old_dst;
  1037. old_dst = sk->sk_dst_cache;
  1038. sk->sk_dst_cache = NULL;
  1039. dst_release(old_dst);
  1040. }
  1041. static inline void
  1042. sk_dst_reset(struct sock *sk)
  1043. {
  1044. write_lock(&sk->sk_dst_lock);
  1045. __sk_dst_reset(sk);
  1046. write_unlock(&sk->sk_dst_lock);
  1047. }
  1048. extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
  1049. extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
  1050. static inline int sk_can_gso(const struct sock *sk)
  1051. {
  1052. return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
  1053. }
  1054. extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
  1055. static inline int skb_copy_to_page(struct sock *sk, char __user *from,
  1056. struct sk_buff *skb, struct page *page,
  1057. int off, int copy)
  1058. {
  1059. if (skb->ip_summed == CHECKSUM_NONE) {
  1060. int err = 0;
  1061. __wsum csum = csum_and_copy_from_user(from,
  1062. page_address(page) + off,
  1063. copy, 0, &err);
  1064. if (err)
  1065. return err;
  1066. skb->csum = csum_block_add(skb->csum, csum, skb->len);
  1067. } else if (copy_from_user(page_address(page) + off, from, copy))
  1068. return -EFAULT;
  1069. skb->len += copy;
  1070. skb->data_len += copy;
  1071. skb->truesize += copy;
  1072. sk->sk_wmem_queued += copy;
  1073. sk_mem_charge(sk, copy);
  1074. return 0;
  1075. }
  1076. /**
  1077. * sk_wmem_alloc_get - returns write allocations
  1078. * @sk: socket
  1079. *
  1080. * Returns sk_wmem_alloc minus initial offset of one
  1081. */
  1082. static inline int sk_wmem_alloc_get(const struct sock *sk)
  1083. {
  1084. return atomic_read(&sk->sk_wmem_alloc) - 1;
  1085. }
  1086. /**
  1087. * sk_rmem_alloc_get - returns read allocations
  1088. * @sk: socket
  1089. *
  1090. * Returns sk_rmem_alloc
  1091. */
  1092. static inline int sk_rmem_alloc_get(const struct sock *sk)
  1093. {
  1094. return atomic_read(&sk->sk_rmem_alloc);
  1095. }
  1096. /**
  1097. * sk_has_allocations - check if allocations are outstanding
  1098. * @sk: socket
  1099. *
  1100. * Returns true if socket has write or read allocations
  1101. */
  1102. static inline int sk_has_allocations(const struct sock *sk)
  1103. {
  1104. return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
  1105. }
  1106. /**
  1107. * sk_has_sleeper - check if there are any waiting processes
  1108. * @sk: socket
  1109. *
  1110. * Returns true if socket has waiting processes
  1111. *
  1112. * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
  1113. * barrier call. They were added due to the race found within the tcp code.
  1114. *
  1115. * Consider following tcp code paths:
  1116. *
  1117. * CPU1 CPU2
  1118. *
  1119. * sys_select receive packet
  1120. * ... ...
  1121. * __add_wait_queue update tp->rcv_nxt
  1122. * ... ...
  1123. * tp->rcv_nxt check sock_def_readable
  1124. * ... {
  1125. * schedule ...
  1126. * if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
  1127. * wake_up_interruptible(sk->sk_sleep)
  1128. * ...
  1129. * }
  1130. *
  1131. * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
  1132. * in its cache, and so does the tp->rcv_nxt update on the CPU2 side. CPU1
  1133. * could then end up calling schedule and sleeping forever if there is no
  1134. * more data on the socket.
  1135. *
  1136. * sk_has_sleeper is always called right after a call to read_lock, so we
  1137. * can use the smp_mb__after_lock barrier.
  1138. */
  1139. static inline int sk_has_sleeper(struct sock *sk)
  1140. {
  1141. /*
  1142. * We need to be sure we are in sync with the
  1143. * add_wait_queue modifications to the wait queue.
  1144. *
  1145. * This memory barrier is paired in the sock_poll_wait.
  1146. */
  1147. smp_mb__after_lock();
  1148. return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
  1149. }
  1150. /**
  1151. * sock_poll_wait - place memory barrier behind the poll_wait call.
  1152. * @filp: file
  1153. * @wait_address: socket wait queue
  1154. * @p: poll_table
  1155. *
  1156. * See the comments in the sk_has_sleeper function.
  1157. */
  1158. static inline void sock_poll_wait(struct file *filp,
  1159. wait_queue_head_t *wait_address, poll_table *p)
  1160. {
  1161. if (p && wait_address) {
  1162. poll_wait(filp, wait_address, p);
  1163. /*
  1164. * We need to be sure we are in sync with the
  1165. * socket flags modification.
  1166. *
  1167. * This memory barrier is paired in the sk_has_sleeper.
  1168. */
  1169. smp_mb();
  1170. }
  1171. }
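/*
 * Pairing sketch (modeled on datagram_poll(); foo_poll is illustrative):
 * the poll side registers via sock_poll_wait() before inspecting socket
 * state, while the wakeup side tests sk_has_sleeper(); the paired
 * barriers order the wait-queue insertion against the state checks:
 *
 *     static unsigned int foo_poll(struct file *file, struct socket *sock,
 *                                  poll_table *wait)
 *     {
 *         struct sock *sk = sock->sk;
 *         unsigned int mask = 0;
 *
 *         sock_poll_wait(file, sk->sk_sleep, wait);
 *         if (!skb_queue_empty(&sk->sk_receive_queue))
 *             mask |= POLLIN | POLLRDNORM;
 *         return mask;
 *     }
 */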
  1172. /*
  1173. * Queue a received datagram if it will fit. Stream and sequenced
  1174. * protocols can't normally use this as they need to fit buffers in
  1175. * and play with them.
  1176. *
  1177. * Inlined as it's very short and called for pretty much every
  1178. * packet ever received.
  1179. */
  1180. static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
  1181. {
  1182. skb_orphan(skb);
  1183. skb->sk = sk;
  1184. skb->destructor = sock_wfree;
  1185. /*
  1186. * We used to take a refcount on sk, but the following operation
  1187. * is enough to guarantee sk_free() won't free this sock until
  1188. * all in-flight packets are completed
  1189. */
  1190. atomic_add(skb->truesize, &sk->sk_wmem_alloc);
  1191. }
  1192. static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
  1193. {
  1194. skb_orphan(skb);
  1195. skb->sk = sk;
  1196. skb->destructor = sock_rfree;
  1197. atomic_add(skb->truesize, &sk->sk_rmem_alloc);
  1198. sk_mem_charge(sk, skb->truesize);
  1199. }
  1200. extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
  1201. unsigned long expires);
  1202. extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
  1203. extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
  1204. static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
  1205. {
  1206. /* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces the
  1207. number of warnings when compiling with -W --ANK
  1208. */
  1209. if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
  1210. (unsigned)sk->sk_rcvbuf)
  1211. return -ENOMEM;
  1212. skb_set_owner_r(skb, sk);
  1213. skb_queue_tail(&sk->sk_error_queue, skb);
  1214. if (!sock_flag(sk, SOCK_DEAD))
  1215. sk->sk_data_ready(sk, skb->len);
  1216. return 0;
  1217. }
  1218. /*
  1219. * Recover an error report and clear atomically
  1220. */
  1221. static inline int sock_error(struct sock *sk)
  1222. {
  1223. int err;
  1224. if (likely(!sk->sk_err))
  1225. return 0;
  1226. err = xchg(&sk->sk_err, 0);
  1227. return -err;
  1228. }
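/*
 * Usage sketch: recvmsg/sendmsg implementations typically report and
 * clear any pending error before blocking (the label is illustrative):
 *
 *     err = sock_error(sk);
 *     if (err)
 *         goto out_err;
 */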
  1229. static inline unsigned long sock_wspace(struct sock *sk)
  1230. {
  1231. int amt = 0;
  1232. if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
  1233. amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
  1234. if (amt < 0)
  1235. amt = 0;
  1236. }
  1237. return amt;
  1238. }
  1239. static inline void sk_wake_async(struct sock *sk, int how, int band)
  1240. {
  1241. if (sock_flag(sk, SOCK_FASYNC))
  1242. sock_wake_async(sk->sk_socket, how, band);
  1243. }
  1244. #define SOCK_MIN_SNDBUF 2048
  1245. #define SOCK_MIN_RCVBUF 256
  1246. static inline void sk_stream_moderate_sndbuf(struct sock *sk)
  1247. {
  1248. if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
  1249. sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
  1250. sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
  1251. }
  1252. }
  1253. struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
  1254. static inline struct page *sk_stream_alloc_page(struct sock *sk)
  1255. {
  1256. struct page *page = NULL;
  1257. page = alloc_pages(sk->sk_allocation, 0);
  1258. if (!page) {
  1259. sk->sk_prot->enter_memory_pressure(sk);
  1260. sk_stream_moderate_sndbuf(sk);
  1261. }
  1262. return page;
  1263. }
  1264. /*
  1265. * Default write policy as shown to user space via poll/select/SIGIO
  1266. */
  1267. static inline int sock_writeable(const struct sock *sk)
  1268. {
  1269. return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
  1270. }
  1271. static inline gfp_t gfp_any(void)
  1272. {
  1273. return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
  1274. }
  1275. static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
  1276. {
  1277. return noblock ? 0 : sk->sk_rcvtimeo;
  1278. }
  1279. static inline long sock_sndtimeo(const struct sock *sk, int noblock)
  1280. {
  1281. return noblock ? 0 : sk->sk_sndtimeo;
  1282. }
  1283. static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
  1284. {
  1285. return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
  1286. }
  1287. /* Alas, with a timeout, socket operations are not restartable.
  1288. * Compare this to poll().
  1289. */
  1290. static inline int sock_intr_errno(long timeo)
  1291. {
  1292. return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
  1293. }
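/*
 * Blocking-receive sketch (names illustrative): fetch the timeout once,
 * let sk_wait_data() decrement it, and translate an interrupting signal
 * into the right errno with sock_intr_errno():
 *
 *     timeo = sock_rcvtimeo(sk, noblock);
 *     while (skb_queue_empty(&sk->sk_receive_queue)) {
 *         if (!timeo)
 *             return -EAGAIN;
 *         if (signal_pending(current))
 *             return sock_intr_errno(timeo);
 *         sk_wait_data(sk, &timeo);
 *     }
 */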
  1294. extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
  1295. struct sk_buff *skb);
  1296. static __inline__ void
  1297. sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
  1298. {
  1299. ktime_t kt = skb->tstamp;
  1300. struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
  1301. /*
  1302. * generate control messages if
  1303. * - receive time stamping in software requested (SOCK_RCVTSTAMP
  1304. * or SOCK_TIMESTAMPING_RX_SOFTWARE)
  1305. * - software time stamp available and wanted
  1306. * (SOCK_TIMESTAMPING_SOFTWARE)
  1307. * - hardware time stamps available and wanted
  1308. * (SOCK_TIMESTAMPING_SYS_HARDWARE or
  1309. * SOCK_TIMESTAMPING_RAW_HARDWARE)
  1310. */
  1311. if (sock_flag(sk, SOCK_RCVTSTAMP) ||
  1312. sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
  1313. (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
  1314. (hwtstamps->hwtstamp.tv64 &&
  1315. sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
  1316. (hwtstamps->syststamp.tv64 &&
  1317. sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
  1318. __sock_recv_timestamp(msg, sk, skb);
  1319. else
  1320. sk->sk_stamp = kt;
  1321. }
  1322. /**
  1323. * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
  1324. * @msg: outgoing packet
  1325. * @sk: socket sending this packet
  1326. * @shtx: filled with instructions for time stamping
  1327. *
  1328. * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if
  1329. * parameters are invalid.
  1330. */
  1331. extern int sock_tx_timestamp(struct msghdr *msg,
  1332. struct sock *sk,
  1333. union skb_shared_tx *shtx);
  1334. /**
  1335. * sk_eat_skb - Release a skb if it is no longer needed
  1336. * @sk: socket to eat this skb from
  1337. * @skb: socket buffer to eat
  1338. * @copied_early: flag indicating whether DMA operations copied this data early
  1339. *
  1340. * This routine must be called with interrupts disabled or with the socket
  1341. * locked so that the sk_buff queue operation is ok.
  1342. */
  1343. #ifdef CONFIG_NET_DMA
  1344. static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
  1345. {
  1346. __skb_unlink(skb, &sk->sk_receive_queue);
  1347. if (!copied_early)
  1348. __kfree_skb(skb);
  1349. else
  1350. __skb_queue_tail(&sk->sk_async_wait_queue, skb);
  1351. }
  1352. #else
  1353. static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
  1354. {
  1355. __skb_unlink(skb, &sk->sk_receive_queue);
  1356. __kfree_skb(skb);
  1357. }
  1358. #endif
  1359. static inline
  1360. struct net *sock_net(const struct sock *sk)
  1361. {
  1362. #ifdef CONFIG_NET_NS
  1363. return sk->sk_net;
  1364. #else
  1365. return &init_net;
  1366. #endif
  1367. }
  1368. static inline
  1369. void sock_net_set(struct sock *sk, struct net *net)
  1370. {
  1371. #ifdef CONFIG_NET_NS
  1372. sk->sk_net = net;
  1373. #endif
  1374. }
  1375. /*
  1376. * Kernel sockets, e.g. rtnl or icmp_socket, are part of a namespace.
  1377. * They should not hold a reference to the namespace, in order to allow
  1378. * the namespace to be stopped.
  1379. * Sockets switched over with sk_change_net should be released using sk_release_kernel.
  1380. */
  1381. static inline void sk_change_net(struct sock *sk, struct net *net)
  1382. {
  1383. put_net(sock_net(sk));
  1384. sock_net_set(sk, hold_net(net));
  1385. }
  1386. static inline struct sock *skb_steal_sock(struct sk_buff *skb)
  1387. {
  1388. if (unlikely(skb->sk)) {
  1389. struct sock *sk = skb->sk;
  1390. skb->destructor = NULL;
  1391. skb->sk = NULL;
  1392. return sk;
  1393. }
  1394. return NULL;
  1395. }
  1396. extern void sock_enable_timestamp(struct sock *sk, int flag);
  1397. extern int sock_get_timestamp(struct sock *, struct timeval __user *);
  1398. extern int sock_get_timestampns(struct sock *, struct timespec __user *);
  1399. /*
  1400. * Enable debug/info messages
  1401. */
  1402. extern int net_msg_warn;
  1403. #define NETDEBUG(fmt, args...) \
  1404. do { if (net_msg_warn) printk(fmt,##args); } while (0)
  1405. #define LIMIT_NETDEBUG(fmt, args...) \
  1406. do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)
  1407. extern __u32 sysctl_wmem_max;
  1408. extern __u32 sysctl_rmem_max;
  1409. extern void sk_init(void);
  1410. extern int sysctl_optmem_max;
  1411. extern __u32 sysctl_wmem_default;
  1412. extern __u32 sysctl_rmem_default;
  1413. #endif /* _SOCK_H */