/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/pcounter.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/net_namespace.h>
/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;
struct sock;
struct proto;

/**
 * struct sock_common - minimal network layer representation of sockets
 * @skc_family: network address family
 * @skc_state: Connection state
 * @skc_reuse: %SO_REUSEADDR setting
 * @skc_bound_dev_if: bound device index if != 0
 * @skc_node: main hash linkage for various protocol lookup tables
 * @skc_bind_node: bind hash linkage for various protocol lookup tables
 * @skc_refcnt: reference count
 * @skc_hash: hash value used with various protocol lookup tables
 * @skc_prot: protocol handlers inside a network family
 * @skc_net: reference to the network namespace of this socket
 *
 * This is the minimal network layer representation of sockets, the header
 * for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
	unsigned int		skc_hash;
	struct proto		*skc_prot;
	struct net		*skc_net;
};
/**
 * struct sock - network layer representation of sockets
 * @__sk_common: shared layout with inet_timewait_sock
 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 * @sk_lock: synchronizer
 * @sk_rcvbuf: size of receive buffer in bytes
 * @sk_sleep: sock wait queue
 * @sk_dst_cache: destination cache
 * @sk_dst_lock: destination cache lock
 * @sk_policy: flow policy
 * @sk_rmem_alloc: receive queue bytes committed
 * @sk_receive_queue: incoming packets
 * @sk_wmem_alloc: transmit queue bytes committed
 * @sk_write_queue: Packet sending queue
 * @sk_async_wait_queue: DMA copied packets
 * @sk_omem_alloc: "o" is "option" or "other"
 * @sk_wmem_queued: persistent queue size
 * @sk_forward_alloc: space allocated forward
 * @sk_allocation: allocation mode
 * @sk_sndbuf: size of send buffer in bytes
 * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *	      %SO_OOBINLINE settings
 * @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 * @sk_lingertime: %SO_LINGER l_linger setting
 * @sk_backlog: always used with the per-socket spinlock held
 * @sk_callback_lock: used with the callbacks in the end of this struct
 * @sk_error_queue: rarely used
 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
 *		     IPV6_ADDRFORM for instance)
 * @sk_err: last error
 * @sk_err_soft: errors that don't cause failure but are the cause of a
 *		 persistent failure, not just 'timed out'
 * @sk_drops: raw drops counter
 * @sk_ack_backlog: current listen backlog
 * @sk_max_ack_backlog: listen backlog set in listen()
 * @sk_priority: %SO_PRIORITY setting
 * @sk_type: socket type (%SOCK_STREAM, etc)
 * @sk_protocol: which protocol this socket belongs in this network family
 * @sk_peercred: %SO_PEERCRED setting
 * @sk_rcvlowat: %SO_RCVLOWAT setting
 * @sk_rcvtimeo: %SO_RCVTIMEO setting
 * @sk_sndtimeo: %SO_SNDTIMEO setting
 * @sk_filter: socket filtering instructions
 * @sk_protinfo: private area, net family specific, when not using slab
 * @sk_timer: sock cleanup timer
 * @sk_stamp: time stamp of last packet received
 * @sk_socket: identd and reporting IO signals
 * @sk_user_data: RPC layer private data
 * @sk_sndmsg_page: cached page for sendmsg
 * @sk_sndmsg_off: cached offset for sendmsg
 * @sk_send_head: front of stuff to transmit
 * @sk_security: used by security modules
 * @sk_write_pending: a write to stream socket waits to start
 * @sk_state_change: callback to indicate change in the state of the sock
 * @sk_data_ready: callback to indicate there is data to be processed
 * @sk_write_space: callback to indicate there is buffer space available
 *		    for sending
 * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 * @sk_backlog_rcv: callback to process the backlog
 * @sk_destruct: called at sock freeing time, i.e. when refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_hash			__sk_common.skc_hash
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;
	unsigned short		sk_type;
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
	struct xfrm_policy	*sk_policy[2];
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	struct sk_buff_head	sk_async_wait_queue;
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_gso_type;
	int			sk_rcvlowat;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	atomic_t		sk_drops;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};
/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, e.g. it has been found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */
static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in a context where the socket refcnt cannot hit zero,
   e.g. this is true in the context of any socket call.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
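
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a typical protocol lookup loop over one hash chain using sk_for_each().
 * The function name and the idea of matching on sk_hash alone are
 * hypothetical simplifications; real lookups (e.g. TCP/UDP) also compare
 * addresses and ports. Per the sock_hold() comment above, the caller is
 * assumed to hold the lock protecting this chain.
 */
static inline struct sock *sk_example_lookup(struct hlist_head *head,
					     unsigned int hash)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, head)
		if (sk->sk_hash == hash) {
			sock_hold(sk);	/* take a reference before use */
			return sk;
		}
	return NULL;
}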
/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}
static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_truesize_check(skb);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}
#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					      int optname, char __user *optval,
					      int __user *option);
	int			(*compat_setsockopt)(struct sock *sk,
						     int level,
						     int optname, char __user *optval,
						     int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
						     int level,
						     int optname, char __user *optval,
						     int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					    int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv)(struct sock *sk,
					       struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
	struct pcounter		inuse;

	/* Memory pressure */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non-atomically.
	 * All of sk_stream_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	struct kmem_cache	*slab;
	unsigned int		obj_size;

	atomic_t		*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
};
#define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME)
#define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse)

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	pcounter_add(&prot->inuse, 1);
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	pcounter_add(&prot->inuse, -1);
}

static __inline__ int sock_prot_inuse(struct proto *proto)
{
	return pcounter_getval(&proto->inuse);
}

/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024
#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

static inline int sk_stream_pages(int amt)
{
	return DIV_ROUND_UP(amt, SK_STREAM_MEM_QUANTUM);
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, skb->truesize, 1);
}

static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, size, 0);
}
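
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the usual pattern for charging a new transmit skb against the stream
 * memory accounting above. The function name and the -ENOMEM error choice
 * are hypothetical; real callers such as TCP do this inline in their
 * sendmsg paths.
 */
static inline int sk_example_charge_wmem(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_stream_wmem_schedule(sk, skb->truesize))
		return -ENOMEM;	/* accounting refused the allocation */

	/* account the buffer against the send queue, the same two updates
	 * that sk_charge_skb() (defined later in this file) performs */
	sk->sk_wmem_queued += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
	return 0;
}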
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
			(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
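
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the usual calling convention for the sk_wait_event() macro defined
 * earlier, loosely modelled on helpers such as sk_stream_wait_connect().
 * The caller holds the socket lock; the macro drops it while sleeping and
 * re-takes it before re-testing the condition. The function name and the
 * SOCK_DONE condition are hypothetical, and the sketch assumes the
 * wait-queue helpers (DEFINE_WAIT, prepare_to_wait, TASK_INTERRUPTIBLE)
 * from <linux/wait.h>/<linux/sched.h> are visible, as they are for the
 * usual users of this header.
 */
static inline int sk_example_wait_done(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);
	int done;

	do {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, &timeo, sock_flag(sk, SOCK_DONE));
		finish_wait(sk->sk_sleep, &wait);
	} while (!done && timeo);	/* give up when the timeout expires */

	return done;
}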
extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);
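
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how a protocol's packet-receive path typically combines the BH locking
 * interface above with the backlog queue, modelled on handlers such as
 * tcp_v4_rcv(). If a user context owns the socket lock, the packet is
 * queued to the backlog and will be fed through sk_backlog_rcv() by
 * release_sock(); otherwise it can be processed immediately. The function
 * name is hypothetical.
 */
static inline int sk_example_deliver(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk->sk_backlog_rcv(sk, skb);	/* process now */
	else
		sk_add_backlog(sk, skb);	/* drained by release_sock() */
	bh_unlock_sock(sk);

	return rc;
}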
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
						     filter->len);
		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
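
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how a datagram receive path typically applies the socket filter before
 * queueing, loosely modelled on UDP's receive handling. The function name
 * is hypothetical; real protocols usually go through sock_queue_rcv_skb()
 * (declared later in this file), which also does receive-memory accounting.
 */
static inline int sk_example_filtered_rcv(struct sock *sk, struct sk_buff *skb)
{
	int err = sk_filter(sk, skb);

	if (err) {
		kfree_skb(skb);		/* filter rejected, or trim failed */
		return err;
	}
	skb_queue_tail(&sk->sk_receive_queue, skb);
	return 0;
}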
/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}
/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and the current process on the current CPU
 *   is the last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, the socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   while they sit in a queue. Otherwise, packets will leak when the
 *   socket is looked up by one cpu and unhashing is done by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so that it does not have this race condition.
 *   UNIX sockets use a separate SMP lock, so they avoid this race as well.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
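
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the hold/put discipline from the postulates above, shown for a
 * hypothetical protocol timer. A running timer is an access point to the
 * socket, so it must hold a reference that its handler drops when done.
 * The function names are hypothetical; sk_reset_timer()/sk_stop_timer(),
 * declared later in this file, implement this pairing for real users.
 */
static inline void sk_example_timer_fire(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	/* ... protocol timeout processing would go here ... */

	sock_put(sk);			/* drop the timer's reference */
}

static inline void sk_example_timer_start(struct sock *sk,
					  unsigned long expires)
{
	sock_hold(sk);			/* the pending timer holds a ref */
	sk->sk_timer.function = sk_example_timer_fire;
	sk->sk_timer.data = (unsigned long)sk;
	mod_timer(&sk->sk_timer, expires);
}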
extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
			  const int nested);

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock;
 * we do not release it in this function, because the protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
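
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * re-validating the cached route before transmitting, in the spirit of
 * what TCP's output path does. The function name and the zero cookie are
 * hypothetical placeholders.
 */
static inline struct dst_entry *sk_example_check_route(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	/* If the cached dst was obsolete, __sk_dst_check() has already
	 * dropped it and returned NULL; the protocol would then re-route
	 * and install the new dst with sk_dst_set() before sending.
	 */
	return dst;
}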
static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from,
						      page_address(page) + off,
						      copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}

/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */
static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);
extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
/*
 *	Recover an error report and clear atomically
 */
static inline int sock_error(struct sock *sk)
{
	int err;

	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
				     int size, int mem, gfp_t gfp);

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size,
						  gfp_t gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

static inline gfp_t gfp_any(void)
{
	return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, socket operations with a timeout are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
				  struct sk_buff *skb);

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;
}

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif
extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);

/*
 *	Enable debug/info messages
 */
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
	do { if (net_msg_warn) printk(fmt,##args); } while (0)

#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)

/*
 * Macros for sleeping on a socket. Use them like this:
 *
 *	SOCK_SLEEP_PRE(sk)
 *	if (condition)
 *		schedule();
 *	SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, afaik, only ever used in DECnet
 * and when the last use of them in DECnet has gone, I'm intending to
 * remove them.
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern void sk_init(void);

#ifdef CONFIG_SYSCTL
extern struct ctl_table core_table[];
#endif

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */