/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * The base lksctp header.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 *    Kevin Gao <kevin.gao@intel.com>
 *    Ryan Layer <rmlayer@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#ifndef __net_sctp_h__
#define __net_sctp_h__

/* Header Strategy.
 *    Start getting some control over the header file dependencies:
 *       includes
 *       constants
 *       structs
 *       prototypes
 *       macros, externs, and inlines
 *
 *    Move test_frame specific items out of the kernel headers
 *    and into the test frame headers.  This is not perfect in any sense
 *    and will continue to evolve.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/idr.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_route.h>
#endif

#include <asm/uaccess.h>
#include <asm/page.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/sctp/structs.h>
#include <net/sctp/constants.h>

#ifdef CONFIG_IP_SCTP_MODULE
#define SCTP_PROTOSW_FLAG 0
#else /* static! */
#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
#endif
/*
 * Function declarations.
 */

/*
 * sctp/protocol.c
 */
extern int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
                                     sctp_scope_t, gfp_t gfp,
                                     int flags);
extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
extern void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);

/*
 * sctp/socket.c
 */
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk, int len);
unsigned int sctp_poll(struct file *file, struct socket *sock,
                       poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
                    struct sctp_association *asoc);
extern struct percpu_counter sctp_sockets_allocated;
extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
/*
 * sctp/primitive.c
 */
int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg);
int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg);
int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);

/*
 * sctp/input.c
 */
int sctp_rcv(struct sk_buff *skb);
void sctp_v4_err(struct sk_buff *skb, u32 info);
void sctp_hash_established(struct sctp_association *);
void sctp_unhash_established(struct sctp_association *);
void sctp_hash_endpoint(struct sctp_endpoint *);
void sctp_unhash_endpoint(struct sctp_endpoint *);
struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
                             struct sctphdr *, struct sctp_association **,
                             struct sctp_transport **);
void sctp_err_finish(struct sock *, struct sctp_association *);
void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
                           struct sctp_transport *t, __u32 pmtu);
void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
                        struct sk_buff *);
void sctp_icmp_proto_unreachable(struct sock *sk,
                                 struct sctp_association *asoc,
                                 struct sctp_transport *t);
void sctp_backlog_migrate(struct sctp_association *assoc,
                          struct sock *oldsk, struct sock *newsk);

/*
 * sctp/proc.c
 */
int sctp_snmp_proc_init(struct net *net);
void sctp_snmp_proc_exit(struct net *net);
int sctp_eps_proc_init(struct net *net);
void sctp_eps_proc_exit(struct net *net);
int sctp_assocs_proc_init(struct net *net);
void sctp_assocs_proc_exit(struct net *net);
int sctp_remaddr_proc_init(struct net *net);
void sctp_remaddr_proc_exit(struct net *net);
/*
 *  Module global variables
 */

/*
 * sctp/protocol.c
 */
extern struct kmem_cache *sctp_chunk_cachep __read_mostly;
extern struct kmem_cache *sctp_bucket_cachep __read_mostly;

/*
 *  Section:  Macros, externs, and inlines
 */

/* spin lock wrappers. */
#define sctp_spin_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
#define sctp_spin_unlock_irqrestore(lock, flags)  \
        spin_unlock_irqrestore(lock, flags)
#define sctp_local_bh_disable() local_bh_disable()
#define sctp_local_bh_enable()  local_bh_enable()
#define sctp_spin_lock(lock)    spin_lock(lock)
#define sctp_spin_unlock(lock)  spin_unlock(lock)
#define sctp_write_lock(lock)   write_lock(lock)
#define sctp_write_unlock(lock) write_unlock(lock)
#define sctp_read_lock(lock)    read_lock(lock)
#define sctp_read_unlock(lock)  read_unlock(lock)

/* sock lock wrappers. */
#define sctp_lock_sock(sk)       lock_sock(sk)
#define sctp_release_sock(sk)    release_sock(sk)
#define sctp_bh_lock_sock(sk)    bh_lock_sock(sk)
#define sctp_bh_unlock_sock(sk)  bh_unlock_sock(sk)

/* SCTP SNMP MIB stats handlers */
#define SCTP_INC_STATS(net, field)      SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
#define SCTP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
#define SCTP_DEC_STATS(net, field)      SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
/* sctp mib definitions */
enum {
        SCTP_MIB_NUM = 0,
        SCTP_MIB_CURRESTAB,                     /* CurrEstab */
        SCTP_MIB_ACTIVEESTABS,                  /* ActiveEstabs */
        SCTP_MIB_PASSIVEESTABS,                 /* PassiveEstabs */
        SCTP_MIB_ABORTEDS,                      /* Aborteds */
        SCTP_MIB_SHUTDOWNS,                     /* Shutdowns */
        SCTP_MIB_OUTOFBLUES,                    /* OutOfBlues */
        SCTP_MIB_CHECKSUMERRORS,                /* ChecksumErrors */
        SCTP_MIB_OUTCTRLCHUNKS,                 /* OutCtrlChunks */
        SCTP_MIB_OUTORDERCHUNKS,                /* OutOrderChunks */
        SCTP_MIB_OUTUNORDERCHUNKS,              /* OutUnorderChunks */
        SCTP_MIB_INCTRLCHUNKS,                  /* InCtrlChunks */
        SCTP_MIB_INORDERCHUNKS,                 /* InOrderChunks */
        SCTP_MIB_INUNORDERCHUNKS,               /* InUnorderChunks */
        SCTP_MIB_FRAGUSRMSGS,                   /* FragUsrMsgs */
        SCTP_MIB_REASMUSRMSGS,                  /* ReasmUsrMsgs */
        SCTP_MIB_OUTSCTPPACKS,                  /* OutSCTPPacks */
        SCTP_MIB_INSCTPPACKS,                   /* InSCTPPacks */
        SCTP_MIB_T1_INIT_EXPIREDS,
        SCTP_MIB_T1_COOKIE_EXPIREDS,
        SCTP_MIB_T2_SHUTDOWN_EXPIREDS,
        SCTP_MIB_T3_RTX_EXPIREDS,
        SCTP_MIB_T4_RTO_EXPIREDS,
        SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS,
        SCTP_MIB_DELAY_SACK_EXPIREDS,
        SCTP_MIB_AUTOCLOSE_EXPIREDS,
        SCTP_MIB_T1_RETRANSMITS,
        SCTP_MIB_T3_RETRANSMITS,
        SCTP_MIB_PMTUD_RETRANSMITS,
        SCTP_MIB_FAST_RETRANSMITS,
        SCTP_MIB_IN_PKT_SOFTIRQ,
        SCTP_MIB_IN_PKT_BACKLOG,
        SCTP_MIB_IN_PKT_DISCARDS,
        SCTP_MIB_IN_DATA_CHUNK_DISCARDS,
        __SCTP_MIB_MAX
};

#define SCTP_MIB_MAX    __SCTP_MIB_MAX
struct sctp_mib {
        unsigned long   mibs[SCTP_MIB_MAX];
};
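
/*
 * Illustrative example (not part of the original header), assuming a
 * hypothetical transmit path with a valid 'struct net *net' in hand:
 * each outgoing SCTP packet would bump the OutSCTPPacks counter with
 *
 *      SCTP_INC_STATS(net, SCTP_MIB_OUTSCTPPACKS);
 *
 * These per-net counters back the statistics exported by sctp/proc.c.
 */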
/* Helper to track the maximum observed RTO and the transport it was seen on. */
static inline void sctp_max_rto(struct sctp_association *asoc,
                                struct sctp_transport *trans)
{
        if (asoc->stats.max_obs_rto < (__u64)trans->rto) {
                asoc->stats.max_obs_rto = trans->rto;
                memset(&asoc->stats.obs_rto_ipaddr, 0,
                       sizeof(struct sockaddr_storage));
                memcpy(&asoc->stats.obs_rto_ipaddr, &trans->ipaddr,
                       trans->af_specific->sockaddr_len);
        }
}
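
/*
 * Illustrative sketch (not part of the original header): a caller that has
 * just recomputed a transport's RTO is expected to record the new maximum,
 * e.g.
 *
 *      trans->rto = new_rto;           (new_rto is a hypothetical local)
 *      sctp_max_rto(asoc, trans);
 */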
/*
 * Macros for keeping a global reference of object allocations.
 */
#ifdef CONFIG_SCTP_DBG_OBJCNT

extern atomic_t sctp_dbg_objcnt_sock;
extern atomic_t sctp_dbg_objcnt_ep;
extern atomic_t sctp_dbg_objcnt_assoc;
extern atomic_t sctp_dbg_objcnt_transport;
extern atomic_t sctp_dbg_objcnt_chunk;
extern atomic_t sctp_dbg_objcnt_bind_addr;
extern atomic_t sctp_dbg_objcnt_bind_bucket;
extern atomic_t sctp_dbg_objcnt_addr;
extern atomic_t sctp_dbg_objcnt_ssnmap;
extern atomic_t sctp_dbg_objcnt_datamsg;
extern atomic_t sctp_dbg_objcnt_keys;

/* Macros to atomically increment/decrement objcnt counters. */
#define SCTP_DBG_OBJCNT_INC(name) \
atomic_inc(&sctp_dbg_objcnt_## name)
#define SCTP_DBG_OBJCNT_DEC(name) \
atomic_dec(&sctp_dbg_objcnt_## name)
#define SCTP_DBG_OBJCNT(name) \
atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)

/* Macro to help create new entries in the global array of
 * objcnt counters.
 */
#define SCTP_DBG_OBJCNT_ENTRY(name) \
{.label= #name, .counter= &sctp_dbg_objcnt_## name}

void sctp_dbg_objcnt_init(struct net *);
void sctp_dbg_objcnt_exit(struct net *);

#else

#define SCTP_DBG_OBJCNT_INC(name)
#define SCTP_DBG_OBJCNT_DEC(name)

static inline void sctp_dbg_objcnt_init(struct net *net) { return; }
static inline void sctp_dbg_objcnt_exit(struct net *net) { return; }

#endif /* CONFIG_SCTP_DBG_OBJCNT */
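
/*
 * Illustrative example (not part of the original header): object
 * constructors and destructors pair these counters, e.g. incrementing with
 * SCTP_DBG_OBJCNT_INC(chunk) on allocation and decrementing with
 * SCTP_DBG_OBJCNT_DEC(chunk) on free, so that leaked objects show up as
 * non-zero counts when CONFIG_SCTP_DBG_OBJCNT is enabled.
 */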
#if defined CONFIG_SYSCTL
void sctp_sysctl_register(void);
void sctp_sysctl_unregister(void);
int sctp_sysctl_net_register(struct net *net);
void sctp_sysctl_net_unregister(struct net *net);
#else
static inline void sctp_sysctl_register(void) { return; }
static inline void sctp_sysctl_unregister(void) { return; }
static inline int sctp_sysctl_net_register(struct net *net) { return 0; }
static inline void sctp_sysctl_net_unregister(struct net *net) { return; }
#endif

/* Size of Supported Address Parameter for 'x' address types. */
#define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))

#if IS_ENABLED(CONFIG_IPV6)
void sctp_v6_pf_init(void);
void sctp_v6_pf_exit(void);
int sctp_v6_protosw_init(void);
void sctp_v6_protosw_exit(void);
int sctp_v6_add_protocol(void);
void sctp_v6_del_protocol(void);
#else /* #if IS_ENABLED(CONFIG_IPV6) */
static inline void sctp_v6_pf_init(void) { return; }
static inline void sctp_v6_pf_exit(void) { return; }
static inline int sctp_v6_protosw_init(void) { return 0; }
static inline void sctp_v6_protosw_exit(void) { return; }
static inline int sctp_v6_add_protocol(void) { return 0; }
static inline void sctp_v6_del_protocol(void) { return; }
#endif /* #if IS_ENABLED(CONFIG_IPV6) */
/* Map an association to an assoc_id. */
static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
{
        return asoc ? asoc->assoc_id : 0;
}

/* Look up the association by its id.  */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);

int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);

/* A macro to walk a list of skbs.  */
#define sctp_skb_for_each(pos, head, tmp) \
        skb_queue_walk_safe(head, pos, tmp)

/* A helper to append an entire skb list (list) to another (head). */
static inline void sctp_skb_list_tail(struct sk_buff_head *list,
                                      struct sk_buff_head *head)
{
        unsigned long flags;

        sctp_spin_lock_irqsave(&head->lock, flags);
        sctp_spin_lock(&list->lock);

        skb_queue_splice_tail_init(list, head);

        sctp_spin_unlock(&list->lock);
        sctp_spin_unlock_irqrestore(&head->lock, flags);
}
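
/*
 * Illustrative example (not part of the original header), using a
 * hypothetical local queue: skbs collected on a private list can be
 * appended to a socket's receive queue in one locked splice:
 *
 *      struct sk_buff_head temp;
 *
 *      skb_queue_head_init(&temp);
 *      ... queue skbs on &temp ...
 *      sctp_skb_list_tail(&temp, &sk->sk_receive_queue);
 */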
/**
 *      sctp_list_dequeue - remove from the head of the queue
 *      @list: list to dequeue from
 *
 *      Remove the head of the list. The head item is
 *      returned or %NULL if the list is empty.
 */
static inline struct list_head *sctp_list_dequeue(struct list_head *list)
{
        struct list_head *result = NULL;

        if (list->next != list) {
                result = list->next;
                list->next = result->next;
                list->next->prev = list;
                INIT_LIST_HEAD(result);
        }
        return result;
}
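
/*
 * Illustrative example (not part of the original header), assuming entries
 * that embed a hypothetical 'struct my_entry' with a 'list' member:
 *
 *      struct list_head *pos;
 *
 *      while ((pos = sctp_list_dequeue(&queue)) != NULL) {
 *              struct my_entry *e = list_entry(pos, struct my_entry, list);
 *              ... process and free 'e' ...
 *      }
 */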
/* SCTP version of skb_set_owner_r.  We need this one because
 * of the way we have to do receive buffer accounting on bundled
 * chunks.
 */
static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
        struct sctp_ulpevent *event = sctp_skb2event(skb);

        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = sctp_sock_rfree;
        atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
        /*
         * This mimics the behavior of skb_set_owner_r
         */
        sk->sk_forward_alloc -= event->rmem_len;
}
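
/*
 * Illustrative sketch (not part of the original header), assuming a
 * receive path that already holds the socket lock: an event's skb is
 * charged to the owning socket before it is queued, e.g.
 *
 *      sctp_skb_set_owner_r(skb, sk);
 *      skb_queue_tail(&sk->sk_receive_queue, skb);
 *
 * and sctp_sock_rfree() returns the rmem_len accounting when the skb is
 * eventually freed.
 */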
/* Tests if the list has one and only one entry. */
static inline int sctp_list_single_entry(struct list_head *head)
{
        return (head->next != head) && (head->next == head->prev);
}

/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */
static inline __s32 sctp_jitter(__u32 rto)
{
        static __u32 sctp_rand;
        __s32 ret;

        /* Avoid divide by zero. */
        if (!rto)
                rto = 1;

        sctp_rand += jiffies;
        sctp_rand ^= (sctp_rand << 12);
        sctp_rand ^= (sctp_rand >> 20);

        /* Choose random number from 0 to rto, then move to -50% ~ +50%
         * of rto.
         */
        ret = sctp_rand % rto - (rto >> 1);
        return ret;
}
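
/*
 * Worked example (not part of the original header): for rto == 3000
 * jiffies, sctp_rand % rto lies in [0, 2999] and rto >> 1 is 1500, so the
 * returned jitter lies in [-1500, +1499], i.e. roughly -50% ~ +50% of the
 * input RTO.
 */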
/* Break down data chunks at this point.  */
static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
{
        struct sctp_sock *sp = sctp_sk(asoc->base.sk);

        int frag = pmtu;

        frag -= sp->pf->af->net_header_len;
        frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);

        if (asoc->user_frag)
                frag = min_t(int, frag, asoc->user_frag);

        frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN);

        return frag;
}
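
/*
 * Worked example (not part of the original header), assuming a 20-byte
 * IPv4 header, the 12-byte SCTP common header and a 16-byte DATA chunk
 * header: with pmtu == 1500 and no user_frag setting,
 *
 *      frag = 1500 - 20 - (12 + 16) = 1452
 *
 * so user messages larger than 1452 bytes are split into multiple DATA
 * chunks at this boundary.
 */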
static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc)
{
        sctp_assoc_sync_pmtu(sk, asoc);
        asoc->pmtu_pending = 0;
}
/* Walk through a list of TLV parameters.  Don't trust the
 * individual parameter lengths and instead depend on
 * the chunk length to indicate when to stop.  Make sure
 * there is room for a param header too.
 */
#define sctp_walk_params(pos, chunk, member)\
_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)

#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
     pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
     ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
     pos.v += WORD_ROUND(ntohs(pos.p->length)))

#define sctp_walk_errors(err, chunk_hdr)\
_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))

#define _sctp_walk_errors(err, chunk_hdr, end)\
for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
            sizeof(sctp_chunkhdr_t));\
     (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
     ntohs(err->length) >= sizeof(sctp_errhdr_t); \
     err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))

#define sctp_walk_fwdtsn(pos, chunk)\
_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))

#define _sctp_walk_fwdtsn(pos, chunk, end)\
for (pos = chunk->subh.fwdtsn_hdr->skip;\
     (void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\
     pos++)
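
/*
 * Illustrative example (not part of the original header): walking the
 * variable parameters of a received INIT, assuming 'peer_init' points at
 * the chunk payload as a struct sctp_init_chunk:
 *
 *      union sctp_params param;
 *
 *      sctp_walk_params(param, peer_init, init_hdr.params) {
 *              if (param.p->type == SCTP_PARAM_IPV4_ADDRESS)
 *                      ... handle an IPv4 address parameter ...
 *      }
 */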
/* Round an int up to the next multiple of 4.  */
#define WORD_ROUND(s) (((s)+3)&~3)
/* External references. */

extern struct proto sctp_prot;
extern struct proto sctpv6_prot;
void sctp_put_port(struct sock *sk);

extern struct idr sctp_assocs_id;
extern spinlock_t sctp_assocs_id_lock;

/* Static inline functions. */

/* Convert from an IP version number to an Address Family symbol.  */
static inline int ipver2af(__u8 ipver)
{
        switch (ipver) {
        case 4:
                return AF_INET;
        case 6:
                return AF_INET6;
        default:
                return 0;
        }
}

/* Convert from an address parameter type to an address family.  */
static inline int param_type2af(__be16 type)
{
        switch (type) {
        case SCTP_PARAM_IPV4_ADDRESS:
                return AF_INET;
        case SCTP_PARAM_IPV6_ADDRESS:
                return AF_INET6;
        default:
                return 0;
        }
}
/* Warning: The following hash functions assume a power of two 'size'. */

/* This is the hash function for the SCTP port hash table. */
static inline int sctp_phashfn(struct net *net, __u16 lport)
{
        return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1);
}

/* This is the hash function for the endpoint hash table. */
static inline int sctp_ep_hashfn(struct net *net, __u16 lport)
{
        return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1);
}

/* This is the hash function for the association hash table. */
static inline int sctp_assoc_hashfn(struct net *net, __u16 lport, __u16 rport)
{
        int h = (lport << 16) + rport + net_hash_mix(net);
        h ^= h>>8;
        return h & (sctp_assoc_hashsize - 1);
}

/* This is the hash function for the association hash table.  This is
 * not used yet, but could be used as a better hash function when
 * we have a vtag.
 */
static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
{
        int h = (lport << 16) + rport;
        h ^= vtag;
        return h & (sctp_assoc_hashsize - 1);
}

#define sctp_for_each_hentry(epb, head) \
        hlist_for_each_entry(epb, head, node)
/* Is a socket of this style? */
#define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
static inline int __sctp_style(const struct sock *sk, sctp_socket_type_t style)
{
        return sctp_sk(sk)->type == style;
}

/* Is the association in this state? */
#define sctp_state(asoc, state) __sctp_state((asoc), (SCTP_STATE_##state))
static inline int __sctp_state(const struct sctp_association *asoc,
                               sctp_state_t state)
{
        return asoc->state == state;
}

/* Is the socket in this state? */
#define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state))
static inline int __sctp_sstate(const struct sock *sk, sctp_sock_state_t state)
{
        return sk->sk_state == state;
}
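
/*
 * Illustrative example (not part of the original header): the macros above
 * paste the short name onto the full constant, so a caller can write
 *
 *      if (sctp_style(sk, TCP) && sctp_state(asoc, ESTABLISHED))
 *              ...
 *
 * instead of spelling out SCTP_SOCKET_TCP and SCTP_STATE_ESTABLISHED.
 */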
/* Map v4-mapped v6 address back to v4 address */
static inline void sctp_v6_map_v4(union sctp_addr *addr)
{
        addr->v4.sin_family = AF_INET;
        addr->v4.sin_port = addr->v6.sin6_port;
        addr->v4.sin_addr.s_addr = addr->v6.sin6_addr.s6_addr32[3];
}

/* Map v4 address to v4-mapped v6 address */
static inline void sctp_v4_map_v6(union sctp_addr *addr)
{
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_port = addr->v4.sin_port;
        addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
        addr->v6.sin6_addr.s6_addr32[0] = 0;
        addr->v6.sin6_addr.s6_addr32[1] = 0;
        addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
}
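
/*
 * Worked example (not part of the original header): applying
 * sctp_v4_map_v6() to the IPv4 address 192.0.2.1, port 5000, yields the
 * v4-mapped IPv6 address ::ffff:192.0.2.1, port 5000; sctp_v6_map_v4()
 * reverses the transformation.
 */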
/* The cookie is always 0 since this is how it's used in the
 * pmtu code.
 */
static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
        if (t->dst && !dst_check(t->dst, 0)) {
                dst_release(t->dst);
                t->dst = NULL;
        }

        return t->dst;
}
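
/*
 * Illustrative example (not part of the original header): a transmit path
 * revalidates the cached route before using it, e.g.
 *
 *      struct dst_entry *dst = sctp_transport_dst_check(transport);
 *
 *      if (!dst)
 *              ... the cached route went stale; look up a fresh one ...
 */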
#endif /* __net_sctp_h__ */