inet_timewait_sock.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	struct inet_bind_bucket *tb;
	/* Unlink from established hashes. */
	rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	write_lock(lock);
	if (hlist_unhashed(&tw->tw_node)) {
		write_unlock(lock);
		return;
	}
	__hlist_del(&tw->tw_node);
	sk_node_init(&tw->tw_node);
	write_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	inet_twsk_put(tw);
}
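
/*
 * Illustrative note (not in the original file): the lifetime of a timewait
 * bucket, as far as this file is concerned.  inet_twsk_alloc() returns a
 * bucket with tw_refcnt == 1; __inet_twsk_hashdance() takes one reference
 * for the hash tables and inet_twsk_schedule() takes one for the death
 * row.  The inet_twsk_put() at the end of __inet_twsk_kill() drops the
 * hash-table reference once the bucket is unlinked from the established
 * and bind hashes; whoever pulled the bucket off the death row drops the
 * death-row reference, and the final inet_twsk_put() frees the bucket.
 */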
void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt)) {
		struct module *owner = tw->tw_prot->owner;
		twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
		printk(KERN_DEBUG "%s timewait_sock %p released\n",
		       tw->tw_prot->name, tw);
#endif
		kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
		module_put(owner);
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in
	   the binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	BUG_TRAP(icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	write_lock(lock);

	/* Step 2: Remove SK from established hash. */
	if (__sk_del_node_init(sk))
		sock_prot_inuse_add(sk->sk_prot, -1);

	/* Step 3: Hash TW into TIMEWAIT chain. */
	inet_twsk_add_node(tw, &ehead->twchain);
	atomic_inc(&tw->tw_refcnt);

	write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
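
/*
 * Illustrative sketch (not in the original file) of how a caller such as
 * tcp_time_wait() in net/ipv4/tcp_minisocks.c typically strings these
 * helpers together; error paths and the RTO computation are elided:
 *
 *	struct inet_timewait_sock *tw;
 *
 *	tw = inet_twsk_alloc(sk, TCP_TIME_WAIT);	// tw_refcnt == 1
 *	if (tw != NULL) {
 *		// Move sk's identity into the hash tables (+1 ref).
 *		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *		// Arm the TIME_WAIT timer on the death row (+1 ref).
 *		inet_twsk_schedule(tw, &tcp_death_row, timeo,
 *				   TCP_TIMEWAIT_LEN);
 *		// Drop the allocation reference; the hash-table and
 *		// death-row references keep the bucket alive.
 *		inet_twsk_put(tw);
 *	}
 */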
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);

	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->daddr;
		tw->tw_rcv_saddr    = inet->rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num	    = inet->num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->sport;
		tw->tw_dport	    = inet->dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hash	    = sk->sk_hash;
		tw->tw_ipv6only	    = 0;
		tw->tw_prot	    = sk->sk_prot_creator;
		atomic_set(&tw->tw_refcnt, 1);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);
/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version, where the lock
	 * was released after detaching the chain.  That was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

	return ret;
}
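
/*
 * Illustrative note (not in the original file): INET_TWDR_TWKILL_QUOTA
 * (100 in this era's <net/inet_timewait_sock.h>) caps how many buckets a
 * single pass may reap.  Because death_lock is dropped around each
 * __inet_twsk_kill() call, the "goto rescan" restarts the traversal from
 * a freshly re-read list head after every kill rather than trusting a
 * next pointer that another CPU may have freed in the meantime.
 */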
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
	}
	twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);
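
/*
 * Illustrative arithmetic (not in the original file), using the values
 * tcp_death_row is set up with: TCP_TIMEWAIT_LEN is 60 * HZ and
 * INET_TWDR_TWKILL_SLOTS is 8, so twdr->period is 60 * HZ / 8, i.e. the
 * hangman fires every 7.5 seconds and advances one slot, sweeping the
 * whole wheel in one 60-second TIME_WAIT period.  The slot advance
 *
 *	twdr->slot = (twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1);
 *
 * is the cheap power-of-two form of (slot + 1) % INET_TWDR_TWKILL_SLOTS.
 */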
void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
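
/*
 * Illustrative note (not in the original file): thread_slots is a bitmask
 * with one bit per slow-wheel slot, which is why the BUILD_BUG_ON above
 * insists that INET_TWDR_TWKILL_SLOTS - 1 fits within its width.  The
 * hangman sets bit N and schedules this work item when slot N still held
 * buckets after a quota-limited pass; the loop here drains each flagged
 * slot to empty, yielding the CPU between passes when need_resched().
 */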
/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);
void inet_twsk_schedule(struct inet_timewait_sock *tw,
			struct inet_timewait_death_row *twdr,
			const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (probability of such event
	 * is p^(N+1), where p is probability to lose single packet and
	 * time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff). Normal timewait length is calculated so that we
	 * wait at least for one retransmitted FIN (maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement, waiting
	 *   only for 60sec; we should wait at least for 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if peer understands PAWS, we
	 * kill tw bucket after 3.5*RTO (it is important that this number
	 * is greater than TS tick!) and detect old duplicates with help
	 * of PAWS.
	 */
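
	/* Worked example (illustrative, not in the original file), assuming
	 * HZ=1000, where INET_TWDR_RECYCLE_TICK works out to 7, i.e. one
	 * recycle tick is 128 jiffies: a minimum RTO of 200ms gives
	 * timeo = 3.5 * 200 = 700 jiffies, so the line below computes
	 * slot = (700 + 127) >> 7 = 6, which is under
	 * INET_TWDR_RECYCLE_SLOTS (32) and lands on the fine-grained
	 * recycle wheel.  A full 60s timeout gives (60000 + 127) >> 7 = 469,
	 * overflowing the recycle wheel and falling through to the coarse
	 * slow-timer cells.
	 */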
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
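
/*
 * Illustrative note (not in the original file): twcal_jiffie remembers the
 * deadline that slot twcal_hand corresponds to, so the loop above walks all
 * INET_TWDR_RECYCLE_SLOTS slots, advancing j by one recycle tick
 * (1 << INET_TWDR_RECYCLE_TICK jiffies) per slot.  Slots whose deadline j
 * has already passed are purged; the first non-empty future slot re-arms
 * twcal_timer for exactly its deadline j, so the timer never fires earlier
 * or more often than the queued buckets require.
 */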