/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */
#include <linux/kernel.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	struct inet_bind_bucket *tb;
	/* Unlink from established hashes. */
	rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	write_lock(lock);
	if (hlist_unhashed(&tw->tw_node)) {
		write_unlock(lock);
		return;
	}
	__hlist_del(&tw->tw_node);
	sk_node_init(&tw->tw_node);
	write_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	/* Drop the reference the hash tables held on this bucket. */
	inet_twsk_put(tw);
}
void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt)) {
		struct module *owner = tw->tw_prot->owner;
		twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
		printk(KERN_DEBUG "%s timewait_sock %p released\n",
		       tw->tw_prot->name, tw);
#endif
		release_net(twsk_net(tw));
		kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
		module_put(owner);
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in
	   the binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	write_lock(lock);

	/* Step 2: Remove SK from established hash. */
	if (__sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/* Step 3: Hash TW into TIMEWAIT chain. */
	inet_twsk_add_node(tw, &ehead->twchain);
	atomic_inc(&tw->tw_refcnt);

	write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
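/*
 * A minimal sketch of the usual caller sequence, assuming the TCP call
 * site (tcp_time_wait() in net/ipv4/tcp_minisocks.c) with its
 * tcp_hashinfo/tcp_death_row objects; shown only to illustrate how
 * alloc, hashdance and schedule fit together:
 *
 *	struct inet_timewait_sock *tw;
 *
 *	tw = inet_twsk_alloc(sk, TCP_TIME_WAIT);
 *	if (tw != NULL) {
 *		...copy protocol-private state into tw...
 *		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *		inet_twsk_schedule(tw, &tcp_death_row, timeo,
 *				   TCP_TIMEWAIT_LEN);
 *		inet_twsk_put(tw);	(drops the allocation reference)
 *	}
 */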
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->daddr;
		tw->tw_rcv_saddr    = inet->rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num	    = inet->num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->sport;
		tw->tw_dport	    = inet->dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hash	    = sk->sk_hash;
		tw->tw_ipv6only	    = 0;
		tw->tw_transparent  = inet->transparent;
		tw->tw_prot	    = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		atomic_set(&tw->tw_refcnt, 1);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);
/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif

	return ret;
}
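/*
 * Worked example of the slot/quota interplay (a sketch; the constants
 * are the stock values from inet_timewait_sock.h and the TCP death row
 * setup in tcp_minisocks.c, and may differ in other configurations):
 * with INET_TWDR_TWKILL_SLOTS = 8 and a 60 second TCP_TIMEWAIT_LEN,
 * twdr->period is 60/8 = 7.5 seconds. Each period the hangman timer
 * reaps one slot; if a slot holds more than INET_TWDR_TWKILL_QUOTA
 * (100) sockets, inet_twdr_do_twkill_work() returns non-zero and the
 * remainder is deferred to inet_twdr_twkill_work() on the work queue,
 * so a flood of TIME_WAIT sockets cannot stall one timer run for long.
 */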
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left? */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);
void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);
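/*
 * Example early-kill path (a sketch, assuming the TCP_TW_SYN handling
 * in tcp_v4_rcv(); details vary by protocol): when an incoming SYN may
 * legitimately reuse a tuple still parked in TIME_WAIT, the old bucket
 * is descheduled ahead of its timer:
 *
 *	inet_twsk_deschedule(tw, &tcp_death_row);
 *	inet_twsk_put(tw);
 *	...then proceed with the new connection...
 */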
void inet_twsk_schedule(struct inet_timewait_sock *tw,
			struct inet_timewait_death_row *twdr,
			const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;
	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (probability of such event
	 * is p^(N+1), where p is the probability to lose a single packet and
	 * the time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff). Normal timewait length is calculated so that we
	 * waited for at least one retransmitted FIN (maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement by waiting
	 *   only 60sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too much of resources 8)                ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
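	/*
	 * Worked example (a sketch; the exact constants depend on HZ and
	 * the INET_TWDR_RECYCLE_* definitions in inet_timewait_sock.h):
	 * with HZ=1000 one recycle tick is 1 << INET_TWDR_RECYCLE_TICK =
	 * 128 jiffies. A PAWS-recycled timeout of 3.5*RTO with RTO=200ms
	 * gives timeo = 700 jiffies, so slot = ceil(700/128) = 6, well
	 * inside the INET_TWDR_RECYCLE_SLOTS (32) fast wheel. A full 60s
	 * TIME-WAIT gives slot = ceil(60000/128) = 469, which overflows
	 * the fast wheel and is rescheduled on the slow timer below.
	 */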
	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					(slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_node *node;
	int h;

	local_bh_disable();
	for (h = 0; h < (hashinfo->ehash_size); h++) {
		struct inet_ehash_bucket *head =
			inet_ehash_bucket(hashinfo, h);
		rwlock_t *lock = inet_ehash_lockp(hashinfo, h);
restart:
		write_lock(lock);
		sk_for_each(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if (!net_eq(twsk_net(tw), net) ||
			    tw->tw_family != family)
				continue;

			atomic_inc(&tw->tw_refcnt);
			write_unlock(lock);
			inet_twsk_deschedule(tw, twdr);
			inet_twsk_put(tw);
			goto restart;
		}
		write_unlock(lock);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);
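/*
 * Sketch of the intended caller (an assumption based on the network
 * namespace teardown path): when a struct net goes away, the protocol
 * exit hook flushes its leftover TIME_WAIT entries, e.g.:
 *
 *	static void tcp_sk_exit(struct net *net)
 *	{
 *		inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
 *	}
 */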