patch-2.1.99 linux/net/ipv4/tcp_timer.c
Next file: linux/net/ipv6/ip6_fib.c
Previous file: linux/net/ipv4/tcp_output.c
Back to the patch index
Back to the overall index
- Lines: 188
- Date: Tue Apr 28 11:10:11 1998
- Orig file: v2.1.98/linux/net/ipv4/tcp_timer.c
- Orig date: Tue Apr 14 14:29:26 1998
diff -u --recursive --new-file v2.1.98/linux/net/ipv4/tcp_timer.c linux/net/ipv4/tcp_timer.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_timer.c,v 1.48 1998/04/06 08:42:30 davem Exp $
+ * Version: $Id: tcp_timer.c,v 1.50 1998/04/14 09:08:59 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -32,6 +32,7 @@
static void tcp_syn_recv_timer(unsigned long);
static void tcp_keepalive(unsigned long data);
static void tcp_bucketgc(unsigned long);
+static void tcp_twkill(unsigned long);
struct timer_list tcp_slow_timer = {
NULL, NULL,
@@ -43,6 +44,7 @@
struct tcp_sl_timer tcp_slt_array[TCP_SLT_MAX] = {
{ATOMIC_INIT(0), TCP_SYNACK_PERIOD, 0, tcp_syn_recv_timer},/* SYNACK */
{ATOMIC_INIT(0), TCP_KEEPALIVE_PERIOD, 0, tcp_keepalive}, /* KEEPALIVE */
+ {ATOMIC_INIT(0), TCP_TWKILL_PERIOD, 0, tcp_twkill}, /* TWKILL */
{ATOMIC_INIT(0), TCP_BUCKETGC_PERIOD, 0, tcp_bucketgc} /* BUCKETGC */
};
@@ -166,11 +168,10 @@
{
struct sock *sk = (struct sock*)data;
- if(sk->zapped)
- return;
-
- if (sk->tp_pinfo.af_tcp.delayed_acks)
- tcp_read_wakeup(sk);
+ if(!sk->zapped &&
+ sk->tp_pinfo.af_tcp.delayed_acks &&
+ sk->state != TCP_CLOSE)
+ tcp_send_ack(sk);
}
void tcp_probe_timer(unsigned long data)
@@ -240,9 +241,9 @@
}
/* Garbage collect TCP bind buckets. */
-static void tcp_bucketgc(unsigned long __unused)
+static void tcp_bucketgc(unsigned long data)
{
- int i;
+ int i, reaped = 0;;
for(i = 0; i < TCP_BHTABLE_SIZE; i++) {
struct tcp_bind_bucket *tb = tcp_bound_hash[i];
@@ -252,8 +253,7 @@
if((tb->owners == NULL) &&
!(tb->flags & TCPB_FLAG_LOCKED)) {
- /* Eat timer reference. */
- tcp_dec_slow_timer(TCP_SLT_BUCKETGC);
+ reaped++;
/* Unlink bucket. */
if(tb->next)
@@ -266,6 +266,92 @@
tb = next;
}
}
+ if(reaped != 0) {
+ struct tcp_sl_timer *slt = (struct tcp_sl_timer *)data;
+
+ /* Eat timer references. */
+ atomic_sub(reaped, &slt->count);
+ }
+}
+
+/* Kill off TIME_WAIT sockets once their lifetime has expired. */
+int tcp_tw_death_row_slot = 0;
+static struct tcp_tw_bucket *tcp_tw_death_row[TCP_TWKILL_SLOTS] =
+ { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL };
+
+extern void tcp_timewait_kill(struct tcp_tw_bucket *tw);
+
+static void tcp_twkill(unsigned long data)
+{
+ struct tcp_tw_bucket *tw;
+ int killed = 0;
+
+ tw = tcp_tw_death_row[tcp_tw_death_row_slot];
+ tcp_tw_death_row[tcp_tw_death_row_slot] = NULL;
+ while(tw != NULL) {
+ struct tcp_tw_bucket *next = tw->next_death;
+
+ tcp_timewait_kill(tw);
+ killed++;
+ tw = next;
+ }
+ if(killed != 0) {
+ struct tcp_sl_timer *slt = (struct tcp_sl_timer *)data;
+ atomic_sub(killed, &slt->count);
+ }
+ tcp_tw_death_row_slot =
+ ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
+}
+
+/* These are always called from BH context. See callers in
+ * tcp_input.c to verify this.
+ */
+void tcp_tw_schedule(struct tcp_tw_bucket *tw)
+{
+ int slot = (tcp_tw_death_row_slot - 1) & (TCP_TWKILL_SLOTS - 1);
+
+ tw->death_slot = slot;
+ tw->next_death = tcp_tw_death_row[slot];
+ tcp_tw_death_row[slot] = tw;
+ tcp_inc_slow_timer(TCP_SLT_TWKILL);
+}
+
+/* Happens rarely if at all, no care about scalability here. */
+void tcp_tw_reschedule(struct tcp_tw_bucket *tw)
+{
+ struct tcp_tw_bucket *walk;
+ int slot = tw->death_slot;
+
+ walk = tcp_tw_death_row[slot];
+ if(walk == tw) {
+ tcp_tw_death_row[slot] = tw->next_death;
+ } else {
+ while(walk->next_death != tw)
+ walk = walk->next_death;
+ walk->next_death = tw->next_death;
+ }
+ slot = (tcp_tw_death_row_slot - 1) & (TCP_TWKILL_SLOTS - 1);
+ tw->death_slot = slot;
+ tw->next_death = tcp_tw_death_row[slot];
+ tcp_tw_death_row[slot] = tw;
+ /* Timer was incremented when we first entered the table. */
+}
+
+/* This is for handling early-kills of TIME_WAIT sockets. */
+void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
+{
+ struct tcp_tw_bucket *walk;
+ int slot = tw->death_slot;
+
+ walk = tcp_tw_death_row[slot];
+ if(walk == tw) {
+ tcp_tw_death_row[slot] = tw->next_death;
+ } else {
+ while(walk->next_death != tw)
+ walk = walk->next_death;
+ walk->next_death = tw->next_death;
+ }
+ tcp_dec_slow_timer(TCP_SLT_TWKILL);
}
/*
@@ -511,14 +597,14 @@
slt->last = now;
trigger = slt->period;
}
- next = min(next, trigger);
- }
- }
- if (next != ~0UL) {
- tcp_slow_timer.expires = now + next;
- add_timer(&tcp_slow_timer);
+ /* Only reschedule if some events remain. */
+ if (atomic_read(&slt->count))
+ next = min(next, trigger);
+ }
}
+ if (next != ~0UL)
+ mod_timer(&tcp_slow_timer, (now + next));
}
void __tcp_inc_slow_timer(struct tcp_sl_timer *slt)
@@ -531,9 +617,8 @@
when = now + slt->period;
if (tcp_slow_timer.prev) {
- if ((long)(tcp_slow_timer.expires - when) >= 0) {
+ if ((long)(tcp_slow_timer.expires - when) >= 0)
mod_timer(&tcp_slow_timer, when);
- }
} else {
tcp_slow_timer.expires = when;
add_timer(&tcp_slow_timer);
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov