/*
 * TCP Westwood+
 *
 *	Angelo Dell'Aera:	TCP Westwood+ support
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/tcp_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
	u32	bw_ns_est;	/* first bandwidth estimation, not too smoothed */
	u32	bw_est;		/* bandwidth estimate */
	u32	rtt_win_sx;	/* here starts a new evaluation window */
	u32	bk;		/* bytes acked during the current window */
	u32	snd_una;	/* used for evaluating the number of acked bytes */
	u32	cumul_ack;	/* bytes acked by the last ack */
	u32	accounted;	/* bytes already credited via dupacks */
	u32	rtt;		/* last RTT sample */
	u32	rtt_min;	/* minimum observed RTT */
};

/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN	(HZ/20)	/* 50ms */
#define TCP_WESTWOOD_INIT_RTT	(20*HZ)	/* maybe too conservative?! */

/*
 * @tcp_westwood_init
 * This function initializes fields used in TCP Westwood+. It is called
 * after the initial SYN, so the sequence numbers are correct, but for
 * new passive connections we have no information about RTTmin at this
 * time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This value was
 * deliberately chosen to be conservative, since that way we are sure it
 * will be updated in a consistent way as soon as possible. That will
 * reasonably happen within the first RTT period of the connection
 * lifetime.
 */
static void tcp_westwood_init(struct tcp_sock *tp)
{
	struct westwood *w = tcp_ca(tp);

	w->bk = w->bw_ns_est = w->bw_est = 0;
	w->accounted = w->cumul_ack = 0;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_time_stamp;
	w->snd_una = tp->snd_una;
}

/* Low-pass filter. Implemented using constant coefficients. */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return (((7 * a) + b) >> 3);
}

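/*
 * The raw bandwidth sample fed to the filter is w->bk / delta: bytes
 * acked during the last window divided by the window length in jiffies.
 * Since (7*a + b) >> 3 is a + (b - a)/8 (up to integer rounding), each
 * stage is an exponentially weighted moving average with gain 1/8, and
 * bw_est is the twice-smoothed estimate used for the cwnd/ssthresh
 * settings below.
 */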
static inline void westwood_filter(struct westwood *w, u32 delta)
{
	w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
	w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
}

/*
 * @tcp_westwood_pkts_acked
 * Called after processing a group of packets,
 * but all Westwood needs is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct tcp_sock *tp, u32 cnt)
{
	struct westwood *w = tcp_ca(tp);

	if (cnt > 0)
		w->rtt = tp->srtt >> 3;	/* srtt is scaled by 8; convert to jiffies */
}

/*
 * @westwood_update_window
 * It updates the RTT evaluation window if it is the right moment to do
 * it. If so, it calls the filter to evaluate bandwidth.
 */
static void westwood_update_window(struct tcp_sock *tp)
{
	struct westwood *w = tcp_ca(tp);
	s32 delta = tcp_time_stamp - w->rtt_win_sx;

	/*
	 * See if an RTT window has passed. Be careful, since if the RTT
	 * is less than 50ms we don't filter but continue 'building the
	 * sample', because estimating bandwidth over very small time
	 * intervals is better avoided. Obviously on a LAN we will
	 * reasonably always have
	 * right_bound = left_bound + WESTWOOD_RTT_MIN.
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		w->bk = 0;
		w->rtt_win_sx = tcp_time_stamp;
	}
}

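/*
 * For example, with HZ=1000 a connection with w->rtt of 200 jiffies
 * produces one bandwidth sample roughly every 200ms, while a LAN
 * connection with w->rtt of 10 jiffies still waits out the 50-jiffy
 * TCP_WESTWOOD_RTT_MIN floor before each sample.
 */
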
/*
 * @westwood_fast_bw
 * It is called when we are in the fast path. In particular, it is called
 * when header prediction is successful. In that case the update is
 * straightforward and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct tcp_sock *tp)
{
	struct westwood *w = tcp_ca(tp);

	westwood_update_window(tp);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	w->rtt_min = min(w->rtt, w->rtt_min);
}

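/*
 * Note that in the fast path every byte that snd_una advances past the
 * previously recorded value was newly acked in order, so it can be
 * credited to bk directly, with none of the dupack/partial-ack
 * bookkeeping done in westwood_acked_count() below.
 */
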
/*
 * @westwood_acked_count
 * This function evaluates cumul_ack for evaluating bk in case of
 * delayed or partial acks.
 */
static inline u32 westwood_acked_count(struct tcp_sock *tp)
{
	struct westwood *w = tcp_ca(tp);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack, since it's not moving tp->snd_una. */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;
	return w->cumul_ack;
}

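/*
 * Worked example: two dupacks each credit one MSS (accounted becomes
 * 2*MSS, and 2*MSS is added to bk); a cumulative ack then advances
 * snd_una by 3*MSS. cumul_ack starts at 3*MSS, the 2*MSS already
 * accounted for is subtracted, and only the remaining MSS is credited,
 * so bk grows by 3*MSS in total rather than 5*MSS.
 */
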
static inline u32 westwood_bw_rttmin(const struct tcp_sock *tp)
{
	const struct westwood *w = tcp_ca(tp);
	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}

/*
 * TCP Westwood
 * Here the limit is evaluated as Bw estimation * RTTmin (converted to
 * packets using mss_cache). The result is clamped to be >= 2, which
 * avoids ever returning 0.
 */
static u32 tcp_westwood_cwnd_min(struct tcp_sock *tp)
{
	return westwood_bw_rttmin(tp);
}

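/*
 * Example of the arithmetic (assuming HZ=1000): a bw_est of 1250
 * bytes/jiffy corresponds to 10 Mbit/s; with rtt_min = 50 jiffies the
 * bandwidth-delay product is 62500 bytes, i.e. about 42 packets for an
 * mss_cache of 1460 bytes.
 */
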
static void tcp_westwood_event(struct tcp_sock *tp, enum tcp_ca_event event)
{
	struct westwood *w = tcp_ca(tp);

	switch (event) {
	case CA_EVENT_FAST_ACK:
		westwood_fast_bw(tp);
		break;
	case CA_EVENT_COMPLETE_CWR:
		tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(tp);
		break;
	case CA_EVENT_FRTO:
		tp->snd_ssthresh = westwood_bw_rttmin(tp);
		break;
	case CA_EVENT_SLOW_ACK:
		westwood_update_window(tp);
		w->bk += westwood_acked_count(tp);
		w->rtt_min = min(w->rtt, w->rtt_min);
		break;
	default:
		/* don't care */
		break;
	}
}

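/*
 * The events above implement Westwood+'s "adaptive decrease": when a
 * CWR episode completes, or on F-RTO, cwnd and/or ssthresh are set to
 * the estimated bandwidth-delay product from westwood_bw_rttmin()
 * rather than being left at the halved Reno value, which lets a flow
 * on a lossy but uncongested path recover its rate faster.
 */
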
/* Extract info for TCP socket info provided via netlink. */
static void tcp_westwood_info(struct tcp_sock *tp, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = tcp_ca(tp);

	/* No Westwood-specific diag attribute exists; reuse the Vegas one. */
	if (ext & (1 << (TCPDIAG_VEGASINFO - 1))) {
		struct rtattr *rta;
		struct tcpvegas_info *info;

		rta = __RTA_PUT(skb, TCPDIAG_VEGASINFO, sizeof(*info));
		info = RTA_DATA(rta);
		info->tcpv_enabled = 1;
		info->tcpv_rttcnt = 0;
		info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
		info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
	rtattr_failure: ;
	}
}

static struct tcp_congestion_ops tcp_westwood = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_westwood_cwnd_min,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};

static int __init tcp_westwood_register(void)
{
	BUG_ON(sizeof(struct westwood) > TCP_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");
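
/*
 * Usage sketch: once this module is loaded, Westwood+ can be selected
 * system-wide with, e.g.:
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=westwood
 */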