/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 * Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.
 *   "TCP Westwood: bandwidth estimation for TCP", Proc. ACM Mobicom 2001
 *
 * - L. A. Grieco, S. Mascolo
 *   "Performance Evaluation of New Reno, Vegas, Westwood+ TCP", ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *   A Performance Evaluation Over the Internet", Proc. IEEE ICC 2004,
 *   Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is the same as in Reno.
 */
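
/*
 * Editorial sketch of the idea (per the references above): on loss,
 * instead of Reno's blind halving of cwnd, the sender falls back to the
 * estimated bandwidth-delay product,
 *
 *      ssthresh = (bw_est * rtt_min) / mss     [packets]
 *
 * which this file computes in tcp_westwood_bw_rttmin() below.
 */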

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
        u32    bw_ns_est;        /* first bandwidth estimation..not too smoothed 8) */
        u32    bw_est;           /* bandwidth estimate */
        u32    rtt_win_sx;       /* here starts a new evaluation... */
        u32    bk;               /* bytes acked during the current window sample */
        u32    snd_una;          /* used for evaluating the number of acked bytes */
        u32    cumul_ack;        /* bytes acked by the current ACK */
        u32    accounted;        /* bytes already credited to duplicate ACKs */
        u32    rtt;              /* last RTT sample, in jiffies */
        u32    rtt_min;          /* minimum observed RTT */
        u8     first_ack;        /* flag which infers that this is the first ack */
        u8     reset_rtt_min;    /* reset rtt_min to the next RTT sample */
};

/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN    (HZ/20)    /* 50 ms */
#define TCP_WESTWOOD_INIT_RTT   (20*HZ)    /* maybe too conservative?! */
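
/*
 * Worked example (editorial): both constants are in jiffies, so their
 * wall-clock value tracks HZ automatically. At HZ=1000, HZ/20 = 50
 * jiffies = 50 ms and 20*HZ = 20000 jiffies = 20 s; at HZ=100 the same
 * expressions give 5 and 2000 jiffies, still 50 ms and 20 s.
 */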

/*
 * @tcp_westwood_init
 * This function initializes the fields used by TCP Westwood+. It is
 * called after the initial SYN, so the sequence numbers are correct,
 * but on new passive connections we have no information about RTTmin
 * at this time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This
 * value was deliberately chosen to be overly conservative, so that we
 * are sure it will be updated in a consistent way as soon as possible,
 * reasonably within the first RTT of the connection's lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);

        w->bk = 0;
        w->bw_ns_est = 0;
        w->bw_est = 0;
        w->accounted = 0;
        w->cumul_ack = 0;
        w->reset_rtt_min = 1;
        w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
        w->rtt_win_sx = tcp_time_stamp;
        w->snd_una = tcp_sk(sk)->snd_una;
        w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
        return ((7 * a) + b) >> 3;
}
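
/*
 * Worked example (editorial): this is an exponentially weighted moving
 * average with gain 1/8, i.e. new = (7*old + sample)/8. With old = 800
 * and sample = 1600, the result is (5600 + 1600) >> 3 = 900: each new
 * sample moves the estimate only one eighth of the way toward it.
 */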

static void westwood_filter(struct westwood *w, u32 delta)
{
        /* If the filter is empty, fill it with the first sample of bandwidth */
        if (w->bw_ns_est == 0 && w->bw_est == 0) {
                w->bw_ns_est = w->bk / delta;
                w->bw_est = w->bw_ns_est;
        } else {
                w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
                w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
        }
}
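
/*
 * Units example (editorial): w->bk counts acked bytes in the current
 * window and delta is the window length in jiffies, so the samples are
 * in bytes/jiffy. E.g. 75000 bytes acked over a 50-jiffy window (50 ms
 * at HZ=1000) gives 1500 bytes/jiffy, i.e. 1.5 MB/s or 12 Mbit/s.
 */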

/*
 * @tcp_westwood_pkts_acked
 * Called after processing a group of packets,
 * but all Westwood needs is the last RTT sample.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
{
        struct westwood *w = inet_csk_ca(sk);

        if (rtt > 0)
                w->rtt = usecs_to_jiffies(rtt);
}
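
/*
 * Conversion example (editorial): rtt arrives in microseconds and is
 * stored in jiffies; rtt = 20000 us becomes 20 jiffies at HZ=1000 but
 * only 2 jiffies at HZ=100, so the estimator's granularity follows HZ.
 */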

/*
 * @westwood_update_window
 * It updates the RTT evaluation window if it is the right moment to do
 * it. If so, it calls the filter to evaluate the bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);
        s32 delta = tcp_time_stamp - w->rtt_win_sx;

        /* Initialize w->snd_una with the first acked sequence number in order
         * to fix the mismatch between tp->snd_una and w->snd_una for the first
         * bandwidth sample.
         */
        if (w->first_ack) {
                w->snd_una = tcp_sk(sk)->snd_una;
                w->first_ack = 0;
        }

        /*
         * See if an RTT-window has passed.
         * Be careful: if the RTT is less than 50 ms we don't filter but
         * keep 'building the sample', since estimates over such small
         * time intervals are unreliable and better avoided.
         * Obviously, on a LAN we will reasonably always have
         * right_bound = left_bound + WESTWOOD_RTT_MIN.
         */
        if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
                westwood_filter(w, delta);

                w->bk = 0;
                w->rtt_win_sx = tcp_time_stamp;
        }
}
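
/*
 * Timeline example (editorial, HZ=1000): with rtt = 80 jiffies the
 * sample window is max(80, HZ/20) = 80 jiffies. ACKed bytes accumulate
 * in w->bk until more than 80 jiffies have passed since rtt_win_sx;
 * only then is bk/delta fed to the filter and the window restarted.
 */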

static inline void update_rtt_min(struct westwood *w)
{
        if (w->reset_rtt_min) {
                w->rtt_min = w->rtt;
                w->reset_rtt_min = 0;
        } else {
                w->rtt_min = min(w->rtt, w->rtt_min);
        }
}

/*
 * @westwood_fast_bw
 * It is called when we are in the fast path, in particular when header
 * prediction is successful. In that case the update is straightforward
 * and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        westwood_update_window(sk);

        w->bk += tp->snd_una - w->snd_una;
        w->snd_una = tp->snd_una;
        update_rtt_min(w);
}

/*
 * @westwood_acked_count
 * This function evaluates cumul_ack, used to update bk, in the presence
 * of delayed or partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        w->cumul_ack = tp->snd_una - w->snd_una;

        /* If cumul_ack is 0 this is a dupack, since it is not moving
         * tp->snd_una.
         */
        if (!w->cumul_ack) {
                w->accounted += tp->mss_cache;
                w->cumul_ack = tp->mss_cache;
        }

        if (w->cumul_ack > tp->mss_cache) {
                /* Partial or delayed ack */
                if (w->accounted >= w->cumul_ack) {
                        w->accounted -= w->cumul_ack;
                        w->cumul_ack = tp->mss_cache;
                } else {
                        w->cumul_ack -= w->accounted;
                        w->accounted = 0;
                }
        }

        w->snd_una = tp->snd_una;

        return w->cumul_ack;
}
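
/*
 * Worked example (editorial, mss_cache = 1000): three dupacks arrive,
 * each credited 1000 bytes (cumul_ack = 1000), growing accounted to
 * 3000. A cumulative ACK then advances snd_una by 4000: accounted
 * (3000) < cumul_ack (4000), so the 3000 bytes already credited to the
 * dupacks are subtracted and only the remaining 1000 count toward bk.
 * No byte is ever counted twice.
 */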

/*
 * TCP Westwood
 * Here the limit is evaluated as the bandwidth estimate * RTTmin (we
 * use mss_cache to obtain it in packets). The result is clamped to be
 * at least 2 packets, so this never returns 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct westwood *w = inet_csk_ca(sk);

        return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
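
/*
 * Worked example (editorial): bw_est (bytes/jiffy) times rtt_min
 * (jiffies) yields plain bytes, and dividing by the MSS gives packets.
 * With bw_est = 1500 bytes/jiffy, rtt_min = 50 jiffies and mss_cache =
 * 1500 bytes, the limit is (1500 * 50) / 1500 = 50 packets: the
 * estimated bandwidth-delay product of the path.
 */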

static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        switch (event) {
        case CA_EVENT_FAST_ACK:
                westwood_fast_bw(sk);
                break;

        case CA_EVENT_COMPLETE_CWR:
                tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
                break;

        case CA_EVENT_FRTO:
                tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
                /* Update RTT_min when next ack arrives */
                w->reset_rtt_min = 1;
                break;

        case CA_EVENT_SLOW_ACK:
                westwood_update_window(sk);
                w->bk += westwood_acked_count(sk);
                update_rtt_min(w);
                break;

        default:
                /* don't care */
                break;
        }
}
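
/*
 * Behavioural note (editorial, numbers illustrative): CA_EVENT_COMPLETE_CWR
 * is where this implementation departs from Reno. If cwnd was 100
 * packets when loss struck, Reno would leave about 50 after recovery,
 * while here cwnd and ssthresh are set to the estimated bandwidth-delay
 * product, e.g. 80 packets if the path was not actually congested. This
 * is what makes Westwood+ attractive over lossy (e.g. wireless) links.
 */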

/* Extract info for TCP socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
                              struct sk_buff *skb)
{
        const struct westwood *ca = inet_csk_ca(sk);

        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                struct tcpvegas_info info = {
                        .tcpv_enabled = 1,
                        .tcpv_rtt = jiffies_to_usecs(ca->rtt),
                        .tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
                };

                nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
        }
}

static struct tcp_congestion_ops tcp_westwood __read_mostly = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .min_cwnd       = tcp_westwood_bw_rttmin,
        .cwnd_event     = tcp_westwood_event,
        .get_info       = tcp_westwood_info,
        .pkts_acked     = tcp_westwood_pkts_acked,

        .owner          = THIS_MODULE,
        .name           = "westwood"
};
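
/*
 * Usage sketch (editorial, assuming the module is loaded): the .name
 * string above is what userspace passes to the standard TCP_CONGESTION
 * socket option to select this algorithm per socket:
 *
 *      #include <netinet/tcp.h>
 *
 *      static const char alg[] = "westwood";
 *      setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, alg, sizeof(alg) - 1);
 *
 * It can also be made the system-wide default through the
 * net.ipv4.tcp_congestion_control sysctl.
 */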

static int __init tcp_westwood_register(void)
{
        BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");