// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP CUBIC: Binary Increase Congestion control for TCP v2.3
 * Home page:
 *      http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
 * This is from the implementation of CUBIC TCP in
 * Sangtae Ha, Injong Rhee and Lisong Xu,
 *  "CUBIC: A New TCP-Friendly High-Speed TCP Variant"
 *  in ACM SIGOPS Operating Systems Review, July 2008.
 * Available from:
 *  http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf
 *
 * CUBIC integrates a new slow start algorithm, called HyStart.
 * The details of HyStart are presented in
 *  Sangtae Ha and Injong Rhee,
 *  "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008.
 * Available from:
 *  http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf
 *
 * All testing results are available from:
 *  http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing
 *
 * Unless CUBIC is enabled and the congestion window is large,
 * this behaves the same as the original Reno.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <net/tcp.h>

#define BICTCP_BETA_SCALE    1024       /* Scale factor beta calculation
                                         * max_cwnd = snd_cwnd * beta
                                         */
#define BICTCP_HZ            10         /* BIC HZ 2^10 = 1024 */

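/* Worked example (illustrative, not in the original source): with the
 * default beta = 717, the multiplicative decrease factor is
 * 717 / 1024 ~= 0.7, i.e. cwnd is cut to roughly 70% on loss.
 * BICTCP_HZ = 10 means cubic time is kept in units of 2^-10 s
 * (~0.98 ms), so t = 1024 corresponds to about one second.
 */
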
/* Two methods of hybrid slow start */
#define HYSTART_ACK_TRAIN       0x1
#define HYSTART_DELAY           0x2

/* Number of delay samples for detecting the increase of delay */
#define HYSTART_MIN_SAMPLES     8
#define HYSTART_DELAY_MIN       (4000U)  /* 4 ms */
#define HYSTART_DELAY_MAX       (16000U) /* 16 ms */
#define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)

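/* Illustrative arithmetic (added commentary, not in the original source):
 * hystart_update() below calls HYSTART_DELAY_THRESH(delay_min >> 3), so
 * the delay threshold is delay_min / 8 clamped to [4 ms, 16 ms]. For
 * delay_min = 2000 us the raw value 250 us is clamped up to 4000 us; for
 * delay_min = 200000 us the raw value 25000 us is clamped down to
 * 16000 us.
 */
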
static int fast_convergence __read_mostly = 1;
static int beta __read_mostly = 717;    /* = 717/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh __read_mostly;
static int bic_scale __read_mostly = 41;
static int tcp_friendliness __read_mostly = 1;

static int hystart __read_mostly = 1;
static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY;
static int hystart_low_window __read_mostly = 16;
static int hystart_ack_delta_us __read_mostly = 2000;

static u32 cube_rtt_scale __read_mostly;
static u32 beta_scale __read_mostly;
static u64 cube_factor __read_mostly;

/* Note parameters that are used for precomputing scale factors are read-only */
module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative increase");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(bic_scale, int, 0444);
MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)");
module_param(tcp_friendliness, int, 0644);
MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness");
module_param(hystart, int, 0644);
MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm");
module_param(hystart_detect, int, 0644);
MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms"
                 " 1: packet-train 2: delay 3: both packet-train and delay");
module_param(hystart_low_window, int, 0644);
MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
module_param(hystart_ack_delta_us, int, 0644);
MODULE_PARM_DESC(hystart_ack_delta_us, "spacing between ACKs indicating train (usecs)");

/* BIC TCP Parameters */
struct bictcp {
        u32     cnt;            /* increase cwnd by 1 after this many ACKs */
        u32     last_max_cwnd;  /* last maximum snd_cwnd */
        u32     last_cwnd;      /* the last snd_cwnd */
        u32     last_time;      /* time when updated last_cwnd */
        u32     bic_origin_point;/* origin point of bic function */
        u32     bic_K;          /* time to origin point
                                   from the beginning of the current epoch */
        u32     delay_min;      /* min delay (usec) */
        u32     epoch_start;    /* beginning of an epoch */
        u32     ack_cnt;        /* number of acks */
        u32     tcp_cwnd;       /* estimated tcp cwnd */
        u16     unused;
        u8      sample_cnt;     /* number of samples to decide curr_rtt */
        u8      found;          /* the exit point is found? */
        u32     round_start;    /* beginning of each round */
        u32     end_seq;        /* end_seq of the round */
        u32     last_ack;       /* last time when the ACK spacing is close */
        u32     curr_rtt;       /* the minimum rtt of current round */
};

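/* Layout note (added commentary, not in the original source):
 * bictcp_reset() below zeroes only the fields that precede `unused` via
 * offsetof(), then clears `found` explicitly; the HyStart round state
 * (sample_cnt, round_start, end_seq, last_ack, curr_rtt) is instead
 * reinitialized by bictcp_hystart_reset() at the start of each round.
 */
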
static inline void bictcp_reset(struct bictcp *ca)
{
        memset(ca, 0, offsetof(struct bictcp, unused));
        ca->found = 0;
}

static inline u32 bictcp_clock_us(const struct sock *sk)
{
        return tcp_sk(sk)->tcp_mstamp;
}

static inline void bictcp_hystart_reset(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        ca->round_start = ca->last_ack = bictcp_clock_us(sk);
        ca->end_seq = tp->snd_nxt;
        ca->curr_rtt = ~0U;
        ca->sample_cnt = 0;
}

static void bictcp_init(struct sock *sk)
{
        struct bictcp *ca = inet_csk_ca(sk);

        bictcp_reset(ca);

        if (hystart)
                bictcp_hystart_reset(sk);

        if (!hystart && initial_ssthresh)
                tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}

static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
        if (event == CA_EVENT_TX_START) {
                struct bictcp *ca = inet_csk_ca(sk);
                u32 now = tcp_jiffies32;
                s32 delta;

                delta = now - tcp_sk(sk)->lsndtime;

                /* We were application limited (idle) for a while.
                 * Shift epoch_start to keep cwnd growth to cubic curve.
                 */
                if (ca->epoch_start && delta > 0) {
                        ca->epoch_start += delta;
                        if (after(ca->epoch_start, now))
                                ca->epoch_start = now;
                }
                return;
        }
}

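/* Example (illustrative, not in the original source): if the flow sat
 * application-limited for 2 seconds, epoch_start is moved forward by
 * 2 seconds worth of jiffies, so the elapsed time "t" computed in
 * bictcp_update() resumes from where the cubic curve stopped rather
 * than jumping ahead and inflating cwnd right after the idle period.
 */
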
/* calculate the cubic root of x using a table lookup followed by one
 * Newton-Raphson iteration.
 * Avg err ~= 0.195%
 */
static u32 cubic_root(u64 a)
{
        u32 x, b, shift;
        /*
         * cbrt(x) MSB values for x MSB values in [0..63].
         * Precomputed then refined by hand - Willy Tarreau
         *
         * For x in [0..63],
         *   v = cbrt(x << 18) - 1
         *   cbrt(x) = (v[x] + 10) >> 6
         */
        static const u8 v[] = {
                /* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
                /* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
                /* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
                /* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
                /* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
                /* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
                /* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
                /* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
        };

        b = fls64(a);
        if (b < 7) {
                /* a in [0..63] */
                return ((u32)v[(u32)a] + 35) >> 6;
        }

        b = ((b * 84) >> 8) - 1;
        shift = (a >> (b * 3));

        x = ((u32)(((u32)v[shift] + 10) << b)) >> 6;

        /*
         * Newton-Raphson iteration
         *                         2
         * x    = ( 2 * x  +  a / x  ) / 3
         *  k+1          k         k
         */
        x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
        x = ((x * 341) >> 10);
        return x;
}

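/* Worked example (illustrative, not in the original source):
 * cubic_root(1000): fls64(1000) = 10, so b = ((10 * 84) >> 8) - 1 = 2,
 * shift = 1000 >> 6 = 15, and the first estimate is
 * x = ((v[15] + 10) << 2) >> 6 = (166 << 2) >> 6 = 10. The Newton-Raphson
 * step gives 2 * 10 + 1000 / (10 * 9) = 31, and (31 * 341) >> 10 = 10,
 * matching cbrt(1000) exactly; the 341/1024 multiply approximates the
 * divide-by-3 of the iteration.
 */
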
/*
 * Compute congestion window to use.
 */
static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
{
        u32 delta, bic_target, max_cnt;
        u64 offs, t;

        ca->ack_cnt += acked;   /* count the number of ACKed packets */

        if (ca->last_cwnd == cwnd &&
            (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
                return;

        /* The CUBIC function can update ca->cnt at most once per jiffy.
         * On all cwnd reduction events, ca->epoch_start is set to 0,
         * which will force a recalculation of ca->cnt.
         */
        if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
                goto tcp_friendliness;

        ca->last_cwnd = cwnd;
        ca->last_time = tcp_jiffies32;

        if (ca->epoch_start == 0) {
                ca->epoch_start = tcp_jiffies32;        /* record beginning */
                ca->ack_cnt = acked;                    /* start counting */
                ca->tcp_cwnd = cwnd;                    /* sync with cubic */

                if (ca->last_max_cwnd <= cwnd) {
                        ca->bic_K = 0;
                        ca->bic_origin_point = cwnd;
                } else {
                        /* Compute new K based on
                         * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
                         */
                        ca->bic_K = cubic_root(cube_factor
                                               * (ca->last_max_cwnd - cwnd));
                        ca->bic_origin_point = ca->last_max_cwnd;
                }
        }

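        /* Worked example (illustrative, not in the original source): with
         * the defaults, cube_factor = 2^40 / 410 ~= 2.68e9. For
         * last_max_cwnd = 200 and cwnd = 100, bic_K =
         * cubic_root(2.68e9 * 100) ~= 6444, which in 2^BICTCP_HZ units is
         * about 6.3 seconds to climb back to the previous maximum.
         */
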
        /* cubic function - calc*/
        /* calculate c * time^3 / rtt,
         *  while considering overflow in calculation of time^3
         * (so time^3 is done by using 64 bit)
         * and without the support of division of 64bit numbers
         * (so all divisions are done by using 32 bit)
         *  also NOTE the unit of those variables
         *        time  = (t - K) / 2^bictcp_HZ
         *        c = bic_scale >> 10
         *        rtt  = (srtt >> 3) / HZ
         * !!! The following code does not have overflow problems,
         * if the cwnd < 1 million packets !!!
         */

        t = (s32)(tcp_jiffies32 - ca->epoch_start);
        t += usecs_to_jiffies(ca->delay_min);
        /* change the unit from HZ to bictcp_HZ */
        t <<= BICTCP_HZ;
        do_div(t, HZ);

        if (t < ca->bic_K)              /* t - K */
                offs = ca->bic_K - t;
        else
                offs = t - ca->bic_K;

        /* c/rtt * (t-K)^3 */
        delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
        if (t < ca->bic_K)                            /* below origin*/
                bic_target = ca->bic_origin_point - delta;
        else                                          /* above origin*/
                bic_target = ca->bic_origin_point + delta;

        /* cubic function - calc bictcp_cnt*/
        if (bic_target > cwnd) {
                ca->cnt = cwnd / (bic_target - cwnd);
        } else {
                ca->cnt = 100 * cwnd;              /* very small increment*/
        }

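        /* Example (illustrative, not in the original source): if
         * bic_target = 110 and cwnd = 100, then cnt = 100 / 10 = 10, so
         * cwnd grows by one packet per 10 ACKed packets, i.e. about 10
         * packets per RTT, slowing as the curve flattens near the origin.
         */
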
        /*
         * The initial growth of cubic function may be too conservative
         * when the available bandwidth is still unknown.
         */
        if (ca->last_max_cwnd == 0 && ca->cnt > 20)
                ca->cnt = 20;   /* increase cwnd 5% per RTT */

tcp_friendliness:
        /* TCP Friendly */
        if (tcp_friendliness) {
                u32 scale = beta_scale;

                delta = (cwnd * scale) >> 3;
                while (ca->ack_cnt > delta) {           /* update tcp cwnd */
                        ca->ack_cnt -= delta;
                        ca->tcp_cwnd++;
                }

                if (ca->tcp_cwnd > cwnd) {      /* if bic is slower than tcp */
                        delta = ca->tcp_cwnd - cwnd;
                        max_cnt = cwnd / delta;
                        if (ca->cnt > max_cnt)
                                ca->cnt = max_cnt;
                }
        }

        /* The maximum rate of cwnd increase CUBIC allows is 1 packet per
         * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
         */
        ca->cnt = max(ca->cnt, 2U);
}

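/* Worked example (illustrative, not in the original source): with
 * beta = 717, beta_scale = 8 * (1024 + 717) / 3 / (1024 - 717) = 15, so
 * the emulated Reno-style window above gains one packet per
 * 15 / 8 ~= 1.875 * cwnd ACKs, i.e. about 0.53 packets per RTT. That is
 * the AIMD rate 3 * (1 - b) / (1 + b) for a flow backing off to b = 0.7
 * instead of 0.5, which is what CUBIC's growth is compared against.
 */
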
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        if (!tcp_is_cwnd_limited(sk))
                return;

        if (tcp_in_slow_start(tp)) {
                if (hystart && after(ack, ca->end_seq))
                        bictcp_hystart_reset(sk);
                acked = tcp_slow_start(tp, acked);
                if (!acked)
                        return;
        }
        bictcp_update(ca, tp->snd_cwnd, acked);
        tcp_cong_avoid_ai(tp, ca->cnt, acked);
}

static u32 bictcp_recalc_ssthresh(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);

        ca->epoch_start = 0;    /* end of epoch */

        /* Wmax and fast convergence */
        if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
                ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
                        / (2 * BICTCP_BETA_SCALE);
        else
                ca->last_max_cwnd = tp->snd_cwnd;

        return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}

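/* Worked example (illustrative, not in the original source): for
 * snd_cwnd = 100 the new ssthresh is 100 * 717 / 1024 = 70. With fast
 * convergence, a loss below the previous Wmax records
 * last_max_cwnd = 100 * (1024 + 717) / 2048 = 85 rather than 100,
 * releasing bandwidth to newer flows faster.
 */
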
static void bictcp_state(struct sock *sk, u8 new_state)
{
        if (new_state == TCP_CA_Loss) {
                bictcp_reset(inet_csk_ca(sk));
                bictcp_hystart_reset(sk);
        }
}

/* Account for TSO/GRO delays.
 * Otherwise short RTT flows could get too small ssthresh, since during
 * slow start we begin with small TSO packets and ca->delay_min would
 * not account for long aggregation delay when TSO packets get bigger.
 * Ideally even with a very small RTT we would like to have at least one
 * TSO packet being sent and received by GRO, and another one in qdisc layer.
 * We apply another 100% factor because @rate is doubled at this point.
 * We cap the cushion to 1ms.
 */
static u32 hystart_ack_delay(struct sock *sk)
{
        unsigned long rate;

        rate = READ_ONCE(sk->sk_pacing_rate);
        if (!rate)
                return 0;
        return min_t(u64, USEC_PER_MSEC,
                     div64_ul((u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}

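/* Worked example (illustrative, not in the original source): with
 * GSO_MAX_SIZE = 65536 and a pacing rate of 1.25e8 bytes/sec (1 Gbit/s),
 * the raw cushion is 65536 * 4 * 1e6 / 1.25e8 ~= 2097 us, capped to
 * 1000 us; at 10 Gbit/s it is ~210 us and the cap does not apply.
 */
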
static void hystart_update(struct sock *sk, u32 delay)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        u32 threshold;

        if (hystart_detect & HYSTART_ACK_TRAIN) {
                u32 now = bictcp_clock_us(sk);

                /* first detection parameter - ack-train detection */
                if ((s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
                        ca->last_ack = now;

                        threshold = ca->delay_min + hystart_ack_delay(sk);

                        /* Hystart ack train triggers if we get ack past
                         * ca->delay_min/2.
                         * Pacing might have delayed packets up to RTT/2
                         * during slow start.
                         */
                        if (sk->sk_pacing_status == SK_PACING_NONE)
                                threshold >>= 1;

                        if ((s32)(now - ca->round_start) > threshold) {
                                ca->found = 1;
                                pr_debug("hystart_ack_train (%u > %u) delay_min %u (+ ack_delay %u) cwnd %u\n",
                                         now - ca->round_start, threshold,
                                         ca->delay_min, hystart_ack_delay(sk), tp->snd_cwnd);
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPHYSTARTTRAINDETECT);
                                NET_ADD_STATS(sock_net(sk),
                                              LINUX_MIB_TCPHYSTARTTRAINCWND,
                                              tp->snd_cwnd);
                                tp->snd_ssthresh = tp->snd_cwnd;
                        }
                }
        }

        if (hystart_detect & HYSTART_DELAY) {
                /* obtain the minimum delay of more than sampling packets */
                if (ca->curr_rtt > delay)
                        ca->curr_rtt = delay;
                if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
                        ca->sample_cnt++;
                } else {
                        if (ca->curr_rtt > ca->delay_min +
                            HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
                                ca->found = 1;
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPHYSTARTDELAYDETECT);
                                NET_ADD_STATS(sock_net(sk),
                                              LINUX_MIB_TCPHYSTARTDELAYCWND,
                                              tp->snd_cwnd);
                                tp->snd_ssthresh = tp->snd_cwnd;
                        }
                }
        }
}

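/* Worked example (illustrative, not in the original source): for
 * delay_min = 10000 us, the delay detector exits slow start once the
 * round's minimum RTT exceeds 10000 + clamp(10000 >> 3, 4000, 16000) =
 * 14000 us, i.e. a 40% RTT increase, evaluated only after at least
 * HYSTART_MIN_SAMPLES = 8 RTT samples in the round.
 */
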
static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        u32 delay;

        /* Some calls are for duplicates without timestamps */
        if (sample->rtt_us < 0)
                return;

        /* Discard delay samples right after fast recovery */
        if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
                return;

        delay = sample->rtt_us;
        if (delay == 0)
                delay = 1;

        /* first time call or link delay decreases */
        if (ca->delay_min == 0 || ca->delay_min > delay)
                ca->delay_min = delay;

        /* hystart triggers when cwnd is larger than some threshold */
        if (!ca->found && tcp_in_slow_start(tp) && hystart &&
            tp->snd_cwnd >= hystart_low_window)
                hystart_update(sk, delay);
}

static struct tcp_congestion_ops cubictcp __read_mostly = {
        .init           = bictcp_init,
        .ssthresh       = bictcp_recalc_ssthresh,
        .cong_avoid     = bictcp_cong_avoid,
        .set_state      = bictcp_state,
        .undo_cwnd      = tcp_reno_undo_cwnd,
        .cwnd_event     = bictcp_cwnd_event,
        .pkts_acked     = bictcp_acked,
        .owner          = THIS_MODULE,
        .name           = "cubic",
};

static int __init cubictcp_register(void)
{
        BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);

        /* Precompute a bunch of the scaling factors that are used per-packet
         * based on SRTT of 100ms
         */

        beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
                / (BICTCP_BETA_SCALE - beta);

        cube_rtt_scale = (bic_scale * 10);      /* 1024*c/rtt */

        /* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
         * so K = cubic_root( (wmax-cwnd)*rtt/c )
         * the unit of K is bictcp_HZ=2^10, not HZ
         *
         * c = bic_scale >> 10
         * rtt = 100ms
         *
         * the following code has been designed and tested for
         * cwnd < 1 million packets
         * RTT < 100 seconds
         * HZ < 100,000,000 (corresponding to a 10 nanosecond jiffy)
         */

        /* 1/c * 2^(3*bictcp_HZ) * srtt */
        cube_factor = 1ull << (10+3*BICTCP_HZ); /* 2^40 */

        /* divide by bic_scale and by constant Srtt (100ms) */
        do_div(cube_factor, bic_scale * 10);

        return tcp_register_congestion_control(&cubictcp);
}

static void __exit cubictcp_unregister(void)
{
        tcp_unregister_congestion_control(&cubictcp);
}

module_init(cubictcp_register);
module_exit(cubictcp_unregister);

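/* Usage note (added commentary, not in the original source): when built
 * as a module, the parameters above appear under
 * /sys/module/tcp_cubic/parameters/. The algorithm is selected system
 * wide via sysctl net.ipv4.tcp_congestion_control=cubic, or per socket
 * with setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", 5).
 */
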
MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CUBIC TCP");
MODULE_VERSION("2.3");