/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007-2008
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart and
 * James Healy, made possible in part by a grant from the Cisco University
 * Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This software was first released in 2007 by James Healy and Lawrence Stewart
 * whilst working on the NewTCP research project at Swinburne University of
 * Technology's Centre for Advanced Internet Architectures, Melbourne,
 * Australia, which was made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 * More details are available at:
 *   http://caia.swin.edu.au/urp/newtcp/
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <opt_cc.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_hpts.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_module.h>

/*
 * Have a sane default if no CC_DEFAULT is specified in the kernel config file.
 */
#ifndef CC_DEFAULT
#define CC_DEFAULT "newreno"
#endif
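
/*
 * Illustrative example (not from this file; quoting per config(5)): a kernel
 * config can select a different default with a line such as
 *
 *	options CC_DEFAULT=\"cubic\"
 */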

MALLOC_DEFINE(M_CC_MEM, "CC Mem", "Congestion Control State memory");

/*
 * List of available cc algorithms on the current system. First element
 * is used as the system default CC algorithm.
 */
struct cc_head cc_list = STAILQ_HEAD_INITIALIZER(cc_list);

/* Protects the cc_list STAILQ. */
struct rwlock cc_list_lock;

VNET_DEFINE(struct cc_algo *, default_cc_ptr) = NULL;

VNET_DEFINE(uint32_t, newreno_beta) = 50;
#define V_newreno_beta VNET(newreno_beta)
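
/*
 * Illustrative note: newreno_beta is a percentage, so the default of 50
 * yields a multiplicative decrease factor of 0.5 when it is applied in
 * newreno_cc_cong_signal() below.
 */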

/*
 * Sysctl handler to show and change the default CC algorithm.
 */
static int
cc_default_algo(SYSCTL_HANDLER_ARGS)
{
	char default_cc[TCP_CA_NAME_MAX];
	struct cc_algo *funcs;
	int error;

	/* Get the current default: */
	CC_LIST_RLOCK();
	if (CC_DEFAULT_ALGO() != NULL)
		strlcpy(default_cc, CC_DEFAULT_ALGO()->name, sizeof(default_cc));
	else
		memset(default_cc, 0, TCP_CA_NAME_MAX);
	CC_LIST_RUNLOCK();

	error = sysctl_handle_string(oidp, default_cc, sizeof(default_cc), req);

	/* Check for error or no change */
	if (error != 0 || req->newptr == NULL)
		goto done;

	error = ESRCH;
	/* Find algo with specified name and set it to default. */
	CC_LIST_RLOCK();
	STAILQ_FOREACH(funcs, &cc_list, entries) {
		if (strncmp(default_cc, funcs->name, sizeof(default_cc)))
			continue;
		V_default_cc_ptr = funcs;
		error = 0;
		break;
	}
	CC_LIST_RUNLOCK();
done:
	return (error);
}
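
/*
 * Example usage (illustrative): the default can be read and set via
 * sysctl(8):
 *
 *	# sysctl net.inet.tcp.cc.algorithm
 *	net.inet.tcp.cc.algorithm: newreno
 *	# sysctl net.inet.tcp.cc.algorithm=cubic
 *
 * Requesting an algorithm that is not registered fails with ESRCH.
 */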

/*
 * Sysctl handler to display the list of available CC algorithms.
 */
static int
cc_list_available(SYSCTL_HANDLER_ARGS)
{
	struct cc_algo *algo;
	struct sbuf *s;
	int err, first, nalgos;

	err = nalgos = 0;
	first = 1;

	CC_LIST_RLOCK();
	STAILQ_FOREACH(algo, &cc_list, entries) {
		nalgos++;
	}
	CC_LIST_RUNLOCK();
	if (nalgos == 0) {
		return (ENOENT);
	}
	s = sbuf_new(NULL, NULL, nalgos * TCP_CA_NAME_MAX, SBUF_FIXEDLEN);

	if (s == NULL)
		return (ENOMEM);

	/*
	 * It is theoretically possible for the CC list to have grown in size
	 * since the call to sbuf_new() and therefore for the sbuf to be too
	 * small. If this were to happen (incredibly unlikely), the sbuf will
	 * reach an overflow condition, sbuf_printf() will return an error and
	 * the sysctl will fail gracefully.
	 */
	CC_LIST_RLOCK();
	STAILQ_FOREACH(algo, &cc_list, entries) {
		err = sbuf_printf(s, first ? "%s" : ", %s", algo->name);
		if (err) {
			/* Sbuf overflow condition. */
			err = EOVERFLOW;
			break;
		}
		first = 0;
	}
	CC_LIST_RUNLOCK();

	if (!err) {
		sbuf_finish(s);
		err = sysctl_handle_string(oidp, sbuf_data(s), 0, req);
	}

	sbuf_delete(s);
	return (err);
}
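
/*
 * Example (illustrative): "sysctl net.inet.tcp.cc.available" prints the
 * registered algorithms as a comma-separated list, e.g. "newreno, cubic".
 */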

/*
 * Return the number of vnets which currently use remove_cc
 * as their default CC algorithm.
 */
static int
cc_check_default(struct cc_algo *remove_cc)
{
	int cnt = 0;
	VNET_ITERATOR_DECL(vnet_iter);

	CC_LIST_LOCK_ASSERT();

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		if ((CC_DEFAULT_ALGO() != NULL) &&
		    strncmp(CC_DEFAULT_ALGO()->name,
			    remove_cc->name,
			    TCP_CA_NAME_MAX) == 0) {
			cnt++;
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
	return (cnt);
}

/*
 * Initialise CC subsystem on system boot.
 */
static void
cc_init(void)
{
	CC_LIST_LOCK_INIT();
	STAILQ_INIT(&cc_list);
}

/*
 * Returns 0 on success, non-zero on failure.
 */
int
cc_deregister_algo(struct cc_algo *remove_cc)
{
	struct cc_algo *funcs, *tmpfuncs;
	int err;

	err = ENOENT;

	/* Remove algo from cc_list so that new connections can't use it. */
	CC_LIST_WLOCK();
	STAILQ_FOREACH_SAFE(funcs, &cc_list, entries, tmpfuncs) {
		if (funcs == remove_cc) {
			if (cc_check_default(remove_cc)) {
				CC_LIST_WUNLOCK();
				return (EBUSY);
			}
			break;
		}
	}
	remove_cc->flags |= CC_MODULE_BEING_REMOVED;
	CC_LIST_WUNLOCK();
	err = tcp_ccalgounload(remove_cc);
	/*
	 * Now walk the list again and either clear the temporary flag
	 * (the unload failed) or complete the removal of the registration
	 * (the unload succeeded).
	 */
	CC_LIST_WLOCK();
	STAILQ_FOREACH_SAFE(funcs, &cc_list, entries, tmpfuncs) {
		if (funcs == remove_cc) {
			if (err == 0)
				STAILQ_REMOVE(&cc_list, funcs, cc_algo, entries);
			else
				funcs->flags &= ~CC_MODULE_BEING_REMOVED;
			break;
		}
	}
	CC_LIST_WUNLOCK();
	return (err);
}

/*
 * Returns 0 on success, non-zero on failure.
 */
int
cc_register_algo(struct cc_algo *add_cc)
{
	struct cc_algo *funcs;
	int err;

	err = 0;

	/*
	 * Iterate over list of registered CC algorithms and make sure
	 * we're not trying to add a duplicate.
	 */
	CC_LIST_WLOCK();
	STAILQ_FOREACH(funcs, &cc_list, entries) {
		if (funcs == add_cc ||
		    strncmp(funcs->name, add_cc->name,
			    TCP_CA_NAME_MAX) == 0) {
			err = EEXIST;
			break;
		}
	}
	/*
	 * The first congestion control module to load becomes the default;
	 * it is displaced when the module named by CC_DEFAULT in the kernel
	 * config registers itself (if it ever does).
	 */
	if (!err) {
		STAILQ_INSERT_TAIL(&cc_list, add_cc, entries);
		if (strcmp(add_cc->name, CC_DEFAULT) == 0) {
			V_default_cc_ptr = add_cc;
		} else if (V_default_cc_ptr == NULL) {
			V_default_cc_ptr = add_cc;
		}
	}
	CC_LIST_WUNLOCK();

	return (err);
}

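/*
 * A newly created vnet inherits its default CC algorithm from the base
 * system (vnet0).
 */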
static void
vnet_cc_sysinit(void *arg)
{
	struct cc_algo *cc;

	if (IS_DEFAULT_VNET(curvnet))
		return;

	CURVNET_SET(vnet0);
	cc = V_default_cc_ptr;
	CURVNET_RESTORE();

	V_default_cc_ptr = cc;
}
VNET_SYSINIT(vnet_cc_sysinit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_cc_sysinit, NULL);

/*
 * Perform any necessary tasks before we exit congestion recovery.
 */
void
newreno_cc_post_recovery(struct cc_var *ccv)
{
	int pipe;

	if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
		/*
		 * Fast recovery will conclude after returning from this
		 * function. Window inflation should have left us with
		 * approximately snd_ssthresh outstanding data. But in case we
		 * would be inclined to send a burst, better to do it via the
		 * slow start mechanism.
		 *
		 * XXXLAS: Find a way to do this without needing curack
		 */
		if (V_tcp_do_newsack)
			pipe = tcp_compute_pipe(ccv->ccvc.tcp);
		else
			pipe = CCV(ccv, snd_max) - ccv->curack;
		if (pipe < CCV(ccv, snd_ssthresh))
			/*
			 * Ensure that cwnd does not collapse to 1 MSS under
			 * adverse conditions. Implements RFC6582.
			 */
			CCV(ccv, snd_cwnd) = max(pipe, CCV(ccv, t_maxseg)) +
			    CCV(ccv, t_maxseg);
		else
			CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
	}
}

void
newreno_cc_after_idle(struct cc_var *ccv)
{
	uint32_t rw;
	/*
	 * If we've been idle for more than one retransmit timeout the old
	 * congestion window is no longer current and we have to reduce it to
	 * the restart window before we can transmit again.
	 *
	 * The restart window is the initial window or the last CWND, whichever
	 * is smaller.
	 *
	 * This is done to prevent us from flooding the path with a full CWND at
	 * wirespeed, overloading router and switch buffers along the way.
	 *
	 * See RFC5681 Section 4.1. "Restarting Idle Connections".
	 *
	 * In addition, per RFC2861 Section 2, the ssthresh is set to the
	 * maximum of the former ssthresh or 3/4 of the old cwnd, to
	 * not exit slow-start prematurely.
	 */
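	/*
	 * Worked example (illustrative): with snd_cwnd = 80000 bytes,
	 * snd_cwnd >> 2 = 20000, so ssthresh below is raised to at least
	 * 60000 bytes, i.e. 3/4 of the old cwnd.
	 */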
	rw = tcp_compute_initwnd(tcp_maxseg(ccv->ccvc.tcp));

	CCV(ccv, snd_ssthresh) = max(CCV(ccv, snd_ssthresh),
	    CCV(ccv, snd_cwnd)-(CCV(ccv, snd_cwnd)>>2));

	CCV(ccv, snd_cwnd) = min(rw, CCV(ccv, snd_cwnd));
}

/*
 * Perform any necessary tasks before we enter congestion recovery.
 */
void
newreno_cc_cong_signal(struct cc_var *ccv, uint32_t type)
{
	uint32_t cwin, factor;
	u_int mss;

	cwin = CCV(ccv, snd_cwnd);
	mss = tcp_fixed_maxseg(ccv->ccvc.tcp);
	/*
	 * Other TCP congestion controls use newreno_cong_signal(), but
	 * with their own private cc_data. Make sure the cc_data is used
	 * correctly.
	 */
	factor = V_newreno_beta;

	/* Catch algos which mistakenly leak private signal types. */
	KASSERT((type & CC_SIGPRIVMASK) == 0,
	    ("%s: congestion signal type 0x%08x is private\n", __func__, type));

	cwin = max(((uint64_t)cwin * (uint64_t)factor) / (100ULL * (uint64_t)mss),
	    2) * mss;
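
	/*
	 * Worked example (illustrative, not from the original source): with
	 * cwin = 100000 bytes, factor = 50 (beta of 0.5) and mss = 1000,
	 * the line above computes max(100000 * 50 / (100 * 1000), 2) * 1000
	 * = 50000 bytes: half the old window, rounded down to a multiple of
	 * mss and floored at 2 * mss.
	 */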

	switch (type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
			if (!IN_CONGRECOVERY(CCV(ccv, t_flags)))
				CCV(ccv, snd_ssthresh) = cwin;
			ENTER_RECOVERY(CCV(ccv, t_flags));
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
			CCV(ccv, snd_ssthresh) = cwin;
			CCV(ccv, snd_cwnd) = cwin;
			ENTER_CONGRECOVERY(CCV(ccv, t_flags));
		}
		break;
	case CC_RTO:
		CCV(ccv, snd_ssthresh) = max(min(CCV(ccv, snd_wnd),
						 CCV(ccv, snd_cwnd)) / 2 / mss,
					     2) * mss;
		CCV(ccv, snd_cwnd) = mss;
		break;
	}
}

void
newreno_cc_ack_received(struct cc_var *ccv, uint16_t type)
{
	if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
	    (ccv->flags & CCF_CWND_LIMITED)) {
		u_int cw = CCV(ccv, snd_cwnd);
		u_int incr = CCV(ccv, t_maxseg);

		/*
		 * Regular in-order ACK, open the congestion window.
		 * Method depends on which congestion control state we're
		 * in (slow start or cong avoid) and if ABC (RFC 3465) is
		 * enabled.
		 *
		 * slow start: cwnd <= ssthresh
		 * cong avoid: cwnd > ssthresh
		 *
		 * slow start and ABC (RFC 3465):
		 *   Grow cwnd exponentially by the amount of data
		 *   ACKed capping the max increment per ACK to
		 *   (abc_l_var * maxseg) bytes.
		 *
		 * slow start without ABC (RFC 5681):
		 *   Grow cwnd exponentially by maxseg per ACK.
		 *
		 * cong avoid and ABC (RFC 3465):
		 *   Grow cwnd linearly by maxseg per RTT for each
		 *   cwnd worth of ACKed data.
		 *
		 * cong avoid without ABC (RFC 5681):
		 *   Grow cwnd linearly by approximately maxseg per RTT using
		 *   maxseg^2 / cwnd per ACK as the increment.
		 *   If cwnd > maxseg^2, fix the cwnd increment at 1 byte to
		 *   avoid capping cwnd.
		 */
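		/*
		 * Worked example (illustrative): in congestion avoidance
		 * without ABC, maxseg = 1460 and cwnd = 100000 give
		 * incr = max(1460 * 1460 / 100000, 1) = 21 bytes per ACK,
		 * i.e. roughly one maxseg of growth per cwnd's worth of
		 * ACKed data.
		 */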
		if (cw > CCV(ccv, snd_ssthresh)) {
			if (V_tcp_do_rfc3465) {
				if (ccv->flags & CCF_ABC_SENTAWND)
					ccv->flags &= ~CCF_ABC_SENTAWND;
				else
					incr = 0;
			} else
				incr = max((incr * incr / cw), 1);
		} else if (V_tcp_do_rfc3465) {
			/*
			 * In slow-start with ABC enabled and no RTO in sight?
			 * (Must not use abc_l_var > 1 if slow starting after
			 * an RTO. On RTO, snd_nxt = snd_una, so the
			 * snd_nxt == snd_max check is sufficient to
			 * handle this).
			 *
			 * XXXLAS: Find a way to signal SS after RTO that
			 * doesn't rely on tcpcb vars.
			 */
			uint16_t abc_val;

			if (ccv->flags & CCF_USE_LOCAL_ABC)
				abc_val = ccv->labc;
			else
				abc_val = V_tcp_abc_l_var;
			if (CCV(ccv, snd_nxt) == CCV(ccv, snd_max))
				incr = min(ccv->bytes_this_ack,
				    ccv->nsegs * abc_val *
				    CCV(ccv, t_maxseg));
			else
				incr = min(ccv->bytes_this_ack, CCV(ccv, t_maxseg));

		}
		/* ABC is on by default, so incr equals 0 frequently. */
		if (incr > 0)
			CCV(ccv, snd_cwnd) = min(cw + incr,
			    TCP_MAXWIN << CCV(ccv, snd_scale));
	}
}

/*
 * Handles kld related events. Returns 0 on success, non-zero on failure.
 */
int
cc_modevent(module_t mod, int event_type, void *data)
{
	struct cc_algo *algo;
	int err;

	err = 0;
	algo = (struct cc_algo *)data;

	switch(event_type) {
	case MOD_LOAD:
		if ((algo->cc_data_sz == NULL) && (algo->cb_init != NULL)) {
			/*
			 * A module that provides cb_init() must also provide
			 * a cc_data_sz() function; if the module has no
			 * private data, cc_data_sz() should return 0.
			 */
			printf("Module load failed: it has a cb_init() but lacks a cc_data_sz() function!\n");
			err = EINVAL;
			break;
		}
		if (algo->mod_init != NULL)
			err = algo->mod_init();
		if (!err)
			err = cc_register_algo(algo);
		break;

	case MOD_QUIESCE:
	case MOD_SHUTDOWN:
	case MOD_UNLOAD:
		err = cc_deregister_algo(algo);
		if (!err && algo->mod_destroy != NULL)
			algo->mod_destroy();
		if (err == ENOENT)
			err = 0;
		break;

	default:
		err = EINVAL;
		break;
	}

	return (err);
}
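
/*
 * Modules reach cc_modevent() through the DECLARE_CC_MODULE() macro in
 * cc_module.h, which wraps an algorithm's struct cc_algo in a moduledata_t
 * with cc_modevent as the event handler. A minimal sketch for a
 * hypothetical algorithm "foo":
 *
 *	DECLARE_CC_MODULE(foo, &foo_cc_algo);
 *	MODULE_VERSION(foo, 1);
 */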

SYSINIT(cc, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST, cc_init, NULL);

/* Declare sysctl tree and populate it. */
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, cc, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "Congestion control related settings");

SYSCTL_PROC(_net_inet_tcp_cc, OID_AUTO, algorithm,
    CTLFLAG_VNET | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, cc_default_algo, "A",
    "Default congestion control algorithm");

SYSCTL_PROC(_net_inet_tcp_cc, OID_AUTO, available,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
    NULL, 0, cc_list_available, "A",
    "List available congestion control algorithms");

VNET_DEFINE(int, cc_do_abe) = 0;
SYSCTL_INT(_net_inet_tcp_cc, OID_AUTO, abe, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(cc_do_abe), 0,
    "Enable draft-ietf-tcpm-alternativebackoff-ecn (TCP Alternative Backoff with ECN)");

VNET_DEFINE(int, cc_abe_frlossreduce) = 0;
SYSCTL_INT(_net_inet_tcp_cc, OID_AUTO, abe_frlossreduce, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(cc_abe_frlossreduce), 0,
    "Apply standard beta instead of ABE-beta during ECN-signalled congestion "
    "recovery episodes if loss also needs to be repaired");