Commit 7536b4ae, authored Oct 22, 2021 by HardenedBSD Sync Service

Merge branch 'freebsd/current/main' into hardened/current/master

Parents: 03c77793, d7acbe48
Changes: 10
sys/dev/igc/if_igc.c
...
@@ -1172,12 +1172,12 @@ igc_if_multi_set(if_ctx_t ctx)
		reg_rctl |= IGC_RCTL_MPE;
		reg_rctl &= ~IGC_RCTL_UPE;
	} else
		reg_rctl = ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
	IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
	reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE);

	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		igc_update_mc_addr_list(&adapter->hw, mta, mcnt);

	IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
}

/*********************************************************************
...
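The interesting delta in this hunk is the else branch: one variant assigns the complement of the two filter bits to reg_rctl, clobbering every other bit of the receive-control register, while the other only clears those two bits and preserves the rest. A minimal illustration of that C-level difference (the bit values below are made up for the example, they are not taken from the driver):

#include <stdint.h>

#define UPE	0x01	/* hypothetical "unicast promiscuous" bit */
#define MPE	0x02	/* hypothetical "multicast promiscuous" bit */
#define OTHER	0x80	/* some unrelated bit already set in the register */

int
main(void)
{
	uint32_t a = OTHER | UPE | MPE;
	uint32_t b = OTHER | UPE | MPE;

	a = ~(UPE | MPE);	/* '=' : drops OTHER and turns on every other bit */
	b &= ~(UPE | MPE);	/* '&=': clears only UPE and MPE, keeps OTHER */
	return (a == b);	/* 0: the two results differ */
}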
sys/net80211/ieee80211_sta.c
...
@@ -552,6 +552,35 @@ sta_input(struct ieee80211_node *ni, struct mbuf *m,
	int is_hw_decrypted = 0;
	int has_decrypted = 0;

	KASSERT(ni != NULL, ("%s: null node, mbuf %p", __func__, m));

	/* Early init in case of early error case. */
	type = -1;

	/*
	 * Bit of a cheat here, we use a pointer for a 3-address
	 * frame format but don't reference fields past outside
	 * ieee80211_frame_min (or other shorter frames) w/o first
	 * validating the data is present.
	 */
	wh = mtod(m, struct ieee80211_frame *);

	if (m->m_pkthdr.len < 2 ||
	    m->m_pkthdr.len < ieee80211_anyhdrsize(wh)) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, NULL,
		    "too short (1): len %u", m->m_pkthdr.len);
		vap->iv_stats.is_rx_tooshort++;
		goto err;
	}

	if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
	    IEEE80211_FC0_VERSION_0) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x",
		    wh->i_fc[0], wh->i_fc[1]);
		vap->iv_stats.is_rx_badversion++;
		goto err;
	}

	/*
	 * Some devices do hardware decryption all the way through
	 * to pretending the frame wasn't encrypted in the first place.
...
@@ -569,7 +598,6 @@ sta_input(struct ieee80211_node *ni, struct mbuf *m,
	 * with the M_AMPDU_MPDU flag and we can bypass most of
	 * the normal processing.
	 */
	wh = mtod(m, struct ieee80211_frame *);
	type = IEEE80211_FC0_TYPE_DATA;
	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
	subtype = IEEE80211_FC0_SUBTYPE_QOS;
...
@@ -577,39 +605,19 @@ sta_input(struct ieee80211_node *ni, struct mbuf *m,
		goto resubmit_ampdu;
	}
	KASSERT(ni != NULL, ("null node"));
	ni->ni_inact = ni->ni_inact_reload;
	type = -1;			/* undefined */

	if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_min)) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, NULL,
		    "too short (1): len %u", m->m_pkthdr.len);
		vap->iv_stats.is_rx_tooshort++;
		goto out;
	}

	/*
	 * Bit of a cheat here, we use a pointer for a 3-address
	 * frame format but don't reference fields past outside
	 * ieee80211_frame_min w/o first validating the data is
	 * present.
	 */
	wh = mtod(m, struct ieee80211_frame *);

	if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
	    IEEE80211_FC0_VERSION_0) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, NULL, "wrong version, fc %02x:%02x",
		    wh->i_fc[0], wh->i_fc[1]);
		vap->iv_stats.is_rx_badversion++;
		goto err;
	}

	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
	/*
	 * Control frames are not folowing the header scheme of data and mgmt
	 * frames so we do not apply extra checks here.
	 * We probably should do checks on RA (+TA) where available for those
	 * too, but for now do not drop them.
	 */
	if (type != IEEE80211_FC0_TYPE_CTL &&
	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
		bssid = wh->i_addr2;
		if (!IEEE80211_ADDR_EQ(bssid, ni->ni_bssid)) {
			/* not interested in */
...
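Taken together, the three sta_input() hunks appear to move the frame-length and protocol-version checks ahead of the first use of the 802.11 header: a frame shorter than what ieee80211_anyhdrsize() expects, or carrying a version other than IEEE80211_FC0_VERSION_0, is now discarded before header fields beyond ieee80211_frame_min are read, and the later copies of those checks go away. Control frames, which do not share the data/management header layout, are exempted from the extra BSSID comparison, as the new comment notes.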
sys/netinet/cc/cc.h
...
@@ -163,6 +163,15 @@ struct cc_algo {
	/* Called for an additional ECN processing apart from RFC3168. */
	void	(*ecnpkt_handler)(struct cc_var *ccv);

	/* Called when a new "round" begins, if the transport is tracking rounds. */
	void	(*newround)(struct cc_var *ccv, uint32_t round_cnt);

	/*
	 * Called when a RTT sample is made (fas = flight at send, if you dont have it
	 * send the cwnd in).
	 */
	void	(*rttsample)(struct cc_var *ccv, uint32_t usec_rtt, uint32_t rxtcnt, uint32_t fas);

	/* Called for {get|set}sockopt() on a TCP socket with TCP_CCALGOOPT. */
	int	(*ctl_output)(struct cc_var *, struct sockopt *, void *);
...
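The two new members, newround and rttsample, are optional per-algorithm callbacks: a congestion-control module that wants round and RTT telemetry fills them in alongside its other handlers. A rough sketch of how a module might wire them up follows; the example_* names are hypothetical and not part of this commit, only the hook signatures come from the cc.h fragment above:

#include <netinet/cc/cc.h>

struct example_cc_data {
	uint32_t round_minrtt;		/* lowest RTT seen in the current round (usecs) */
};

static void
example_newround(struct cc_var *ccv, uint32_t round_cnt)
{
	struct example_cc_data *d = ccv->cc_data;

	/* A new round has started: reset the per-round minimum. */
	d->round_minrtt = 0xffffffff;
}

static void
example_rttsample(struct cc_var *ccv, uint32_t usec_rtt, uint32_t rxtcnt,
    uint32_t fas)
{
	struct example_cc_data *d = ccv->cc_data;

	if (rxtcnt > 1)
		return;			/* skip ambiguous (retransmitted) samples */
	if (usec_rtt < d->round_minrtt)
		d->round_minrtt = usec_rtt;
}

struct cc_algo example_cc_algo = {
	.name = "example",
	.newround = example_newround,
	.rttsample = example_rttsample,
	/* the remaining handlers are omitted in this sketch */
};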
sys/netinet/cc/cc_newreno.c
...
@@ -63,15 +63,21 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_hpts.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_module.h>
#include <netinet/cc/cc_newreno.h>
...
@@ -85,6 +91,9 @@ static void newreno_after_idle(struct cc_var *ccv);
static void	newreno_cong_signal(struct cc_var *ccv, uint32_t type);
static void	newreno_post_recovery(struct cc_var *ccv);
static int	newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf);
static void	newreno_newround(struct cc_var *ccv, uint32_t round_cnt);
static void	newreno_rttsample(struct cc_var *ccv, uint32_t usec_rtt, uint32_t rxtcnt, uint32_t fas);
static int	newreno_cb_init(struct cc_var *ccv);

VNET_DEFINE(uint32_t, newreno_beta) = 50;
VNET_DEFINE(uint32_t, newreno_beta_ecn) = 80;
...
@@ -99,23 +108,95 @@ struct cc_algo newreno_cc_algo = {
	.cong_signal = newreno_cong_signal,
	.post_recovery = newreno_post_recovery,
	.ctl_output = newreno_ctl_output,
	.newround = newreno_newround,
	.rttsample = newreno_rttsample,
	.cb_init = newreno_cb_init,
};

static inline struct newreno *
newreno_malloc(struct cc_var *ccv)
{
	struct newreno *nreno;
static uint32_t hystart_lowcwnd = 16;
static uint32_t hystart_minrtt_thresh = 4000;
static uint32_t hystart_maxrtt_thresh = 16000;
static uint32_t hystart_n_rttsamples = 8;
static uint32_t hystart_css_growth_div = 4;
static uint32_t hystart_css_rounds = 5;
static uint32_t hystart_bblogs = 0;

	nreno = malloc(sizeof(struct newreno), M_NEWRENO, M_NOWAIT);
	if (nreno != NULL) {
		/* NB: nreno is not zeroed, so initialise all fields. */
		nreno->beta = V_newreno_beta;
		nreno->beta_ecn = V_newreno_beta_ecn;
		nreno->newreno_flags = 0;
		ccv->cc_data = nreno;

static void
newreno_log_hystart_event(struct cc_var *ccv, struct newreno *nreno, uint8_t mod, uint32_t flex1)
{
	/*
	 * Types of logs (mod value)
	 * 1 - rtt_thresh in flex1, checking to see if RTT is to great.
	 * 2 - rtt is too great, rtt_thresh in flex1.
	 * 3 - CSS is active incr in flex1
	 * 4 - A new round is beginning flex1 is round count
	 * 5 - A new RTT measurement flex1 is the new measurement.
	 * 6 - We enter CA ssthresh is also in flex1.
	 * 7 - Socket option to change hystart executed opt.val in flex1.
	 * 8 - Back out of CSS into SS, flex1 is the css_baseline_minrtt
	 */
	struct tcpcb *tp;

	if (hystart_bblogs == 0)
		return;
	tp = ccv->ccvc.tcp;
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		memset(&log, 0, sizeof(log));
		log.u_bbr.flex1 = flex1;
		log.u_bbr.flex2 = nreno->css_current_round_minrtt;
		log.u_bbr.flex3 = nreno->css_lastround_minrtt;
		log.u_bbr.flex4 = nreno->css_rttsample_count;
		log.u_bbr.flex5 = nreno->css_entered_at_round;
		log.u_bbr.flex6 = nreno->css_baseline_minrtt;
		/* We only need bottom 16 bits of flags */
		log.u_bbr.flex7 = nreno->newreno_flags & 0x0000ffff;
		log.u_bbr.flex8 = mod;
		log.u_bbr.epoch = nreno->css_current_round;
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.lt_epoch = nreno->css_fas_at_css_entry;
		log.u_bbr.pkts_out = nreno->css_last_fas;
		log.u_bbr.delivered = nreno->css_lowrtt_fas;
		TCP_LOG_EVENTP(tp, NULL,
		    &tp->t_inpcb->inp_socket->so_rcv,
		    &tp->t_inpcb->inp_socket->so_snd,
		    TCP_HYSTART, 0,
		    0, &log, false, &tv);
	}
}

static int
newreno_cb_init(struct cc_var *ccv)
{
	struct newreno *nreno;
	return (nreno);
	ccv->cc_data = NULL;
	ccv->cc_data = malloc(sizeof(struct newreno), M_NEWRENO, M_NOWAIT);
	if (ccv->cc_data == NULL)
		return (ENOMEM);
	nreno = (struct newreno *)ccv->cc_data;
	/* NB: nreno is not zeroed, so initialise all fields. */
	nreno->beta = V_newreno_beta;
	nreno->beta_ecn = V_newreno_beta_ecn;
	/*
	 * We set the enabled flag so that if
	 * the socket option gets strobed and
	 * we have not hit a loss
	 */
	nreno->newreno_flags = CC_NEWRENO_HYSTART_ENABLED;
	/* At init set both to infinity */
	nreno->css_lastround_minrtt = 0xffffffff;
	nreno->css_current_round_minrtt = 0xffffffff;
	nreno->css_current_round = 0;
	nreno->css_baseline_minrtt = 0xffffffff;
	nreno->css_rttsample_count = 0;
	nreno->css_entered_at_round = 0;
	nreno->css_fas_at_css_entry = 0;
	nreno->css_lowrtt_fas = 0;
	nreno->css_last_fas = 0;
	return (0);
}

static void
...
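newreno_cb_init() is the new per-connection constructor: it allocates the module's private state, stores it in ccv->cc_data, and reports ENOMEM on failure. How the framework invokes the hook is outside this diff; a hypothetical caller (names below are illustrative only) would look roughly like:

/* Hypothetical sketch of a caller of the new cb_init hook. */
static int
example_cc_attach(struct cc_var *ccv, struct cc_algo *algo)
{
	int error = 0;

	if (algo->cb_init != NULL)
		error = algo->cb_init(ccv);	/* e.g. newreno_cb_init() */
	return (error);				/* ENOMEM if allocation failed */
}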
@@ -127,6 +208,9 @@ newreno_cb_destroy(struct cc_var *ccv)
static void
newreno_ack_received(struct cc_var *ccv, uint16_t type)
{
	struct newreno *nreno;

	nreno = (struct newreno *)ccv->cc_data;
	if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
	    (ccv->flags & CCF_CWND_LIMITED)) {
		u_int cw = CCV(ccv, snd_cwnd);
...
@@ -160,6 +244,16 @@ newreno_ack_received(struct cc_var *ccv, uint16_t type)
		 * avoid capping cwnd.
		 */
		if (cw > CCV(ccv, snd_ssthresh)) {
			if (nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) {
				/*
				 * We have slipped into CA with
				 * CSS active. Deactivate all.
				 */
				/* Turn off the CSS flag */
				nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
				/* Disable use of CSS in the future except long idle */
				nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
			}
			if (V_tcp_do_rfc3465) {
				if (ccv->flags & CCF_ABC_SENTAWND)
					ccv->flags &= ~CCF_ABC_SENTAWND;
...
@@ -184,12 +278,48 @@ newreno_ack_received(struct cc_var *ccv, uint16_t type)
					abc_val = ccv->labc;
				else
					abc_val = V_tcp_abc_l_var;
				if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_ALLOWED) &&
				    (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) &&
				    ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) == 0)) {
					/*
					 * Hystart is allowed and still enabled and we are not yet
					 * in CSS. Lets check to see if we can make a decision on
					 * if we need to go into CSS.
					 */
					if ((nreno->css_rttsample_count >= hystart_n_rttsamples) &&
					    (CCV(ccv, snd_cwnd) >
					     (hystart_lowcwnd * tcp_fixed_maxseg(ccv->ccvc.tcp)))) {
						uint32_t rtt_thresh;

						/* Clamp (minrtt_thresh, lastround/8, maxrtt_thresh) */
						rtt_thresh = (nreno->css_lastround_minrtt >> 3);
						if (rtt_thresh < hystart_minrtt_thresh)
							rtt_thresh = hystart_minrtt_thresh;
						if (rtt_thresh > hystart_maxrtt_thresh)
							rtt_thresh = hystart_maxrtt_thresh;
						newreno_log_hystart_event(ccv, nreno, 1, rtt_thresh);
						if (nreno->css_current_round_minrtt >=
						    (nreno->css_lastround_minrtt + rtt_thresh)) {
							/* Enter CSS */
							nreno->newreno_flags |= CC_NEWRENO_HYSTART_IN_CSS;
							nreno->css_fas_at_css_entry = nreno->css_lowrtt_fas;
							nreno->css_baseline_minrtt = nreno->css_current_round_minrtt;
							nreno->css_entered_at_round = nreno->css_current_round;
							newreno_log_hystart_event(ccv, nreno, 2, rtt_thresh);
						}
					}
				}
				if (CCV(ccv, snd_nxt) == CCV(ccv, snd_max))
					incr = min(ccv->bytes_this_ack,
					    ccv->nsegs * abc_val * CCV(ccv, t_maxseg));
				else
					incr = min(ccv->bytes_this_ack, CCV(ccv, t_maxseg));
				/* Only if Hystart is enabled will the flag get set */
				if (nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) {
					incr /= hystart_css_growth_div;
					newreno_log_hystart_event(ccv, nreno, 3, incr);
				}
			}
			/* ABC is on by default, so incr equals 0 frequently. */
			if (incr > 0)
...
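To make the CSS entry condition concrete with the default tunables declared above (hystart_n_rttsamples = 8, hystart_lowcwnd = 16, hystart_minrtt_thresh = 4000, hystart_maxrtt_thresh = 16000, hystart_css_growth_div = 4): once at least eight RTT samples have been taken in the round and cwnd exceeds 16 segments worth of data, the threshold is the previous round's minimum RTT divided by 8, clamped to the 4000 to 16000 microsecond window. If the last round's minimum RTT was 20 ms, the threshold is 2500 us, clamped up to 4000 us, so CSS is entered when the current round's minimum RTT reaches 24 ms or more; with a 100 ms last-round minimum, the threshold is 12500 us and the trigger point is 112.5 ms. While in CSS, the per-ACK cwnd increment computed just below is divided by 4.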
@@ -201,8 +331,10 @@ newreno_ack_received(struct cc_var *ccv, uint16_t type)
static void
newreno_after_idle(struct cc_var *ccv)
{
	struct newreno *nreno;
	uint32_t rw;

	nreno = (struct newreno *)ccv->cc_data;
	/*
	 * If we've been idle for more than one retransmit timeout the old
	 * congestion window is no longer current and we have to reduce it to
...
@@ -226,6 +358,16 @@ newreno_after_idle(struct cc_var *ccv)
	    CCV(ccv, snd_cwnd) - (CCV(ccv, snd_cwnd) >> 2));

	CCV(ccv, snd_cwnd) = min(rw, CCV(ccv, snd_cwnd));

	if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) == 0) {
		if (CCV(ccv, snd_cwnd) <=
		    (hystart_lowcwnd * tcp_fixed_maxseg(ccv->ccvc.tcp))) {
			/*
			 * Re-enable hystart if our cwnd has fallen below
			 * the hystart lowcwnd point.
			 */
			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
			nreno->newreno_flags |= CC_NEWRENO_HYSTART_ENABLED;
		}
	}
}

/*
...
@@ -240,15 +382,9 @@ newreno_cong_signal(struct cc_var *ccv, uint32_t type)
	cwin = CCV(ccv, snd_cwnd);
	mss = tcp_fixed_maxseg(ccv->ccvc.tcp);
	/*
	 * Other TCP congestion controls use newreno_cong_signal(), but
	 * with their own private cc_data. Make sure the cc_data is used
	 * correctly.
	 */
	nreno = (CC_ALGO(ccv->ccvc.tcp) == &newreno_cc_algo) ? ccv->cc_data : NULL;
	beta = (nreno == NULL) ? V_newreno_beta : nreno->beta;
	beta_ecn = (nreno == NULL) ? V_newreno_beta_ecn : nreno->beta_ecn;
	nreno = (struct newreno *)ccv->cc_data;
	beta = nreno->beta;
	beta_ecn = nreno->beta_ecn;

	/*
	 * Note that we only change the backoff for ECN if the
	 * global sysctl V_cc_do_abe is set <or> the stack itself
...
@@ -257,7 +393,7 @@ newreno_cong_signal(struct cc_var *ccv, uint32_t type)
	 */
	if ((type == CC_ECN) && (V_cc_do_abe ||
	    ((nreno != NULL) && (nreno->newreno_flags & CC_NEWRENO_BETA_ECN))))
	    ((nreno != NULL) && (nreno->newreno_flags & CC_NEWRENO_BETA_ECN_ENABLED))))
		factor = beta_ecn;
	else
		factor = beta;
...
@@ -271,6 +407,11 @@ newreno_cong_signal(struct cc_var *ccv, uint32_t type)
	switch (type) {
	case CC_NDUPACK:
		if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) {
			/* Make sure the flags are all off we had a loss */
			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
		}
		if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
			if (IN_CONGRECOVERY(CCV(ccv, t_flags) &&
			    V_cc_do_abe && V_cc_abe_frlossreduce)) {
...
@@ -284,6 +425,11 @@ newreno_cong_signal(struct cc_var *ccv, uint32_t type)
		}
		break;
	case CC_ECN:
		if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ENABLED) {
			/* Make sure the flags are all off we had a loss */
			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
		}
		if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
			CCV(ccv, snd_ssthresh) = cwin;
			CCV(ccv, snd_cwnd) = cwin;
...
@@ -346,17 +492,10 @@ newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf)
	if (CC_ALGO(ccv->ccvc.tcp) != &newreno_cc_algo)
		return (ENOPROTOOPT);

	nreno = ccv->cc_data;
	nreno = (struct newreno *)ccv->cc_data;
	opt = buf;

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		/* We cannot set without cc_data memory. */
		if (nreno == NULL) {
			nreno = newreno_malloc(ccv);
			if (nreno == NULL)
				return (ENOMEM);
		}
		switch (opt->name) {
		case CC_NEWRENO_BETA:
			nreno->beta = opt->val;
...
@@ -365,6 +504,19 @@ newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf)
			if ((!V_cc_do_abe) &&
			    ((nreno->newreno_flags & CC_NEWRENO_BETA_ECN) == 0))
				return (EACCES);
			nreno->beta_ecn = opt->val;
			nreno->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
			break;
		case CC_NEWRENO_ENABLE_HYSTART:
			/* Allow hystart on this connection */
			if (opt->val != 0) {
				nreno->newreno_flags |= CC_NEWRENO_HYSTART_ALLOWED;
				if (opt->val > 1)
					nreno->newreno_flags |= CC_NEWRENO_HYSTART_CAN_SH_CWND;
				if (opt->val > 2)
					nreno->newreno_flags |= CC_NEWRENO_HYSTART_CONS_SSTH;
			} else
				nreno->newreno_flags &= ~(CC_NEWRENO_HYSTART_ALLOWED |
				    CC_NEWRENO_HYSTART_CAN_SH_CWND |
				    CC_NEWRENO_HYSTART_CONS_SSTH);
			newreno_log_hystart_event(ccv, nreno, 7, opt->val);
			break;
		default:
			return (ENOPROTOOPT);
...
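The CC_NEWRENO_ENABLE_HYSTART option is reached through the generic TCP_CCALGOOPT socket option, whose handler is the ctl_output hook shown earlier. A hypothetical userland sketch follows; it assumes the connection already uses newreno and that netinet/cc/cc_newreno.h is visible to the build, and it relies only on the name and val fields the handler above reads:

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/cc/cc_newreno.h>

/* Ask newreno to allow HyStart++ on one connection. */
static int
enable_hystart(int sock)
{
	struct cc_newreno_opts opt;

	opt.name = CC_NEWRENO_ENABLE_HYSTART;
	opt.val = 1;	/* per the handler: >1 also allows cwnd set, >2 conservative ssthresh */
	return (setsockopt(sock, IPPROTO_TCP, TCP_CCALGOOPT, &opt, sizeof(opt)));
}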
@@ -380,6 +532,17 @@ newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf)
			opt->val = (nreno == NULL) ?
			    V_newreno_beta_ecn : nreno->beta_ecn;
			break;
		case CC_NEWRENO_ENABLE_HYSTART:
			if (nreno->newreno_flags & CC_NEWRENO_HYSTART_ALLOWED) {
				if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CONS_SSTH)
					opt->val = 3;
				else if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CAN_SH_CWND)
					opt->val = 2;
				else
					opt->val = 1;
			} else
				opt->val = 0;
			break;
		default:
			return (ENOPROTOOPT);
		}
...
@@ -411,6 +574,78 @@ newreno_beta_handler(SYSCTL_HANDLER_ARGS)
	return (error);
}

static void
newreno_newround(struct cc_var *ccv, uint32_t round_cnt)
{
	struct newreno *nreno;

	nreno = (struct newreno *)ccv->cc_data;
	/* We have entered a new round */
	nreno->css_lastround_minrtt = nreno->css_current_round_minrtt;
	nreno->css_current_round_minrtt = 0xffffffff;
	nreno->css_rttsample_count = 0;
	nreno->css_current_round = round_cnt;
	if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) &&
	    ((round_cnt - nreno->css_entered_at_round) >= hystart_css_rounds)) {
		/* Enter CA */
		if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CAN_SH_CWND) {
			/*
			 * We engage more than snd_ssthresh, engage
			 * the brakes!! Though we will stay in SS to
			 * creep back up again, so lets leave CSS active
			 * and give us hystart_css_rounds more rounds.
			 */
			if (nreno->newreno_flags & CC_NEWRENO_HYSTART_CONS_SSTH) {
				CCV(ccv, snd_ssthresh) = ((nreno->css_lowrtt_fas +
				    nreno->css_fas_at_css_entry) / 2);
			} else {
				CCV(ccv, snd_ssthresh) = nreno->css_lowrtt_fas;
			}
			CCV(ccv, snd_cwnd) = nreno->css_fas_at_css_entry;
			nreno->css_entered_at_round = round_cnt;
		} else {
			CCV(ccv, snd_ssthresh) = CCV(ccv, snd_cwnd);
			/* Turn off the CSS flag */
			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
			/* Disable use of CSS in the future except long idle */
			nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_ENABLED;
		}
		newreno_log_hystart_event(ccv, nreno, 6, CCV(ccv, snd_ssthresh));
	}
	newreno_log_hystart_event(ccv, nreno, 4, round_cnt);
}

static void
newreno_rttsample(struct cc_var *ccv, uint32_t usec_rtt, uint32_t rxtcnt, uint32_t fas)
{
	struct newreno *nreno;

	nreno = (struct newreno *)ccv->cc_data;
	if (rxtcnt > 1) {
		/*
		 * Only look at RTT's that are non-ambiguous.
		 */
		return;
	}
	nreno->css_rttsample_count++;
	nreno->css_last_fas = fas;
	if (nreno->css_current_round_minrtt > usec_rtt) {
		nreno->css_current_round_minrtt = usec_rtt;
		nreno->css_lowrtt_fas = nreno->css_last_fas;
	}
	if ((nreno->newreno_flags & CC_NEWRENO_HYSTART_IN_CSS) &&
	    (nreno->css_rttsample_count >= hystart_n_rttsamples) &&
	    (nreno->css_baseline_minrtt > nreno->css_current_round_minrtt)) {
		/*
		 * We were in CSS and the RTT is now less, we
		 * entered CSS erroneously.
		 */
		nreno->newreno_flags &= ~CC_NEWRENO_HYSTART_IN_CSS;
		newreno_log_hystart_event(ccv, nreno, 8, nreno->css_baseline_minrtt);
		nreno->css_baseline_minrtt = 0xffffffff;
	}
	newreno_log_hystart_event(ccv, nreno, 5, usec_rtt);
}

SYSCTL_DECL(_net_inet_tcp_cc_newreno);
SYSCTL_NODE(_net_inet_tcp_cc, OID_AUTO, newreno,
    CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
...
@@ -426,5 +661,45 @@ SYSCTL_PROC(_net_inet_tcp_cc_newreno, OID_AUTO, beta_ecn,
    &VNET_NAME(newreno_beta_ecn), 3, &newreno_beta_handler, "IU",
    "New Reno beta ecn, specified as number between 1 and 100");

SYSCTL_NODE(_net_inet_tcp_cc_newreno, OID_AUTO, hystartplusplus,
    CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "New Reno related HyStart++ settings");

SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, lowcwnd,
    CTLFLAG_RW, &hystart_lowcwnd, 16,
    "The number of MSS in the CWND before HyStart++ is active");

SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, minrtt_thresh,
    CTLFLAG_RW, &hystart_minrtt_thresh, 4000,
    "HyStarts++ minimum RTT thresh used in clamp (in microseconds)");

SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, maxrtt_thresh,
    CTLFLAG_RW, &hystart_maxrtt_thresh, 16000,
    "HyStarts++ maximum RTT thresh used in clamp (in microseconds)");

SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, n_rttsamples,
    CTLFLAG_RW, &hystart_n_rttsamples, 8,
    "The number of RTT samples that must be seen to consider HyStart++");

SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, css_growth_div,
    CTLFLAG_RW, &hystart_css_growth_div, 4,
    "The divisor to the growth when in Hystart++ CSS");

SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, css_rounds,
    CTLFLAG_RW, &hystart_css_rounds, 5,
    "The number of rounds HyStart++ lasts in CSS before falling to CA");

SYSCTL_UINT(_net_inet_tcp_cc_newreno_hystartplusplus, OID_AUTO, bblogs,
    CTLFLAG_RW, &hystart_bblogs, 0,
    "Do we enable HyStart++ Black Box logs to be generated if BB logging is on");

DECLARE_CC_MODULE(newreno, &newreno_cc_algo);
MODULE_VERSION(newreno, 1);
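Given the parent node _net_inet_tcp_cc_newreno and the hystartplusplus sub-node declared above, these tunables should surface as net.inet.tcp.cc.newreno.hystartplusplus.lowcwnd, .minrtt_thresh, .maxrtt_thresh, .n_rttsamples, .css_growth_div, .css_rounds and .bblogs, each read/write and defaulting to the value shown in its declaration.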
sys/netinet/cc/cc_newreno.h
...
@@ -35,6 +35,15 @@ struct newreno {
	uint32_t beta;
	uint32_t beta_ecn;
	uint32_t newreno_flags;
	uint32_t css_baseline_minrtt;
	uint32_t css_current_round_minrtt;
	uint32_t css_lastround_minrtt;
	uint32_t css_rttsample_count;
	uint32_t css_entered_at_round;
	uint32_t css_current_round;
	uint32_t css_fas_at_css_entry;
	uint32_t css_lowrtt_fas;
	uint32_t css_last_fas;
};

struct cc_newreno_opts {