Commit 477e302b authored by HardenedBSD Sync Service
Browse files

Merge branch 'freebsd/current/main' into hardened/current/master

parents a1564371 5efa7281
......@@ -34,6 +34,9 @@ main() {
vital="false"
case "${outname}" in
bootloader)
pkgdeps=""
;;
clibs)
vital="true"
# clibs should not have any dependencies or anything
......
......@@ -414,6 +414,8 @@ struct mana_rxq {
mana_handle_t rxobj;
struct completion fence_event;
struct mana_cq rx_cq;
struct ifnet *ndev;
......
......@@ -1221,6 +1221,63 @@ mana_create_eq(struct mana_context *ac)
return err;
}
/*
 * mana_fence_rq - fence a single receive queue.
 *
 * Sends a MANA_FENCE_RQ request for rxq's WQ object to the hardware and
 * waits for the corresponding fence completion (signalled on
 * rxq->fence_event by the RX CQE handler) so that all in-flight receive
 * work on the queue is known to have drained.
 *
 * Returns 0 on success; the mana_send_request() error on a send failure;
 * EPROTO when the response header verifies but carries a nonzero status;
 * ETIMEDOUT when the fence completion does not arrive within 10 seconds.
 */
static int
mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
struct mana_fence_rq_resp resp = {};
struct mana_fence_rq_req req = {};
int err;
/* Arm the completion before issuing the request to avoid a lost wakeup. */
init_completion(&rxq->fence_event);
mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
sizeof(req), sizeof(resp));
req.wq_obj_handle = rxq->rxobj;
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
rxq->rxq_idx, err);
return err;
}
err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
if (err || resp.hdr.status) {
if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
rxq->rxq_idx, err, resp.hdr.status);
if (!err)
/* Header verified but device reported failure; map to EPROTO. */
err = EPROTO;
return err;
}
/*
 * NOTE(review): a nonzero return here is treated as a timeout, which is
 * the opposite of Linux's wait_for_completion_timeout() convention —
 * presumably this driver's completion shim returns true on timeout;
 * confirm against the gdma_util implementation.
 */
if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
rxq->rxq_idx);
return ETIMEDOUT;
}
return 0;
}
/*
 * mana_fence_rqs - fence every receive queue on the port.
 *
 * Walks apc->rxqs[0 .. apc->num_queues - 1] and fences each RQ via
 * mana_fence_rq().  If fencing a queue fails for any reason, fall back
 * to a 100 ms sleep for that queue instead.
 */
static void
mana_fence_rqs(struct mana_port_context *apc)
{
	unsigned int i;

	for (i = 0; i < apc->num_queues; i++) {
		struct mana_rxq *rxq = apc->rxqs[i];

		/* In case of any error, use sleep instead. */
		if (mana_fence_rq(apc, rxq) != 0)
			gdma_msleep(100);
	}
}
static int
mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
{
......@@ -1564,7 +1621,7 @@ mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
return;
case CQE_RX_OBJECT_FENCE:
if_printf(ndev, "RX Fencing is unsupported\n");
complete(&rxq->fence_event);
return;
default:
......@@ -2368,6 +2425,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
bool update_hash, bool update_tab)
{
uint32_t queue_idx;
int err;
int i;
if (update_tab) {
......@@ -2377,7 +2435,13 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
}
}
return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
if (err)
return err;
mana_fence_rqs(apc);
return 0;
}
static int
......@@ -2532,9 +2596,6 @@ mana_dealloc_queues(struct ifnet *ndev)
return err;
}
/* TODO: Implement RX fencing */
gdma_msleep(1000);
mana_destroy_vport(apc);
return 0;
......
......@@ -551,6 +551,7 @@ struct pf_kpooladdr {
TAILQ_HEAD(pf_kpalist, pf_kpooladdr);
struct pf_kpool {
struct mtx mtx;
struct pf_kpalist list;
struct pf_kpooladdr *cur;
struct pf_poolhashkey key;
......
......@@ -1542,6 +1542,8 @@ pf_krule_free(struct pf_krule *rule)
counter_u64_free(rule->states_cur);
counter_u64_free(rule->states_tot);
counter_u64_free(rule->src_nodes);
mtx_destroy(&rule->rpool.mtx);
free(rule, M_PFRULE);
}
......@@ -1999,6 +2001,8 @@ pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
rule, entries);
ruleset->rules[rs_num].inactive.rcount++;
mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
PF_RULES_WUNLOCK();
return (0);
......
......@@ -374,36 +374,45 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
return (0);
}
mtx_lock(&rpool->mtx);
/* Find the route using chosen algorithm. Store the found route
in src_node if it was given or found. */
if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
if (rpool->cur->addr.type == PF_ADDR_NOROUTE) {
mtx_unlock(&rpool->mtx);
return (1);
}
if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
switch (af) {
#ifdef INET
case AF_INET:
if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
(rpool->opts & PF_POOL_TYPEMASK) !=
PF_POOL_ROUNDROBIN)
PF_POOL_ROUNDROBIN) {
mtx_unlock(&rpool->mtx);
return (1);
raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
}
raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
break;
#endif /* INET */
#ifdef INET6
case AF_INET6:
if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
(rpool->opts & PF_POOL_TYPEMASK) !=
PF_POOL_ROUNDROBIN)
PF_POOL_ROUNDROBIN) {
mtx_unlock(&rpool->mtx);
return (1);
}
raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
break;
#endif /* INET6 */
}
} else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) {
mtx_unlock(&rpool->mtx);
return (1); /* unsupported */
}
} else {
raddr = &rpool->cur->addr.v.a.addr;
rmask = &rpool->cur->addr.v.a.mask;
......@@ -467,27 +476,6 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
{
struct pf_kpooladdr *acur = rpool->cur;
/*
* XXXGL: in the round-robin case we need to store
* the round-robin machine state in the rule, thus
* forwarding thread needs to modify rule.
*
* This is done w/o locking, because performance is assumed
* more important than round-robin precision.
*
* In the simpliest case we just update the "rpool->cur"
* pointer. However, if pool contains tables or dynamic
* addresses, then "tblidx" is also used to store machine
* state. Since "tblidx" is int, concurrent access to it can't
* lead to inconsistence, only to lost of precision.
*
* Things get worse, if table contains not hosts, but
* prefixes. In this case counter also stores machine state,
* and for IPv6 address, counter can't be updated atomically.
* Probably, using round-robin on a table containing IPv6
* prefixes (or even IPv4) would cause a panic.
*/
if (rpool->cur->addr.type == PF_ADDR_TABLE) {
if (!pfr_pool_get(rpool->cur->addr.p.tbl,
&rpool->tblidx, &rpool->counter, af))
......@@ -511,6 +499,7 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
/* table contains no address of type 'af' */
if (rpool->cur != acur)
goto try_next;
mtx_unlock(&rpool->mtx);
return (1);
}
} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
......@@ -520,6 +509,7 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
/* table contains no address of type 'af' */
if (rpool->cur != acur)
goto try_next;
mtx_unlock(&rpool->mtx);
return (1);
}
} else {
......@@ -539,6 +529,8 @@ pf_map_addr(sa_family_t af, struct pf_krule *r, struct pf_addr *saddr,
if (*sn != NULL)
PF_ACPY(&(*sn)->raddr, naddr, af);
mtx_unlock(&rpool->mtx);
if (V_pf_status.debug >= PF_DEBUG_NOISY &&
(rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
printf("pf_map_addr: selected address ");
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment