Commit 9a99a358 authored by HardenedBSD Sync Service

Merge branch 'freebsd/current/main' into hardened/current/master

parents e57bba3c 72c89ce9
@@ -191,7 +191,7 @@ CONFS+= ftpd
.endif
.if ${MK_GSSAPI} != "no"
-CONFGROUPS+= gssd
+CONFGROUPS+= GSSD
GSSD= gssd
GSSDPACKAGE= kerberos
.endif
......
@@ -143,7 +143,6 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
get_fpcontext(td, &sf.sf_uc.uc_mcontext, &xfpusave, &xfpusave_len);
-fpstate_drop(td);
update_pcb_bases(pcb);
sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase;
sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase;
@@ -203,6 +202,7 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
sigexit(td, SIGILL);
}
+fpstate_drop(td);
regs->tf_rsp = (long)sfp;
regs->tf_rip = p->p_sigcode_base;
regs->tf_rflags &= ~(PSL_T | PSL_D);
......
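Note on the two sendsig() hunks above and the matching ia32_sendsig() hunks below: they move the fpstate_drop() call from right after get_fpcontext() to after the signal frame has been copied out, apparently so the thread's FPU state is not marked for reset while the frame that references it is still being built and delivered. A minimal sketch of the resulting order (simplified, not the verbatim kernel code):

    get_fpcontext(td, &sf.sf_uc.uc_mcontext, &xfpusave, &xfpusave_len);
    /* ... finish building the signal frame ... */
    if (copyout(&sf, sfp, sizeof(sf)) != 0) {
        /* Delivery failed; kill the process with the FPU state intact. */
        sigexit(td, SIGILL);
    }
    fpstate_drop(td);    /* drop only after the frame is in user space */
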
@@ -607,7 +607,6 @@ ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
sf.sf_uc.uc_mcontext.mc_gs = regs->tf_gs;
sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
ia32_get_fpcontext(td, &sf.sf_uc.uc_mcontext, &xfpusave, &xfpusave_len);
-fpstate_drop(td);
sf.sf_uc.uc_mcontext.mc_fsbase = td->td_pcb->pcb_fsbase;
sf.sf_uc.uc_mcontext.mc_gsbase = td->td_pcb->pcb_gsbase;
@@ -661,6 +660,7 @@ ia32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
sigexit(td, SIGILL);
}
+fpstate_drop(td);
regs->tf_rsp = (uintptr_t)sfp;
regs->tf_rip = p->p_sigcode_base;
regs->tf_rflags &= ~(PSL_T | PSL_D);
......
@@ -506,6 +506,9 @@ static inline void
device_release_driver(struct device *dev)
{
+#if 0
+/* This leads to panics. Disable temporarily. Keep to rework. */
+/* We also need to cleanup LinuxKPI bits. What else? */
lkpi_devres_release_free_list(dev);
dev_set_drvdata(dev, NULL);
@@ -515,6 +518,7 @@ device_release_driver(struct device *dev)
if (device_is_attached(dev->bsddev))
device_detach(dev->bsddev);
mtx_unlock(&Giant);
+#endif
}
static inline int
......
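The device_release_driver() hunks above wrap the whole function body in #if 0 / #endif, turning it into a no-op until the LinuxKPI devres teardown can be reworked (the in-tree comment cites panics). The resulting shape, sketched with the unshown lines assumed from context:

    static inline void
    device_release_driver(struct device *dev)
    {
    #if 0
        lkpi_devres_release_free_list(dev);
        dev_set_drvdata(dev, NULL);
        mtx_lock(&Giant);    /* assumed; only the unlock is visible above */
        if (device_is_attached(dev->bsddev))
            device_detach(dev->bsddev);
        mtx_unlock(&Giant);
    #endif
    }
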
@@ -91,7 +91,7 @@ struct dma_map_ops {
#define DMA_BIT_MASK(n) ((2ULL << ((n) - 1)) - 1ULL)
-int linux_dma_tag_init(struct device *dev, u64 mask);
+int linux_dma_tag_init(struct device *, u64);
void *linux_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
dma_addr_t linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len);
@@ -104,7 +104,7 @@ void linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
unsigned long attrs __unused);
static inline int
-dma_supported(struct device *dev, u64 mask)
+dma_supported(struct device *dev, u64 dma_mask)
{
/* XXX busdma takes care of this elsewhere. */
@@ -122,23 +122,23 @@ dma_set_mask(struct device *dev, u64 dma_mask)
}
static inline int
-dma_set_coherent_mask(struct device *dev, u64 mask)
+dma_set_coherent_mask(struct device *dev, u64 dma_mask)
{
-if (!dma_supported(dev, mask))
+if (!dma_supported(dev, dma_mask))
return -EIO;
/* XXX Currently we don't support a separate coherent mask. */
return 0;
}
static inline int
-dma_set_mask_and_coherent(struct device *dev, u64 mask)
+dma_set_mask_and_coherent(struct device *dev, u64 dma_mask)
{
int r;
-r = dma_set_mask(dev, mask);
+r = dma_set_mask(dev, dma_mask);
if (r == 0)
-dma_set_coherent_mask(dev, mask);
+dma_set_coherent_mask(dev, dma_mask);
return (r);
}
......
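The LinuxKPI hunks above consistently rename the mask parameter of the dma_set_*() helpers to dma_mask (and drop the parameter names from the linux_dma_tag_init() prototype); the behavior is unchanged. For reference, a hypothetical driver would call the combined helper in the usual Linux idiom, with DMA_BIT_MASK() as defined above:

    /* Hypothetical attach path: try a 48-bit DMA mask, fall back to 32. */
    if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)) != 0 &&
        dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0)
        return (-EIO);
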
@@ -1415,7 +1415,7 @@ ipf_p_ftp_process(softf, fin, nat, ftp, rv)
printf("%s:seq[0](%u) + (%d) != (%u)\n",
"ipf_p_ftp_process", t->ftps_seq[0],
ackoff, thack);
printf("%s:seq[0](%u) + (%d) != (%u)\n",
printf("%s:seq[1](%u) + (%d) != (%u)\n",
"ipf_p_ftp_process", t->ftps_seq[1],
ackoff, thack);
}
......
@@ -1060,7 +1060,7 @@ ipf_proxy_check(fin, nat)
/* pr(I) - protocol number for proxy */
/* name(I) - proxy name */
/* */
-/* Search for an proxy by the protocol it is being used with and its name. */
+/* Search for a proxy by the protocol being used and by its name. */
/* ------------------------------------------------------------------------ */
aproxy_t *
ipf_proxy_lookup(arg, pr, name)
......
@@ -1495,7 +1495,6 @@ em_msix_link(void *arg)
{
struct e1000_softc *sc = arg;
u32 reg_icr;
-bool notlink = false;
++sc->link_irq;
MPASS(sc->hw.back != NULL);
@@ -1506,17 +1505,14 @@ em_msix_link(void *arg)
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
em_handle_link(sc->ctx);
-else
-notlink = true;
-/* Re-arm for other/spurious interrupts */
-if (notlink && sc->hw.mac.type >= igb_mac_min) {
+/* Re-arm unconditionally */
+if (sc->hw.mac.type >= igb_mac_min) {
E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->link_mask);
} else if (sc->hw.mac.type == e1000_82574) {
-if (notlink)
-E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC |
-E1000_IMS_OTHER);
+E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC |
+E1000_IMS_OTHER);
/*
* Because we must read the ICR for this interrupt it may
* clear other causes using autoclear, for this reason we
@@ -1524,7 +1520,8 @@
*/
if (reg_icr)
E1000_WRITE_REG(&sc->hw, E1000_ICS, sc->ims);
-}
+} else
+E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
return (FILTER_HANDLED);
}
@@ -1873,13 +1870,6 @@ em_if_update_admin_status(if_ctx_t ctx)
if (hw->mac.type < em_mac_min)
lem_smartspeed(sc);
-else if (hw->mac.type >= igb_mac_min &&
-sc->intr_type == IFLIB_INTR_MSIX) {
-E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
-E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->link_mask);
-} else if (hw->mac.type == e1000_82574 &&
-sc->intr_type == IFLIB_INTR_MSIX)
-E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
}
static void
......
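The three e1000 hunks above drop the notlink flag: the MSI-X link handler used to re-arm the link/other interrupt only for non-link causes and otherwise left re-arming to em_if_update_admin_status() (the block deleted in the last hunk); it now re-arms unconditionally on every pass. The resulting control flow of em_msix_link(), restated in one place (simplified from the hunks, with the ICR read shown for context):

    reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);  /* read may auto-clear causes */
    if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
        em_handle_link(sc->ctx);

    if (sc->hw.mac.type >= igb_mac_min) {
        E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
        E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->link_mask);
    } else if (sc->hw.mac.type == e1000_82574) {
        E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
        if (reg_icr)    /* replay whatever the ICR read auto-cleared */
            E1000_WRITE_REG(&sc->hw, E1000_ICS, sc->ims);
    } else
        E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
    return (FILTER_HANDLED);
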
@@ -2163,15 +2163,21 @@ xeon_gen3_setup_b2b_mw(struct ntb_softc *ntb)
intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XBASE, 0);
/*
-* If the value in EMBAR1LIMIT is set equal to the value in EMBAR1,
-* the memory window for EMBAR1 is disabled.
-* Note: It is needed to avoid malacious access.
+* If the value in IMBAR1XLIMIT is set equal to the value in IMBAR1XBASE,
+* the local memory window exposure from EMBAR1 is disabled.
+* Note: It is needed to avoid malicious access.
*/
-reg = pci_read_config(ntb->device, XEON_GEN3_EXT_REG_BAR1BASE, 8);
-intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR1XLIMIT, reg);
+intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR1XLIMIT, 0);
+intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XLIMIT, 0);
-reg = pci_read_config(ntb->device, XEON_GEN3_EXT_REG_BAR2BASE, 8);
-intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XLIMIT, reg);
+/* Config outgoing translation limits (whole bar size windows) */
+reg = intel_ntb_reg_read(8, XEON_GEN3_REG_EMBAR1XBASE);
+reg += ntb->bar_info[NTB_B2B_BAR_1].size;
+intel_ntb_reg_write(8, XEON_GEN3_REG_EMBAR1XLIMIT, reg);
+reg = intel_ntb_reg_read(8, XEON_GEN3_REG_EMBAR2XBASE);
+reg += ntb->bar_info[NTB_B2B_BAR_2].size;
+intel_ntb_reg_write(8, XEON_GEN3_REG_EMBAR2XLIMIT, reg);
return (0);
}
@@ -3226,7 +3232,10 @@ intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
limit = 0;
if (bar_is_64bit(ntb, bar_num)) {
-base = intel_ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;
+if (ntb->type == NTB_XEON_GEN3)
+base = addr;
+else
+base = intel_ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;
if (limit_reg != 0 && size != mw_size)
limit = base + size;
@@ -3249,18 +3258,6 @@ intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
intel_ntb_reg_write(8, xlat_reg, 0);
return (EIO);
}
-if (ntb->type == NTB_XEON_GEN3) {
-limit = base + size;
-/* set EMBAR1/2XLIMIT */
-if (!idx)
-intel_ntb_reg_write(8,
-XEON_GEN3_REG_EMBAR1XLIMIT, limit);
-else
-intel_ntb_reg_write(8,
-XEON_GEN3_REG_EMBAR2XLIMIT, limit);
-}
} else {
/* Configure 32-bit (split) BAR MW */
if (ntb->type == NTB_XEON_GEN3)
......
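In the b2b setup hunk above, the inbound IMBAR1XLIMIT/IMBAR2XLIMIT registers are now written with 0 instead of a value read back over config space; with the matching IMBARxXBASE registers also zeroed (IMBAR2XBASE is visible in the hunk's first context line, IMBAR1XBASE presumably just above it), limit == base keeps the inbound windows disabled until a translation is programmed, per the rewritten comment. The outbound EMBARxXLIMIT registers get base plus the full BAR size, and intel_ntb_mw_set_trans() now takes the caller-supplied addr as the Gen3 window base and uses the generic limit = base + size path, replacing the deleted Gen3-only EMBAR block. The limit arithmetic, with hypothetical values:

    /* Hypothetical numbers: a 1 GiB BAR whose xlat base is 0x380000000000. */
    uint64_t base  = 0x380000000000ULL;    /* EMBAR1XBASE (example value) */
    uint64_t size  = 0x40000000ULL;        /* BAR size: 1 GiB */
    uint64_t limit = base + size;          /* EMBAR1XLIMIT = 0x380040000000 */
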
@@ -326,7 +326,7 @@ pci_host_generic_core_release_resource(device_t dev, device_t child, int type,
}
static bool
-generic_pcie_translate_resource(device_t dev, int type, rman_res_t start,
+generic_pcie_translate_resource_common(device_t dev, int type, rman_res_t start,
rman_res_t end, rman_res_t *new_start, rman_res_t *new_end)
{
struct generic_pcie_core_softc *sc;
@@ -382,6 +382,16 @@ generic_pcie_translate_resource(device_t dev, int type, rman_res_t start,
return (found);
}
+static int
+generic_pcie_translate_resource(device_t bus, int type,
+rman_res_t start, rman_res_t *newstart)
+{
+rman_res_t newend; /* unused */
+return (!generic_pcie_translate_resource_common(
+bus, type, start, 0, newstart, &newend));
+}
struct resource *
pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
@@ -406,7 +416,7 @@ pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
type, rid, start, end, count, flags));
/* Translate the address from a PCI address to a physical address */
-if (!generic_pcie_translate_resource(dev, type, start, end, &phys_start,
+if (!generic_pcie_translate_resource_common(dev, type, start, end, &phys_start,
&phys_end)) {
device_printf(dev,
"Failed to translate resource %jx-%jx type %x for %s\n",
@@ -458,7 +468,7 @@ generic_pcie_activate_resource(device_t dev, device_t child, int type,
start = rman_get_start(r);
end = rman_get_end(r);
-if (!generic_pcie_translate_resource(dev, type, start, end, &start,
+if (!generic_pcie_translate_resource_common(dev, type, start, end, &start,
&end))
return (EINVAL);
rman_set_start(r, start);
@@ -529,6 +539,7 @@ static device_method_t generic_pcie_methods[] = {
DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource),
DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource),
DEVMETHOD(bus_release_resource, pci_host_generic_core_release_resource),
+DEVMETHOD(bus_translate_resource, generic_pcie_translate_resource),
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
......
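In the pci_host_generic hunks above, the bool-returning translator keeps its contract under the new name generic_pcie_translate_resource_common() (true on success), while the added generic_pcie_translate_resource() adapts it to the bus_translate_resource device method, which reports success as 0; hence the negated return. A hypothetical caller through the bus method interface (assuming the standard BUS_TRANSLATE_RESOURCE() wrapper generated from bus_if.m):

    /* Translate one PCI bus address to a CPU physical address. */
    rman_res_t pci_addr = 0x10000000, cpu_addr;
    if (BUS_TRANSLATE_RESOURCE(dev, SYS_RES_MEMORY, pci_addr, &cpu_addr) != 0)
        device_printf(dev, "translation failed\n");
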
@@ -445,6 +445,8 @@ rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
"length %#jx, flags %x, device %s\n", rm->rm_descr, start, end,
count, flags,
dev == NULL ? "<null>" : device_get_nameunit(dev)));
+KASSERT(count != 0, ("%s: attempted to allocate an empty range",
+__func__));
KASSERT((flags & RF_FIRSTSHARE) == 0,
("invalid flags %#x", flags));
new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;
@@ -520,7 +522,7 @@ rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
DPRINTF(("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
rstart, rend, (rend - rstart + 1), count));
-if ((rend - rstart + 1) >= count) {
+if ((rend - rstart) >= (count - 1)) {
DPRINTF(("candidate region: [%#jx, %#jx], size %#jx\n",
rstart, rend, (rend - rstart + 1)));
if ((s->r_end - s->r_start + 1) == count) {
......
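The rman hunks above pair a new KASSERT with an overflow-safe size test. For a candidate region [rstart, rend], the old expression rend - rstart + 1 wraps to 0 when the region spans the entire rman_res_t range, so a full-range candidate could be rejected even though it fits; comparing (rend - rstart) >= (count - 1) avoids the +1, and the KASSERT(count != 0) added above guarantees count - 1 cannot wrap either. A small illustration, assuming a 64-bit rman_res_t:

    uint64_t rstart = 0, rend = UINT64_MAX, count = 1;
    /* Old test: (rend - rstart + 1) wraps to 0, and 0 >= 1 is false. */
    /* New test: (rend - rstart) >= (count - 1) is UINT64_MAX >= 0, true. */
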