Commit f10ee2b6 authored by Shawn Webb

Merge remote-tracking branch 'origin/freebsd/current/main' into hardened/current/master

Conflicts:
	share/man/man4/Makefile (unresolved)
Parents: 5e942a06 6fa58bfb
......@@ -50,7 +50,11 @@ MAN= aac.4 \
${_aout.4} \
${_apic.4} \
arcmsr.4 \
<<<<<<< HEAD
aslr.4 \
=======
arswitch.4 \
>>>>>>> origin/freebsd/current/main
${_asmc.4} \
at45d.4 \
ata.4 \
......
......@@ -88,24 +88,6 @@ or
.Pa /boot/config .
.It
.Pa loader.efi
searches partitions of type
.Li freebsd-ufs
and
.Li freebsd-zfs
for
.Pa loader.efi .
The search begins with partitions on the device from which
.Pa loader.efi
was loaded, and continues with other available partitions.
If both
.Li freebsd-ufs
and
.Li freebsd-zfs
partitions exist on the same device the
.Li freebsd-zfs
partition is preferred.
.It
.Pa loader.efi
loads and boots the kernel, as described in
.Xr loader 8 .
.El
......
......@@ -398,7 +398,8 @@ iommu_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
static int
iommu_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
{
struct bus_dma_tag_iommu *dmat, *dmat_copy, *parent;
struct bus_dma_tag_iommu *dmat, *parent;
struct bus_dma_tag_iommu *dmat_copy __unused;
int error;
error = 0;
......@@ -920,7 +921,7 @@ static void
iommu_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map1,
bus_dmasync_op_t op)
{
struct bus_dmamap_iommu *map;
struct bus_dmamap_iommu *map __unused;
map = (struct bus_dmamap_iommu *)map1;
kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
......
......@@ -354,7 +354,7 @@ iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
static void
iommu_gas_match_insert(struct iommu_gas_match_args *a)
{
bool found;
bool found __diagused;
/*
* The prev->end is always aligned on the page size, which
......@@ -475,7 +475,7 @@ iommu_gas_alloc_region(struct iommu_domain *domain, struct iommu_map_entry *entr
u_int flags)
{
struct iommu_map_entry *next, *prev;
bool found;
bool found __diagused;
IOMMU_DOMAIN_ASSERT_LOCKED(domain);
......
......@@ -431,7 +431,7 @@ int
alq_open_flags(struct alq **alqp, const char *file, struct ucred *cred, int cmode,
int size, int flags)
{
struct thread *td;
struct thread *td __unused;
struct nameidata nd;
struct alq *alq;
int oflags;
......
......@@ -3268,7 +3268,7 @@ postsig(int sig)
int
sig_ast_checksusp(struct thread *td)
{
struct proc *p;
struct proc *p __diagused;
int ret;
p = td->td_proc;
......
......@@ -641,7 +641,7 @@ int
umtxq_requeue(struct umtx_key *key, int n_wake, struct umtx_key *key2,
int n_requeue)
{
struct umtxq_queue *uh, *uh2;
struct umtxq_queue *uh;
struct umtx_q *uq, *uq_temp;
int ret;
......@@ -649,7 +649,6 @@ umtxq_requeue(struct umtx_key *key, int n_wake, struct umtx_key *key2,
UMTXQ_LOCKED_ASSERT(umtxq_getchain(key));
UMTXQ_LOCKED_ASSERT(umtxq_getchain(key2));
uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
uh2 = umtxq_queue_lookup(key2, UMTX_SHARED_QUEUE);
if (uh == NULL)
return (0);
TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) {
......
......@@ -412,7 +412,6 @@ static void
unloadentry(void *unused1, int unused2)
{
struct priv_fw *fp;
int err;
mtx_lock(&firmware_mtx);
restart:
......@@ -434,7 +433,7 @@ unloadentry(void *unused1, int unused2)
* on unload to actually free the entry.
*/
mtx_unlock(&firmware_mtx);
err = linker_release_module(NULL, NULL, fp->file);
(void)linker_release_module(NULL, NULL, fp->file);
mtx_lock(&firmware_mtx);
/*
......
......@@ -784,7 +784,6 @@ ktls_cleanup(struct ktls_session *tls)
counter_u64_add(ktls_sw_chacha20, -1);
break;
}
ktls_ocf_free(tls);
break;
case TCP_TLS_MODE_IFNET:
switch (tls->params.cipher_algorithm) {
......@@ -817,6 +816,8 @@ ktls_cleanup(struct ktls_session *tls)
break;
#endif
}
if (tls->ocf_session != NULL)
ktls_ocf_free(tls);
if (tls->params.auth_key != NULL) {
zfree(tls->params.auth_key, M_KTLS);
tls->params.auth_key = NULL;
......@@ -1004,14 +1005,9 @@ ktls_try_ifnet(struct socket *so, struct ktls_session *tls, bool force)
return (error);
}
static int
ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
static void
ktls_use_sw(struct ktls_session *tls)
{
int error;
error = ktls_ocf_try(so, tls, direction);
if (error)
return (error);
tls->mode = TCP_TLS_MODE_SW;
switch (tls->params.cipher_algorithm) {
case CRYPTO_AES_CBC:
......@@ -1024,6 +1020,17 @@ ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
counter_u64_add(ktls_sw_chacha20, 1);
break;
}
}
static int
ktls_try_sw(struct socket *so, struct ktls_session *tls, int direction)
{
int error;
error = ktls_ocf_try(so, tls, direction);
if (error)
return (error);
ktls_use_sw(tls);
return (0);
}
......@@ -1082,6 +1089,69 @@ sb_mark_notready(struct sockbuf *sb)
sb->sb_ccc));
}
/*
* Return information about the pending TLS data in a socket
* buffer. On return, 'seqno' is set to the sequence number
* of the next TLS record to be received, 'resid' is set to
* the amount of bytes still needed for the last pending
* record. The function returns 'false' if the last pending
* record contains a partial TLS header. In that case, 'resid'
* is the number of bytes needed to complete the TLS header.
*/
bool
ktls_pending_rx_info(struct sockbuf *sb, uint64_t *seqnop, size_t *residp)
{
struct tls_record_layer hdr;
struct mbuf *m;
uint64_t seqno;
size_t resid;
u_int offset, record_len;
SOCKBUF_LOCK_ASSERT(sb);
MPASS(sb->sb_flags & SB_TLS_RX);
seqno = sb->sb_tls_seqno;
resid = sb->sb_tlscc;
m = sb->sb_mtls;
offset = 0;
if (resid == 0) {
*seqnop = seqno;
*residp = 0;
return (true);
}
for (;;) {
seqno++;
if (resid < sizeof(hdr)) {
*seqnop = seqno;
*residp = sizeof(hdr) - resid;
return (false);
}
m_copydata(m, offset, sizeof(hdr), (void *)&hdr);
record_len = sizeof(hdr) + ntohs(hdr.tls_length);
if (resid <= record_len) {
*seqnop = seqno;
*residp = record_len - resid;
return (true);
}
resid -= record_len;
while (record_len != 0) {
if (m->m_len - offset > record_len) {
offset += record_len;
break;
}
record_len -= (m->m_len - offset);
offset = 0;
m = m->m_next;
}
}
}
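[Editorial sketch, not part of this commit: a minimal example of how a caller might consume the new ktls_pending_rx_info() helper added above. The function name example_query_pending_rx() and its context are assumptions made for illustration; only the sockbuf locking requirement and the meaning of the out-parameters follow the comment above.]
/*
 * Illustrative sketch (editorial, not part of this commit): query the
 * pending TLS data in the receive buffer of a socket on which TLS RX
 * offload (SB_TLS_RX) has already been enabled.
 */
static void
example_query_pending_rx(struct socket *so)
{
	uint64_t seqno;
	size_t resid;
	bool complete;
	SOCKBUF_LOCK(&so->so_rcv);
	complete = ktls_pending_rx_info(&so->so_rcv, &seqno, &resid);
	SOCKBUF_UNLOCK(&so->so_rcv);
	if (complete) {
		/*
		 * 'seqno' is the sequence number of the next TLS record
		 * to be received; 'resid' bytes are still needed to
		 * complete the last pending record (0 if it is complete).
		 */
	} else {
		/*
		 * The buffered data ends inside a TLS record header;
		 * 'resid' more bytes are needed before the record length
		 * is known.
		 */
	}
}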
int
ktls_enable_rx(struct socket *so, struct tls_enable *en)
{
......@@ -1121,17 +1191,18 @@ ktls_enable_rx(struct socket *so, struct tls_enable *en)
if (error)
return (error);
#ifdef TCP_OFFLOAD
error = ktls_try_toe(so, tls, KTLS_RX);
if (error)
#endif
error = ktls_try_sw(so, tls, KTLS_RX);
error = ktls_ocf_try(so, tls, KTLS_RX);
if (error) {
ktls_cleanup(tls);
return (error);
}
#ifdef TCP_OFFLOAD
error = ktls_try_toe(so, tls, KTLS_RX);
if (error)
#endif
ktls_use_sw(tls);
/* Mark the socket as using TLS offload. */
SOCKBUF_LOCK(&so->so_rcv);
so->so_rcv.sb_tls_seqno = be64dec(en->rec_seq);
......
......@@ -267,7 +267,7 @@ static int
shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
vm_page_t m;
vm_page_t m __diagused;
int psind;
psind = object->un_pager.phys.data_val;
......@@ -751,7 +751,8 @@ shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
vm_object_t object;
vm_page_t m;
vm_pindex_t newobjsz, oldobjsz;
vm_pindex_t newobjsz;
vm_pindex_t oldobjsz __unused;
int aflags, error, i, psind, try;
KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
......
......@@ -48,7 +48,7 @@ __FBSDID("$FreeBSD$");
#include <opencrypto/cryptodev.h>
#include <opencrypto/ktls.h>
struct ocf_session {
struct ktls_ocf_session {
crypto_session_t sid;
crypto_session_t mac_sid;
struct mtx lock;
......@@ -64,7 +64,7 @@ struct ocf_session {
};
struct ocf_operation {
struct ocf_session *os;
struct ktls_ocf_session *os;
bool done;
};
......@@ -142,7 +142,7 @@ ktls_ocf_callback_async(struct cryptop *crp)
}
static int
ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp)
ktls_ocf_dispatch(struct ktls_ocf_session *os, struct cryptop *crp)
{
struct ocf_operation oo;
int error;
......@@ -228,7 +228,7 @@ ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
struct uio *uio;
struct tls_mac_data *ad;
struct cryptop *crp;
struct ocf_session *os;
struct ktls_ocf_session *os;
struct iovec iov[m->m_epg_npgs + 2];
u_int pgoff;
int i, error;
......@@ -237,7 +237,7 @@ ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
MPASS(outiovcnt + 1 <= nitems(iov));
os = tls->cipher;
os = tls->ocf_session;
hdr = (const struct tls_record_layer *)m->m_epg_hdr;
crp = &state->crp;
uio = &state->uio;
......@@ -376,11 +376,11 @@ ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
struct uio *uio;
struct tls_aead_data *ad;
struct cryptop *crp;
struct ocf_session *os;
struct ktls_ocf_session *os;
int error;
uint16_t tls_comp_len;
os = tls->cipher;
os = tls->ocf_session;
hdr = (const struct tls_record_layer *)m->m_epg_hdr;
crp = &state->crp;
uio = &state->uio;
......@@ -457,12 +457,12 @@ ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
{
struct tls_aead_data ad;
struct cryptop crp;
struct ocf_session *os;
struct ktls_ocf_session *os;
struct ocf_operation oo;
int error;
uint16_t tls_comp_len;
os = tls->cipher;
os = tls->ocf_session;
oo.os = os;
oo.done = false;
......@@ -526,11 +526,11 @@ ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
struct uio *uio;
struct tls_aead_data_13 *ad;
struct cryptop *crp;
struct ocf_session *os;
struct ktls_ocf_session *os;
char nonce[12];
int error;
os = tls->cipher;
os = tls->ocf_session;
hdr = (const struct tls_record_layer *)m->m_epg_hdr;
crp = &state->crp;
uio = &state->uio;
......@@ -598,9 +598,9 @@ ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
void
ktls_ocf_free(struct ktls_session *tls)
{
struct ocf_session *os;
struct ktls_ocf_session *os;
os = tls->cipher;
os = tls->ocf_session;
crypto_freesession(os->sid);
mtx_destroy(&os->lock);
zfree(os, M_KTLS_OCF);
......@@ -610,7 +610,7 @@ int
ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
{
struct crypto_session_params csp, mac_csp;
struct ocf_session *os;
struct ktls_ocf_session *os;
int error, mac_len;
memset(&csp, 0, sizeof(csp));
......@@ -745,7 +745,7 @@ ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
}
mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
tls->cipher = os;
tls->ocf_session = os;
if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
if (direction == KTLS_TX) {
......
......@@ -167,6 +167,7 @@ struct tls_session_params {
#define KTLS_RX 2
struct iovec;
struct ktls_ocf_session;
struct ktls_ocf_encrypt_state;
struct ktls_session;
struct m_snd_tag;
......@@ -183,10 +184,8 @@ struct ktls_session {
const struct tls_record_layer *hdr, struct mbuf *m,
uint64_t seqno, int *trailer_len);
};
union {
void *cipher;
struct m_snd_tag *snd_tag;
};
struct ktls_ocf_session *ocf_session;
struct m_snd_tag *snd_tag;
struct tls_session_params params;
u_int wq_index;
volatile u_int refcount;
......@@ -224,6 +223,7 @@ int ktls_output_eagain(struct inpcb *inp, struct ktls_session *tls);
#ifdef RATELIMIT
int ktls_modify_txrtlmt(struct ktls_session *tls, uint64_t max_pacing_rate);
#endif
bool ktls_pending_rx_info(struct sockbuf *sb, uint64_t *seqnop, size_t *residp);
static inline struct ktls_session *
ktls_hold(struct ktls_session *tls)
......
......@@ -244,8 +244,8 @@ ffs_truncate(vp, length, flags, cred)
ufs2_daddr_t bn, lbn, lastblock, lastiblock[UFS_NIADDR];
ufs2_daddr_t indir_lbn[UFS_NIADDR], oldblks[UFS_NDADDR + UFS_NIADDR];
ufs2_daddr_t newblks[UFS_NDADDR + UFS_NIADDR];
ufs2_daddr_t count, blocksreleased = 0, datablocks, blkno;
struct bufobj *bo;
ufs2_daddr_t count, blocksreleased = 0, blkno;
struct bufobj *bo __diagused;
struct fs *fs;
struct buf *bp;
struct ufsmount *ump;
......@@ -297,10 +297,8 @@ ffs_truncate(vp, length, flags, cred)
if (journaltrunc == 0 && DOINGSOFTDEP(vp) && length == 0)
softdeptrunc = !softdep_slowdown(vp);
extblocks = 0;
datablocks = DIP(ip, i_blocks);
if (fs->fs_magic == FS_UFS2_MAGIC && ip->i_din2->di_extsize > 0) {
extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
datablocks -= extblocks;
}
if ((flags & IO_EXT) && extblocks > 0) {
if (length != 0)
......
......@@ -5064,7 +5064,7 @@ softdep_setup_create(dp, ip)
struct inode *ip;
{
struct inodedep *inodedep;
struct jaddref *jaddref;
struct jaddref *jaddref __diagused;
struct vnode *dvp;
KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
......@@ -5205,11 +5205,9 @@ softdep_setup_rmdir(dp, ip)
struct inode *dp;
struct inode *ip;
{
struct vnode *dvp;
KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
("softdep_setup_rmdir called on non-softdep filesystem"));
dvp = ITOV(dp);
ACQUIRE_LOCK(ITOUMP(dp));
(void) inodedep_lookup_ip(ip);
(void) inodedep_lookup_ip(dp);
......@@ -5225,11 +5223,9 @@ softdep_setup_unlink(dp, ip)
struct inode *dp;
struct inode *ip;
{
struct vnode *dvp;
KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
("softdep_setup_unlink called on non-softdep filesystem"));
dvp = ITOV(dp);
ACQUIRE_LOCK(ITOUMP(dp));
(void) inodedep_lookup_ip(ip);
(void) inodedep_lookup_ip(dp);
......@@ -6424,7 +6420,7 @@ setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)
struct allocindir *aip; /* allocindir allocated by the above routines */
ufs_lbn_t lbn; /* Logical block number for this block. */
{
struct fs *fs;
struct fs *fs __diagused;
struct indirdep *indirdep;
struct allocindir *oldaip;
struct freefrag *freefrag;
......@@ -10626,7 +10622,7 @@ initiate_write_inodeblock_ufs1(inodedep, bp)
#ifdef INVARIANTS
ufs_lbn_t prevlbn = 0;
#endif
int deplist;
int deplist __diagused;
if (inodedep->id_state & IOSTARTED)
panic("initiate_write_inodeblock_ufs1: already started");
......@@ -10798,7 +10794,7 @@ initiate_write_inodeblock_ufs2(inodedep, bp)
#ifdef INVARIANTS
ufs_lbn_t prevlbn = 0;
#endif
int deplist;
int deplist __diagused;
if (inodedep->id_state & IOSTARTED)
panic("initiate_write_inodeblock_ufs2: already started");
......
......@@ -347,7 +347,8 @@ ffs_mount(struct mount *mp)
struct thread *td;
struct ufsmount *ump = NULL;
struct fs *fs;
int error, error1, flags;
int error, flags;
int error1 __diagused;
uint64_t mntorflags, saved_mnt_flag;
accmode_t accmode;
struct nameidata ndp;
......@@ -1613,11 +1614,9 @@ ffs_sync_lazy(mp)
{
struct vnode *mvp, *vp;
struct inode *ip;
struct thread *td;
int allerror, error;
allerror = 0;
td = curthread;
if ((mp->mnt_flag & MNT_NOATIME) != 0) {
#ifdef QUOTA
qsync(mp);
......
......@@ -1399,7 +1399,6 @@ static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
struct inode *ip;
struct ufs2_dinode *dp;
int error;
ip = VTOI(vp);
......@@ -1410,7 +1409,6 @@ ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
ffs_unlock_ea(vp);
return (0);
}
dp = ip->i_din2;
error = ffs_rdextattr(&ip->i_ea_area, vp, td);
if (error) {
ffs_unlock_ea(vp);
......@@ -1994,7 +1992,6 @@ ffs_vput_pair(struct vop_vput_pair_args *ap)
struct inode *dp, *ip;
ino_t ip_ino;
u_int64_t ip_gen;
off_t old_size;
int error, vp_locked;
dvp = ap->a_dvp;
......@@ -2031,7 +2028,6 @@ ffs_vput_pair(struct vop_vput_pair_args *ap)
VNASSERT(I_ENDOFF(dp) != 0 && I_ENDOFF(dp) < dp->i_size, dvp,
("IN_ENDOFF set but I_ENDOFF() is not"));
dp->i_flag &= ~IN_ENDOFF;
old_size = dp->i_size;
error = UFS_TRUNCATE(dvp, (off_t)I_ENDOFF(dp), IO_NORMAL |
(DOINGASYNC(dvp) ? 0 : IO_SYNC), curthread->td_ucred);
if (error != 0 && error != ERELOOKUP) {
......
......@@ -521,7 +521,7 @@ dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
{
struct dmar_domain *domain, *domain1;
struct dmar_ctx *ctx, *ctx1;
struct iommu_unit *unit;
struct iommu_unit *unit __diagused;
dmar_ctx_entry_t *ctxp;
struct sf_buf *sf;
int bus, slot, func, error;
......@@ -904,7 +904,7 @@ dmar_domain_unload(struct dmar_domain *domain,
struct dmar_unit *unit;
struct iommu_domain *iodom;
struct iommu_map_entry *entry, *entry1;
int error;
int error __diagused;
iodom = DOM2IODOM(domain);
unit = DOM2DMAR(domain);
......
......@@ -762,7 +762,6 @@ dmar_find_by_scope(int dev_domain, int dev_busno,
struct dmar_unit *
dmar_find(device_t dev, bool verbose)
{
device_t dmar_dev;
struct dmar_unit *unit;
const char *banner;
int i, dev_domain, dev_busno, dev_path_len;
......@@ -774,7 +773,6 @@ dmar_find(device_t dev, bool verbose)
devclass_find("pci"))
return (NULL);
dmar_dev = NULL;
dev_domain = pci_get_domain(dev);
dev_path_len = dmar_dev_depth(dev);
ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
......
......@@ -425,7 +425,7 @@ domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
{
dmar_pte_t *pte;
struct sf_buf *sf;
iommu_gaddr_t pg_sz, base1, size1;
iommu_gaddr_t pg_sz, base1;
vm_pindex_t pi, c, idx, run_sz;
int lvl;
bool superpage;
......@@ -433,7 +433,6 @@ domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
base1 = base;
size1 = size;