Commit de2d4784 authored by Gleb Smirnoff

SMR protection for inpcbs

With the introduction of epoch(9) synchronization to the network
stack, the inpcb database became protected by the network epoch
together with static network data (interfaces, addresses, etc).
However, inpcbs aren't static in nature; they are created and
destroyed all the time, which creates some traffic for the epoch(9)
garbage collector.

A fairly new feature of uma(9), Safe Memory Reclamation, allows
memory to be freed safely in page-sized batches, with virtually
zero overhead compared to uma_zfree().  However, unlike epoch(9),
it puts a stricter requirement on access to the protected memory:
a critical(9) section is needed to access it.  Details:

- The database is already built on CK lists, thanks to epoch(9).
- For write access nothing is changed.
- For a lookup in the database an SMR section is now required.
  Once the desired inpcb is found, we need to transition from the
  SMR section to an r/w lock on the inpcb itself, with a check that
  the inpcb hasn't been freed yet.  This requires some complexity,
  since the SMR section itself is a critical(9) section.  The
  complexity is hidden from KPI users in inp_smr_lock(); see the
  sketch after this list.
- For an inpcb list traversal (a pcblist sysctl, or a broadcast
  notification) a new KPI that hides the internals of the database
  is also provided: inp_next(struct inpcb_iterator *).

Reviewed by:		rrs
Differential Revision:	https://reviews.freebsd.org/D33022
parent 565655f4
@@ -564,15 +564,15 @@ static struct witness_order_list_entry order_lists[] = {
/*
* UDP/IP
*/
{ "udp", &lock_class_mtx_sleep },
{ "udpinp", &lock_class_rw },
{ "udp", &lock_class_mtx_sleep },
{ "so_snd", &lock_class_mtx_sleep },
{ NULL, NULL },
/*
* TCP/IP
*/
{ "tcp", &lock_class_mtx_sleep },
{ "tcpinp", &lock_class_rw },
{ "tcp", &lock_class_mtx_sleep },
{ "so_snd", &lock_class_mtx_sleep },
{ NULL, NULL },
/*
......
@@ -854,10 +854,6 @@ ktls_try_toe(struct socket *so, struct ktls_session *tls, int direction)
inp = so->so_pcb;
INP_WLOCK(inp);
if (inp->inp_flags2 & INP_FREED) {
INP_WUNLOCK(inp);
return (ECONNRESET);
}
if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
INP_WUNLOCK(inp);
return (ECONNRESET);
@@ -909,10 +905,6 @@ ktls_alloc_snd_tag(struct inpcb *inp, struct ktls_session *tls, bool force,
int error;
INP_RLOCK(inp);
if (inp->inp_flags2 & INP_FREED) {
INP_RUNLOCK(inp);
return (ECONNRESET);
}
if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
INP_RUNLOCK(inp);
return (ECONNRESET);
@@ -2716,8 +2708,7 @@ ktls_disable_ifnet_help(void *context, int pending __unused)
INP_WLOCK(inp);
so = inp->inp_socket;
MPASS(so != NULL);
if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
(inp->inp_flags2 & INP_FREED)) {
if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
goto out;
}
@@ -2729,7 +2720,6 @@ ktls_disable_ifnet_help(void *context, int pending __unused)
counter_u64_add(ktls_ifnet_disable_ok, 1);
/* ktls_set_tx_mode() drops inp wlock, so recheck flags */
if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) == 0 &&
(inp->inp_flags2 & INP_FREED) == 0 &&
(tp = intotcpcb(inp)) != NULL &&
tp->t_fb->tfb_hwtls_change != NULL)
(*tp->t_fb->tfb_hwtls_change)(tp, 0);
......
@@ -113,6 +113,7 @@ __FBSDID("$FreeBSD$");
#define INPCBLBGROUP_SIZMIN 8
#define INPCBLBGROUP_SIZMAX 256
#define INP_FREED 0x00000200 /* See in_pcb.h. */
static struct callout ipport_tick_callout;
@@ -145,7 +146,6 @@ VNET_DEFINE_STATIC(int, ipport_tcplastcount);
#define V_ipport_tcplastcount VNET(ipport_tcplastcount)
static void in_pcbremlists(struct inpcb *inp);
#ifdef INET
static struct inpcb *in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo,
struct in_addr faddr, u_int fport_arg,
@@ -514,38 +514,43 @@ inpcb_fini(void *mem, int size)
INP_LOCK_DESTROY(inp);
}
/* Make sure it is safe to use hashinit(9) on CK_LIST. */
CTASSERT(sizeof(struct inpcbhead) == sizeof(LIST_HEAD(, inpcb)));
/*
* Initialize an inpcbinfo -- we should be able to reduce the number of
* arguments in time.
*/
void
in_pcbinfo_init(struct inpcbinfo *pcbinfo, const char *name,
struct inpcbhead *listhead, int hash_nelements, int porthash_nelements,
char *inpcbzone_name, uma_init inpcbzone_init, u_int hashfields)
u_int hash_nelements, int porthash_nelements, char *inpcbzone_name,
uma_init inpcbzone_init)
{
porthash_nelements = imin(porthash_nelements, IPPORT_MAX + 1);
INP_INFO_LOCK_INIT(pcbinfo, name);
INP_HASH_LOCK_INIT(pcbinfo, "pcbinfohash"); /* XXXRW: argument? */
INP_LIST_LOCK_INIT(pcbinfo, "pcbinfolist");
mtx_init(&pcbinfo->ipi_lock, name, NULL, MTX_DEF);
mtx_init(&pcbinfo->ipi_hash_lock, "pcbinfohash", NULL, MTX_DEF);
#ifdef VIMAGE
pcbinfo->ipi_vnet = curvnet;
#endif
pcbinfo->ipi_listhead = listhead;
CK_LIST_INIT(pcbinfo->ipi_listhead);
CK_LIST_INIT(&pcbinfo->ipi_listhead);
pcbinfo->ipi_count = 0;
pcbinfo->ipi_hashbase = hashinit(hash_nelements, M_PCB,
&pcbinfo->ipi_hashmask);
porthash_nelements = imin(porthash_nelements, IPPORT_MAX + 1);
pcbinfo->ipi_porthashbase = hashinit(porthash_nelements, M_PCB,
&pcbinfo->ipi_porthashmask);
pcbinfo->ipi_lbgrouphashbase = hashinit(porthash_nelements, M_PCB,
&pcbinfo->ipi_lbgrouphashmask);
pcbinfo->ipi_zone = uma_zcreate(inpcbzone_name, sizeof(struct inpcb),
NULL, NULL, inpcbzone_init, inpcb_fini, UMA_ALIGN_PTR, 0);
NULL, NULL, inpcbzone_init, inpcb_fini, UMA_ALIGN_PTR,
UMA_ZONE_SMR);
uma_zone_set_max(pcbinfo->ipi_zone, maxsockets);
uma_zone_set_warning(pcbinfo->ipi_zone,
"kern.ipc.maxsockets limit reached");
pcbinfo->ipi_smr = uma_zone_get_smr(pcbinfo->ipi_zone);
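/*
 * The inpcbport zone shares the inpcb zone's SMR domain, so port
 * structures freed with uma_zfree_smr() remain valid for readers
 * within the same SMR section.
 */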
pcbinfo->ipi_portzone = uma_zcreate(inpcbzone_name,
sizeof(struct inpcbport), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
uma_zone_set_smr(pcbinfo->ipi_portzone, pcbinfo->ipi_smr);
}
/*
@@ -564,9 +569,8 @@ in_pcbinfo_destroy(struct inpcbinfo *pcbinfo)
hashdestroy(pcbinfo->ipi_lbgrouphashbase, M_PCB,
pcbinfo->ipi_lbgrouphashmask);
uma_zdestroy(pcbinfo->ipi_zone);
INP_LIST_LOCK_DESTROY(pcbinfo);
INP_HASH_LOCK_DESTROY(pcbinfo);
INP_INFO_LOCK_DESTROY(pcbinfo);
mtx_destroy(&pcbinfo->ipi_hash_lock);
mtx_destroy(&pcbinfo->ipi_lock);
}
/*
@@ -580,7 +584,7 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo)
int error;
error = 0;
inp = uma_zalloc(pcbinfo->ipi_zone, M_NOWAIT);
inp = uma_zalloc_smr(pcbinfo->ipi_zone, M_NOWAIT);
if (inp == NULL)
return (ENOBUFS);
bzero(&inp->inp_start_zero, inp_zero_size);
@@ -612,33 +616,38 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo)
if (V_ip6_v6only)
inp->inp_flags |= IN6P_IPV6_V6ONLY;
}
#endif
INP_WLOCK(inp);
INP_LIST_WLOCK(pcbinfo);
CK_LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
pcbinfo->ipi_count++;
so->so_pcb = (caddr_t)inp;
#ifdef INET6
if (V_ip6_auto_flowlabel)
inp->inp_flags |= IN6P_AUTOFLOWLABEL;
#endif
inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
refcount_init(&inp->inp_refcount, 1); /* Reference from inpcbinfo */
/*
* Routes in inpcb's can cache L2 as well; they are guaranteed
* to be cleaned up.
*/
inp->inp_route.ro_flags = RT_LLE_CACHE;
INP_LIST_WUNLOCK(pcbinfo);
#ifdef TCPHPTS
/*
* If using hpts, let's drop a random number in so that
* not all new connections fall on the same CPU.
*/
inp->inp_hpts_cpu = inp->inp_input_cpu = hpts_random_cpu(inp);
#endif
refcount_init(&inp->inp_refcount, 1); /* Reference from socket. */
INP_WLOCK(inp);
INP_INFO_WLOCK(pcbinfo);
pcbinfo->ipi_count++;
inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
CK_LIST_INSERT_HEAD(&pcbinfo->ipi_listhead, inp, inp_list);
INP_INFO_WUNLOCK(pcbinfo);
so->so_pcb = inp;
return (0);
#if defined(IPSEC) || defined(IPSEC_SUPPORT) || defined(MAC)
out:
if (error != 0) {
crfree(inp->inp_cred);
uma_zfree(pcbinfo->ipi_zone, inp);
}
#endif
crfree(inp->inp_cred);
uma_zfree_smr(pcbinfo->ipi_zone, inp);
return (error);
#endif
}
#ifdef INET
@@ -1505,192 +1514,274 @@ in_pcbdetach(struct inpcb *inp)
}
/*
* in_pcbref() bumps the reference count on an inpcb in order to maintain
* stability of an inpcb pointer despite the inpcb lock being released. This
* is used in TCP when the inpcbinfo lock needs to be acquired or upgraded,
* but where the inpcb lock may already held.
*
* in_pcbref() should be used only to provide brief memory stability, and
* must always be followed by a call to INP_WLOCK() and in_pcbrele() to
* garbage collect the inpcb if it has been in_pcbfree()'d from another
* context. Until in_pcbrele() has returned that the inpcb is still valid,
* lock and rele are the *only* safe operations that may be performed on the
* inpcb.
* inpcb hash lookups are protected by an SMR section.
*
* While the inpcb will not be freed, releasing the inpcb lock means that the
* connection's state may change, so the caller should be careful to
* revalidate any cached state on reacquiring the lock. Drop the reference
* using in_pcbrele().
* Once the desired pcb has been found, switching from the SMR section
* to a pcb lock is performed with inp_smr_lock().  We cannot use
* INP_(W|R)LOCK here because the SMR section is a critical section.
* In 99%+ of cases inp_smr_lock() obtains the lock immediately.
*/
void
in_pcbref(struct inpcb *inp)
static inline void
inp_lock(struct inpcb *inp, const inp_lookup_t lock)
{
KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
lock == INPLOOKUP_RLOCKPCB ?
rw_rlock(&inp->inp_lock) : rw_wlock(&inp->inp_lock);
}
static inline void
inp_unlock(struct inpcb *inp, const inp_lookup_t lock)
{
refcount_acquire(&inp->inp_refcount);
lock == INPLOOKUP_RLOCKPCB ?
rw_runlock(&inp->inp_lock) : rw_wunlock(&inp->inp_lock);
}
/*
* Drop a refcount on an inpcb elevated using in_pcbref(); because a call to
* in_pcbfree() may have been made between in_pcbref() and in_pcbrele(), we
* return a flag indicating whether or not the inpcb remains valid. If it is
* valid, we return with the inpcb lock held.
*
* Notice that, unlike in_pcbref(), the inpcb lock must be held to drop a
* reference on an inpcb. Historically more work was done here (actually, in
* in_pcbfree_internal()) but has been moved to in_pcbfree() to avoid the
* need for the pcbinfo lock in in_pcbrele(). Deferring the free is entirely
* about memory stability (and continued use of the write lock).
*/
int
in_pcbrele_rlocked(struct inpcb *inp)
static inline int
inp_trylock(struct inpcb *inp, const inp_lookup_t lock)
{
struct inpcbinfo *pcbinfo;
KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
return (lock == INPLOOKUP_RLOCKPCB ?
rw_try_rlock(&inp->inp_lock) : rw_try_wlock(&inp->inp_lock));
}
INP_RLOCK_ASSERT(inp);
static inline bool
in_pcbrele(struct inpcb *inp, const inp_lookup_t lock)
{
if (refcount_release(&inp->inp_refcount) == 0) {
/*
* If the inpcb has been freed, let the caller know, even if
* this isn't the last reference.
*/
if (inp->inp_flags2 & INP_FREED) {
INP_RUNLOCK(inp);
return (1);
return (lock == INPLOOKUP_RLOCKPCB ?
in_pcbrele_rlocked(inp) : in_pcbrele_wlocked(inp));
}
bool
inp_smr_lock(struct inpcb *inp, const inp_lookup_t lock)
{
MPASS(lock == INPLOOKUP_RLOCKPCB || lock == INPLOOKUP_WLOCKPCB);
SMR_ASSERT_ENTERED(inp->inp_pcbinfo->ipi_smr);
if (__predict_true(inp_trylock(inp, lock))) {
if (__predict_false(inp->inp_flags & INP_FREED)) {
smr_exit(inp->inp_pcbinfo->ipi_smr);
inp_unlock(inp, lock);
return (false);
}
return (0);
smr_exit(inp->inp_pcbinfo->ipi_smr);
return (true);
}
KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
#ifdef TCPHPTS
if (inp->inp_in_hpts || inp->inp_in_input) {
struct tcp_hpts_entry *hpts;
if (__predict_true(refcount_acquire_if_not_zero(&inp->inp_refcount))) {
smr_exit(inp->inp_pcbinfo->ipi_smr);
inp_lock(inp, lock);
if (__predict_false(in_pcbrele(inp, lock)))
return (false);
/*
* We should not be on the hpts at
* this point in any form. we must
* get the lock to be sure.
* An inp acquired through refcount & lock certainly didn't go
* through uma_zfree().  However, it may have already gone
* through in_pcbfree() and have another reference that
* prevented its release by our in_pcbrele().
*/
hpts = tcp_hpts_lock(inp);
if (inp->inp_in_hpts)
panic("Hpts:%p inp:%p at free still on hpts",
hpts, inp);
mtx_unlock(&hpts->p_mtx);
hpts = tcp_input_lock(inp);
if (inp->inp_in_input)
panic("Hpts:%p inp:%p at free still on input hpts",
hpts, inp);
mtx_unlock(&hpts->p_mtx);
if (__predict_false(inp->inp_flags & INP_FREED)) {
inp_unlock(inp, lock);
return (false);
}
return (true);
} else {
smr_exit(inp->inp_pcbinfo->ipi_smr);
return (false);
}
#endif
INP_RUNLOCK(inp);
pcbinfo = inp->inp_pcbinfo;
uma_zfree(pcbinfo->ipi_zone, inp);
return (1);
}
int
in_pcbrele_wlocked(struct inpcb *inp)
/*
* inp_next() - inpcb hash/list traversal iterator
*
* Requires initialized struct inpcb_iterator for context.
* The structure can be initialized with INP_ITERATOR() or INP_ALL_ITERATOR().
*
* - Iterator can have either write-lock or read-lock semantics, which
* cannot be changed later.
* - Iterator can iterate either over the list of all pcbs
* (INP_ALL_LIST), or through a single hash slot.  Note: only
* rip_input() does the latter.
* - Iterator may have an optional bool matching function.  The matching
* function will be executed for each inpcb in the SMR context, so it
* cannot acquire locks and can safely access only immutable fields
* of the inpcb.
*
* A freshly initialized iterator has a NULL inpcb in its context, which
* means that an inp_next() call will return the very first inpcb on the
* list, locked with the desired semantics.  On all subsequent calls the
* context pointer shall hold the current inpcb pointer.  The KPI user is
* not supposed to unlock the current inpcb!  Upon the end of traversal,
* inp_next() will return NULL and write NULL to its context.  After the
* end of traversal the iterator can be reused.
*
* List traversals have the following features/constraints:
* - New entries won't be seen, as they are always added to the head of a list.
* - Removed entries won't stop traversal as long as they are not added to
* a different list. This is violated by in_pcbrehash().
*/
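/*
 * A hypothetical usage sketch, not part of this commit (assuming the
 * INP_ALL_ITERATOR() initializer takes the inpcbinfo and the lock
 * type):
 *
 *	struct inpcb_iterator inpi = INP_ALL_ITERATOR(&V_tcbinfo,
 *	    INPLOOKUP_RLOCKPCB);
 *	struct inpcb *inp;
 *
 *	while ((inp = inp_next(&inpi)) != NULL) {
 *		(examine inp; do not unlock it, inp_next() does that)
 *	}
 */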
#define II_LIST_FIRST(ipi, hash) \
(((hash) == INP_ALL_LIST) ? \
CK_LIST_FIRST(&(ipi)->ipi_listhead) : \
CK_LIST_FIRST(&(ipi)->ipi_hashbase[(hash)]))
#define II_LIST_NEXT(inp, hash) \
(((hash) == INP_ALL_LIST) ? \
CK_LIST_NEXT((inp), inp_list) : \
CK_LIST_NEXT((inp), inp_hash))
#define II_LOCK_ASSERT(inp, lock) \
rw_assert(&(inp)->inp_lock, \
(lock) == INPLOOKUP_RLOCKPCB ? RA_RLOCKED : RA_WLOCKED )
struct inpcb *
inp_next(struct inpcb_iterator *ii)
{
struct inpcbinfo *pcbinfo;
const struct inpcbinfo *ipi = ii->ipi;
inp_match_t *match = ii->match;
void *ctx = ii->ctx;
inp_lookup_t lock = ii->lock;
int hash = ii->hash;
struct inpcb *inp;
KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
if (ii->inp == NULL) { /* First call. */
smr_enter(ipi->ipi_smr);
/* This is unrolled CK_LIST_FOREACH(). */
for (inp = II_LIST_FIRST(ipi, hash);
inp != NULL;
inp = II_LIST_NEXT(inp, hash)) {
if (match != NULL && (match)(inp, ctx) == false)
continue;
if (__predict_true(inp_smr_lock(inp, lock)))
break;
else {
smr_enter(ipi->ipi_smr);
MPASS(inp != II_LIST_FIRST(ipi, hash));
inp = II_LIST_FIRST(ipi, hash);
}
}
INP_WLOCK_ASSERT(inp);
if (inp == NULL)
smr_exit(ipi->ipi_smr);
else
ii->inp = inp;
if (refcount_release(&inp->inp_refcount) == 0) {
/*
* If the inpcb has been freed, let the caller know, even if
* this isn't the last reference.
*/
if (inp->inp_flags2 & INP_FREED) {
INP_WUNLOCK(inp);
return (1);
return (inp);
}
/* Not the first call. */
smr_enter(ipi->ipi_smr);
restart:
inp = ii->inp;
II_LOCK_ASSERT(inp, lock);
next:
inp = II_LIST_NEXT(inp, hash);
if (inp == NULL) {
smr_exit(ipi->ipi_smr);
goto found;
}
if (match != NULL && (match)(inp, ctx) == false)
goto next;
if (__predict_true(inp_trylock(inp, lock))) {
if (__predict_false(inp->inp_flags & INP_FREED)) {
/*
* Entries are never inserted into the middle of a list, thus as
* long as we are in the SMR section we can continue traversal.
* A jump to 'restart' should yield the same result, but could
* produce unnecessary looping.  Could this looping be unbounded?
*/
inp_unlock(inp, lock);
goto next;
} else {
smr_exit(ipi->ipi_smr);
goto found;
}
return (0);
}
KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
#ifdef TCPHPTS
if (inp->inp_in_hpts || inp->inp_in_input) {
struct tcp_hpts_entry *hpts;
/*
* Can't obtain the lock immediately, so take the hard path.  Once we
* exit the SMR section we can no longer jump to 'next', and our only
* stable anchoring point is ii->inp, which we keep locked for this
* case, so we jump to 'restart'.
*/
if (__predict_true(refcount_acquire_if_not_zero(&inp->inp_refcount))) {
smr_exit(ipi->ipi_smr);
inp_lock(inp, lock);
if (__predict_false(in_pcbrele(inp, lock))) {
smr_enter(ipi->ipi_smr);
goto restart;
}
/*
* We should not be on the hpts at
* this point in any form. we must
* get the lock to be sure.
* See comment in inp_smr_lock().
*/
hpts = tcp_hpts_lock(inp);
if (inp->inp_in_hpts)
panic("Hpts:%p inp:%p at free still on hpts",
hpts, inp);
mtx_unlock(&hpts->p_mtx);
hpts = tcp_input_lock(inp);
if (inp->inp_in_input)
panic("Hpts:%p inp:%p at free still on input hpts",
hpts, inp);
mtx_unlock(&hpts->p_mtx);
}
#endif
INP_WUNLOCK(inp);
pcbinfo = inp->inp_pcbinfo;
uma_zfree(pcbinfo->ipi_zone, inp);
return (1);
if (__predict_false(inp->inp_flags & INP_FREED)) {
inp_unlock(inp, lock);
smr_enter(ipi->ipi_smr);
goto restart;
}
} else
goto next;
found:
inp_unlock(ii->inp, lock);
ii->inp = inp;
return (ii->inp);
}
static void
inpcbport_free(epoch_context_t ctx)
/*
* in_pcbref() bumps the reference count on an inpcb in order to maintain
* stability of an inpcb pointer despite the inpcb lock being released or
* the SMR section being exited.
*
* To drop the reference later, in_pcbrele_(r|w)locked() must be used.
*/
void
in_pcbref(struct inpcb *inp)
{
struct inpcbport *phd;
u_int old __diagused;
phd = __containerof(ctx, struct inpcbport, phd_epoch_ctx);
free(phd, M_PCB);
old = refcount_acquire(&inp->inp_refcount);
KASSERT(old > 0, ("%s: refcount 0", __func__));
}
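/*
 * A hypothetical sketch of the pattern, not part of this commit:
 *
 *	in_pcbref(inp);
 *	INP_WUNLOCK(inp);
 *	(acquire other locks, possibly sleep)
 *	INP_WLOCK(inp);
 *	if (in_pcbrele_wlocked(inp))
 *		return;		(last reference: inpcb freed, lock gone)
 *	if (inp->inp_flags & INP_FREED)
 *		(handle the in_pcbfree() race, as inp_smr_lock() does)
 */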
static void
in_pcbfree_deferred(epoch_context_t ctx)
/*
* Drop a refcount on an inpcb elevated using in_pcbref(), potentially
* freeing the pcb if the reference was the very last one.
*/
bool
in_pcbrele_rlocked(struct inpcb *inp)
{
struct inpcb *inp;
int released __unused;
inp = __containerof(ctx, struct inpcb, inp_epoch_ctx);
INP_RLOCK_ASSERT(inp);
INP_WLOCK(inp);
CURVNET_SET(inp->inp_vnet);
#ifdef INET
struct ip_moptions *imo = inp->inp_moptions;
inp->inp_moptions = NULL;
#endif
/* XXXRW: Do as much as possible here. */
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
if (inp->inp_sp != NULL)
ipsec_delete_pcbpolicy(inp);
#endif
#ifdef INET6
struct ip6_moptions *im6o = NULL;
if (inp->inp_vflag & INP_IPV6PROTO) {
ip6_freepcbopts(inp->in6p_outputopts);
im6o = inp->in6p_moptions;
inp->in6p_moptions = NULL;
}
#endif
if (inp->inp_options)
(void)m_free(inp->inp_options);
inp->inp_vflag = 0;
crfree(inp->inp_cred);
#ifdef MAC
mac_inpcb_destroy(inp);
#endif
released = in_pcbrele_wlocked(inp);
MPASS(released);
#ifdef INET6
ip6_freemoptions(im6o);
#endif
#ifdef INET
inp_freemoptions(imo);
#endif
CURVNET_RESTORE();
if (refcount_release(&inp->inp_refcount) == 0)
return (false);
MPASS(inp->inp_flags & INP_FREED);
MPASS(inp->inp_socket == NULL);
MPASS(inp->inp_in_hpts == 0);
MPASS(inp->inp_in_input == 0);
INP_RUNLOCK(inp);
uma_zfree_smr(inp->inp_pcbinfo->ipi_zone, inp);
return (true);
}
bool
in_pcbrele_wlocked(struct inpcb *inp)
{
INP_WLOCK_ASSERT(inp);