Commit a6ef4a64 authored by HardenedBSD Sync Service's avatar HardenedBSD Sync Service
Browse files

Merge branch 'freebsd/12-stable/master' into hardened/12-stable/master

* freebsd/12-stable/master:
  Stop accounting for MAX_KMAP when allocating UMA boot pages.
  MFC r364302, r364306: Remove the VM map zone.
  In 13-CURRENT, the linux rc script uses "nocover" mount option to avoid mounting required filesystems twice.  However, this option hasn't been MFC'd. Implement its functionality in a different way, by stating the mount point.
parents f0475853 1624e531
......@@ -15,6 +15,12 @@ rcvar="linux_enable"
start_cmd="${name}_start"
stop_cmd=":"
# unmounted dir
#	Returns success (0) when dir is NOT a mount point: it lives on the
#	same device as its parent but is a different inode.  A mounted
#	directory sits on a different device than its parent, so the first
#	comparison fails for it.
#
# Fixes vs. original: POSIX `=` instead of the non-standard `==`,
# quoted command substitutions, and the obsolescent `-a` operator
# replaced by two test commands joined with `&&`.
unmounted()
{
	[ "$(stat -f "%d" "$1")" = "$(stat -f "%d" "$1/..")" ] &&
	    [ "$(stat -f "%i" "$1")" != "$(stat -f "%i" "$1/..")" ]
}
linux_start()
{
local _emul_path _tmpdir
......@@ -48,11 +54,12 @@ linux_start()
if checkyesno linux_mounts_enable; then
_emul_path="/compat/linux"
mount -t linprocfs linprocfs "${_emul_path}/proc"
mount -t linsysfs linsysfs "${_emul_path}/sys"
mount -t devfs devfs "${_emul_path}/dev"
mount -o linrdlnk -t fdescfs fdescfs "${_emul_path}/dev/fd"
mount -o mode=1777 -t tmpfs tmpfs "${_emul_path}/dev/shm"
unmounted "${_emul_path}/proc" && mount -t linprocfs linprocfs "${_emul_path}/proc"
unmounted "${_emul_path}/sys" && mount -t linsysfs linsysfs "${_emul_path}/sys"
unmounted "${_emul_path}/dev" && mount -t devfs devfs "${_emul_path}/dev"
unmounted "${_emul_path}/dev/fd" && mount -o linrdlnk -t fdescfs fdescfs "${_emul_path}/dev/fd"
unmounted "${_emul_path}/dev/shm" && mount -o mode=1777 -t tmpfs tmpfs "${_emul_path}/dev/shm"
true
fi
}
......
......@@ -77,8 +77,8 @@ void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
/* Bootstrapping. */
void kmem_bootstrap_free(vm_offset_t, vm_size_t);
vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
boolean_t);
void kmem_subinit(vm_map_t, vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
bool);
void kmem_init(vm_offset_t, vm_offset_t);
void kmem_init_zero_region(void);
void kmeminit(void);
......
......@@ -273,8 +273,8 @@ vm_ksubmap_init(struct kva_md_info *kmi)
exec_map_entries = 2 * mp_ncpus + 4;
#endif
exec_map_entry_size = round_page(PATH_MAX + ARG_MAX);
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, FALSE);
pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
FALSE);
kmem_subinit(exec_map, kernel_map, &minaddr, &maxaddr,
exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, false);
kmem_subinit(pipe_map, kernel_map, &minaddr, &maxaddr, maxpipekva,
false);
}
......@@ -97,9 +97,9 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <vm/uma.h>
vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;
struct vm_map kernel_map_store;
struct vm_map exec_map_store;
struct vm_map pipe_map_store;
const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);
......@@ -357,9 +357,9 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
}
/*
* kmem_suballoc:
* kmem_subinit:
*
* Allocates a map to manage a subrange
* Initializes a map to manage a subrange
* of the kernel virtual address space.
*
* Arguments are as follows:
......@@ -369,12 +369,11 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
* size Size of range to find
* superpage_align Request that min is superpage aligned
*/
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
vm_size_t size, boolean_t superpage_align)
void
kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
vm_size_t size, bool superpage_align)
{
int ret;
vm_map_t result;
size = round_page(size);
......@@ -383,14 +382,11 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
MAP_ACC_NO_CHARGE);
if (ret != KERN_SUCCESS)
panic("kmem_suballoc: bad status return of %d", ret);
panic("kmem_subinit: bad status return of %d", ret);
*max = *min + size;
result = vm_map_create(vm_map_pmap(parent), *min, *max);
if (result == NULL)
panic("kmem_suballoc: cannot create submap");
if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
panic("kmem_suballoc: unable to change range to submap");
return (result);
vm_map_init(map, vm_map_pmap(parent), *min, *max);
if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS)
panic("kmem_subinit: unable to change range to submap");
}
/*
......@@ -746,15 +742,13 @@ kva_import_domain(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp)
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
vm_map_t m;
int domain;
m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
m->system_map = 1;
vm_map_lock(m);
vm_map_init(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
kernel_map->system_map = 1;
vm_map_lock(kernel_map);
/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
kernel_map = m;
(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
(void) vm_map_insert(kernel_map, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
KERNBASE,
#else
......@@ -762,7 +756,7 @@ kmem_init(vm_offset_t start, vm_offset_t end)
#endif
start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
/* ... and ending with the completion of the above `insert' */
vm_map_unlock(m);
vm_map_unlock(kernel_map);
/*
* Initialize the kernel_arena. This can grow on demand.
......
......@@ -66,9 +66,12 @@
#define _VM_VM_KERN_H_
/* Kernel memory management definitions. */
extern vm_map_t kernel_map;
extern vm_map_t exec_map;
extern vm_map_t pipe_map;
extern struct vm_map kernel_map_store;
#define kernel_map (&kernel_map_store)
extern struct vm_map exec_map_store;
#define exec_map (&exec_map_store)
extern struct vm_map pipe_map_store;
#define pipe_map (&pipe_map_store)
extern struct vmem *kernel_arena;
extern struct vmem *kmem_arena;
extern struct vmem *buffer_arena;
......
......@@ -131,10 +131,8 @@ __FBSDID("$FreeBSD$");
static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int ize, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
......@@ -145,7 +143,6 @@ static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
......@@ -204,14 +201,6 @@ void
vm_map_startup(void)
{
mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
vm_map_zdtor,
#else
NULL,
#endif
vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uma_prealloc(mapzone, MAX_KMAP);
kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
......@@ -230,24 +219,16 @@ static int
vmspace_zinit(void *mem, int size, int flags)
{
struct vmspace *vm;
vm_map_t map;
vm = (struct vmspace *)mem;
map = &vm->vm_map;
vm->vm_map.pmap = NULL;
(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
PMAP_LOCK_INIT(vmspace_pmap(vm));
return (0);
}
static int
vm_map_zinit(void *mem, int size, int flags)
{
vm_map_t map;
map = (vm_map_t)mem;
memset(map, 0, sizeof(*map));
mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
mtx_init(&map->system_mtx, "vm map (system)", NULL,
MTX_DEF | MTX_DUPOK);
sx_init(&map->lock, "vm map (user)");
PMAP_LOCK_INIT(vmspace_pmap(vm));
return (0);
}
......@@ -258,29 +239,16 @@ vmspace_zdtor(void *mem, int size, void *arg)
struct vmspace *vm;
vm = (struct vmspace *)mem;
vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
vm_map_t map;
map = (vm_map_t)mem;
KASSERT(map->nentries == 0,
("map %p nentries == %d on free.",
map, map->nentries));
KASSERT(map->size == 0,
("map %p size == %lu on free.",
map, (unsigned long)map->size));
KASSERT(vm->vm_map.nentries == 0,
("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
KASSERT(vm->vm_map.size == 0,
("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
}
#endif /* INVARIANTS */
/*
* Allocate a vmspace structure, including a vm_map and pmap,
* and initialize those structures. The refcnt is set to 1.
*
* If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
*/
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
......@@ -893,24 +861,6 @@ vmspace_resident_count(struct vmspace *vmspace)
return pmap_resident_count(vmspace_pmap(vmspace));
}
/*
* vm_map_create:
*
* Creates and returns a new empty VM map with
* the given physical map structure, and having
* the given lower and upper address bounds.
*/
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
vm_map_t result;
result = uma_zalloc(mapzone, M_WAITOK);
CTR1(KTR_VM, "vm_map_create: %p", result);
_vm_map_init(result, pmap, min, max);
return (result);
}
/*
* Initialize an existing vm_map structure
* such as that in the vmspace structure.
......@@ -937,8 +887,9 @@ vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
_vm_map_init(map, pmap, min, max);
mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
sx_init(&map->lock, "user map");
mtx_init(&map->system_mtx, "vm map (system)", NULL,
MTX_DEF | MTX_DUPOK);
sx_init(&map->lock, "vm map (user)");
}
/*
......
......@@ -350,10 +350,6 @@ bool vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end);
long vmspace_resident_count(struct vmspace *vmspace);
#endif /* _KERNEL */
/* XXX: number of kernel maps to statically allocate */
#define MAX_KMAP 10
/*
* Copy-on-write flags for vm_map operations
*/
......@@ -417,7 +413,6 @@ long vmspace_resident_count(struct vmspace *vmspace);
#ifdef _KERNEL
boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t);
int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t);
int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t,
vm_offset_t, int, vm_prot_t, vm_prot_t, int);
......
......@@ -590,9 +590,6 @@ vm_page_startup(vm_offset_t vaddr)
#ifndef UMA_MD_SMALL_ALLOC
/* vmem_startup() calls uma_prealloc(). */
boot_pages += vmem_startup_count();
/* vm_map_startup() calls uma_prealloc(). */
boot_pages += howmany(MAX_KMAP,
UMA_SLAB_SPACE / sizeof(struct vm_map));
/*
* Before going fully functional kmem_init() does allocation
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment