commit 612b7c4b7e722535421b11a68343fa2361030775 Author: Mark Johnston Date: Fri Aug 14 10:49:13 2020 -0400 Remove the VM map zone. It is only used to allocate the kernel map, and the exec and pipe submaps. Rename kmem_suballoc() to kmem_subinit(), and remove vm_map_create(). Rename the pipe_map field of struct pipe to avoid conflicting with the macro. I think pipe_pages is a better name since the structure really only tracks the wired pages used during a direct write, not a mapping itself. diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h index 25fd9e503967..acdb361d3262 100644 --- a/sys/vm/vm_extern.h +++ b/sys/vm/vm_extern.h @@ -77,8 +77,8 @@ void kmem_unback(vm_object_t, vm_offset_t, vm_size_t); /* Bootstrapping. */ void kmem_bootstrap_free(vm_offset_t, vm_size_t); -vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t, - boolean_t); +void kmem_subinit(vm_map_t, vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t, + bool); void kmem_init(vm_offset_t, vm_offset_t); void kmem_init_zero_region(void); void kmeminit(void); diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c index 649f3cbc7d84..7884eb19eef9 100644 --- a/sys/vm/vm_init.c +++ b/sys/vm/vm_init.c @@ -253,8 +253,8 @@ vm_ksubmap_init(struct kva_md_info *kmi) exec_map_entries = 2 * mp_ncpus + 4; #endif exec_map_entry_size = round_page(PATH_MAX + ARG_MAX); - exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, - exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, FALSE); - pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva, - FALSE); + kmem_subinit(exec_map, kernel_map, &minaddr, &maxaddr, + exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, false); + kmem_subinit(pipe_map, kernel_map, &minaddr, &maxaddr, maxpipekva, + false); } diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c index 8ae238c8d59c..f9414fea6638 100644 --- a/sys/vm/vm_kern.c +++ b/sys/vm/vm_kern.c @@ -97,9 +97,9 @@ __FBSDID("$FreeBSD$"); #include #include -vm_map_t kernel_map; -vm_map_t 
exec_map; -vm_map_t pipe_map; +struct vm_map kernel_map_store; +struct vm_map exec_map_store; +struct vm_map pipe_map_store; const void *zero_region; CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0); @@ -360,9 +360,9 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags, } /* - * kmem_suballoc: + * kmem_subinit: * - * Allocates a map to manage a subrange + * Initializes a map to manage a subrange * of the kernel virtual address space. * * Arguments are as follows: @@ -372,12 +372,11 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags, * size Size of range to find * superpage_align Request that min is superpage aligned */ -vm_map_t -kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max, - vm_size_t size, boolean_t superpage_align) +void +kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max, + vm_size_t size, bool superpage_align) { int ret; - vm_map_t result; size = round_page(size); @@ -386,14 +385,11 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max, VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_ACC_NO_CHARGE); if (ret != KERN_SUCCESS) - panic("kmem_suballoc: bad status return of %d", ret); + panic("kmem_subinit: bad status return of %d", ret); *max = *min + size; - result = vm_map_create(vm_map_pmap(parent), *min, *max); - if (result == NULL) - panic("kmem_suballoc: cannot create submap"); - if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS) - panic("kmem_suballoc: unable to change range to submap"); - return (result); + vm_map_init(map, vm_map_pmap(parent), *min, *max); + if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS) + panic("kmem_subinit: unable to change range to submap"); } /* @@ -750,16 +746,14 @@ kva_import_domain(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp) void kmem_init(vm_offset_t start, vm_offset_t end) { - vm_map_t m; vm_size_t quantum; int domain; - m = vm_map_create(kernel_pmap, 
VM_MIN_KERNEL_ADDRESS, end); - m->system_map = 1; - vm_map_lock(m); + vm_map_init(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end); + kernel_map->system_map = 1; + vm_map_lock(kernel_map); /* N.B.: cannot use kgdb to debug, starting with this assignment ... */ - kernel_map = m; - (void)vm_map_insert(m, NULL, 0, + (void)vm_map_insert(kernel_map, NULL, 0, #ifdef __amd64__ KERNBASE, #else @@ -774,12 +768,12 @@ kmem_init(vm_offset_t start, vm_offset_t end) * that handle vm_page_array allocation can simply adjust virtual_avail * instead. */ - (void)vm_map_insert(m, NULL, 0, (vm_offset_t)vm_page_array, + (void)vm_map_insert(kernel_map, NULL, 0, (vm_offset_t)vm_page_array, (vm_offset_t)vm_page_array + round_2mpage(vm_page_array_size * sizeof(struct vm_page)), VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT); #endif - vm_map_unlock(m); + vm_map_unlock(kernel_map); /* * Use a large import quantum on NUMA systems. This helps minimize diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h index 8904fe4ded30..c3db4bb57651 100644 --- a/sys/vm/vm_kern.h +++ b/sys/vm/vm_kern.h @@ -66,9 +66,12 @@ #define _VM_VM_KERN_H_ /* Kernel memory management definitions. 
*/ -extern vm_map_t kernel_map; -extern vm_map_t exec_map; -extern vm_map_t pipe_map; +extern struct vm_map kernel_map_store; +#define kernel_map (&kernel_map_store) +extern struct vm_map exec_map_store; +#define exec_map (&exec_map_store) +extern struct vm_map pipe_map_store; +#define pipe_map (&pipe_map_store) extern struct vmem *kernel_arena; extern struct vmem *kmem_arena; extern struct vmem *buffer_arena; diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index df59694e3b31..9ac45132d88a 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -128,10 +128,8 @@ __FBSDID("$FreeBSD$"); static struct mtx map_sleep_mtx; static uma_zone_t mapentzone; static uma_zone_t kmapentzone; -static uma_zone_t mapzone; static uma_zone_t vmspace_zone; static int vmspace_zinit(void *mem, int size, int flags); -static int vm_map_zinit(void *mem, int size, int flags); static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max); static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map); @@ -142,7 +140,6 @@ static int vm_map_growstack(vm_map_t map, vm_offset_t addr, static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags); #ifdef INVARIANTS -static void vm_map_zdtor(void *mem, int size, void *arg); static void vmspace_zdtor(void *mem, int size, void *arg); #endif static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, @@ -198,13 +195,6 @@ void vm_map_startup(void) { mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF); - mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL, -#ifdef INVARIANTS - vm_map_zdtor, -#else - NULL, -#endif - vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MTXCLASS | UMA_ZONE_VM); @@ -223,24 +213,16 @@ static int vmspace_zinit(void *mem, int size, int flags) { struct vmspace *vm; + vm_map_t 
map; vm = (struct vmspace *)mem; + map = &vm->vm_map; - vm->vm_map.pmap = NULL; - (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags); - PMAP_LOCK_INIT(vmspace_pmap(vm)); - return (0); -} - -static int -vm_map_zinit(void *mem, int size, int flags) -{ - vm_map_t map; - - map = (vm_map_t)mem; memset(map, 0, sizeof(*map)); - mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK); + mtx_init(&map->system_mtx, "vm map (system)", NULL, + MTX_DEF | MTX_DUPOK); sx_init(&map->lock, "vm map (user)"); + PMAP_LOCK_INIT(vmspace_pmap(vm)); return (0); } @@ -251,21 +233,10 @@ vmspace_zdtor(void *mem, int size, void *arg) struct vmspace *vm; vm = (struct vmspace *)mem; - - vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg); -} -static void -vm_map_zdtor(void *mem, int size, void *arg) -{ - vm_map_t map; - - map = (vm_map_t)mem; - KASSERT(map->nentries == 0, - ("map %p nentries == %d on free.", - map, map->nentries)); - KASSERT(map->size == 0, - ("map %p size == %lu on free.", - map, (unsigned long)map->size)); + KASSERT(vm->vm_map.nentries == 0, + ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries)); + KASSERT(vm->vm_map.size == 0, + ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size)); } #endif /* INVARIANTS */ @@ -869,24 +840,6 @@ vmspace_resident_count(struct vmspace *vmspace) return pmap_resident_count(vmspace_pmap(vmspace)); } -/* - * vm_map_create: - * - * Creates and returns a new empty VM map with - * the given physical map structure, and having - * the given lower and upper address bounds. - */ -vm_map_t -vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) -{ - vm_map_t result; - - result = uma_zalloc(mapzone, M_WAITOK); - CTR1(KTR_VM, "vm_map_create: %p", result); - _vm_map_init(result, pmap, min, max); - return (result); -} - /* * Initialize an existing vm_map structure * such as that in the vmspace structure. 
@@ -917,8 +870,9 @@ vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max) { _vm_map_init(map, pmap, min, max); - mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK); - sx_init(&map->lock, "user map"); + mtx_init(&map->system_mtx, "vm map (system)", NULL, + MTX_DEF | MTX_DUPOK); + sx_init(&map->lock, "vm map (user)"); } /* diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h index 8f80aa9d4eb5..2ddf9201fd42 100644 --- a/sys/vm/vm_map.h +++ b/sys/vm/vm_map.h @@ -455,7 +455,6 @@ vm_map_entry_read_succ(void *token, struct vm_map_entry *const clone, #ifdef _KERNEL boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t); -vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t); int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t); int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, vm_offset_t, int, vm_prot_t, vm_prot_t, int);