Patch under review in FreeBSD bug 248008:

(-)b/sys/vm/vm_extern.h (-2 / +2 lines)
@@ -77,8 +77,8 @@ void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
 
 /* Bootstrapping. */
 void kmem_bootstrap_free(vm_offset_t, vm_size_t);
-vm_map_t kmem_suballoc(vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
-    boolean_t);
+void kmem_subinit(vm_map_t, vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
+    bool);
 void kmem_init(vm_offset_t, vm_offset_t);
 void kmem_init_zero_region(void);
 void kmeminit(void);
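
Note: this prototype change turns kmem_suballoc(), which allocated and returned a new map, into kmem_subinit(), which fills in caller-provided map storage; the trailing parameter also moves from boolean_t to bool. A minimal sketch of the new call pattern follows; foo_map_store and foo_submap_init are hypothetical names, not part of the patch:

static struct vm_map foo_map_store;	/* caller now provides the storage */
#define	foo_map	(&foo_map_store)

static void
foo_submap_init(vm_size_t size)
{
	vm_offset_t minaddr, maxaddr;

	/* Before: foo_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	 * size, FALSE).  After: */
	kmem_subinit(foo_map, kernel_map, &minaddr, &maxaddr, size, false);
}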
(-)b/sys/vm/vm_init.c (-4 / +4 lines)
@@ -253,8 +253,8 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 	exec_map_entries = 2 * mp_ncpus + 4;
 #endif
 	exec_map_entry_size = round_page(PATH_MAX + ARG_MAX);
-	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
-	    exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, FALSE);
-	pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
-	    FALSE);
+	kmem_subinit(exec_map, kernel_map, &minaddr, &maxaddr,
+	    exec_map_entries * exec_map_entry_size + 64 * PAGE_SIZE, false);
+	kmem_subinit(pipe_map, kernel_map, &minaddr, &maxaddr, maxpipekva,
+	    false);
 }
(-)b/sys/vm/vm_kern.c (-24 / +18 lines)
@@ -97,9 +97,9 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
 
-vm_map_t kernel_map;
-vm_map_t exec_map;
-vm_map_t pipe_map;
+struct vm_map kernel_map_store;
+struct vm_map exec_map_store;
+struct vm_map pipe_map_store;
 
 const void *zero_region;
 CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);
@@ -360,9 +360,9 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
 }
 
 /*
- *	kmem_suballoc:
+ *	kmem_subinit:
  *
- *	Allocates a map to manage a subrange
+ *	Initializes a map to manage a subrange
  *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
@@ -372,12 +372,11 @@ kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
-vm_map_t
-kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
-    vm_size_t size, boolean_t superpage_align)
+void
+kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
+    vm_size_t size, bool superpage_align)
 {
 	int ret;
-	vm_map_t result;
 
 	size = round_page(size);
 
@@ -386,14 +385,11 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
 	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
 	    MAP_ACC_NO_CHARGE);
 	if (ret != KERN_SUCCESS)
-		panic("kmem_suballoc: bad status return of %d", ret);
+		panic("kmem_subinit: bad status return of %d", ret);
 	*max = *min + size;
-	result = vm_map_create(vm_map_pmap(parent), *min, *max);
-	if (result == NULL)
-		panic("kmem_suballoc: cannot create submap");
-	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
-		panic("kmem_suballoc: unable to change range to submap");
-	return (result);
+	vm_map_init(map, vm_map_pmap(parent), *min, *max);
+	if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS)
+		panic("kmem_subinit: unable to change range to submap");
 }
 
 /*
@@ -750,16 +746,14 @@ kva_import_domain(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp)
 void
 kmem_init(vm_offset_t start, vm_offset_t end)
 {
-	vm_map_t m;
 	vm_size_t quantum;
 	int domain;
 
-	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
-	m->system_map = 1;
-	vm_map_lock(m);
+	vm_map_init(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
+	kernel_map->system_map = 1;
+	vm_map_lock(kernel_map);
 	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
-	kernel_map = m;
-	(void)vm_map_insert(m, NULL, 0,
+	(void)vm_map_insert(kernel_map, NULL, 0,
 #ifdef __amd64__
 	    KERNBASE,
 #else
@@ -774,12 +768,12 @@ kmem_init(vm_offset_t start, vm_offset_t end)
 	 * that handle vm_page_array allocation can simply adjust virtual_avail
 	 * instead.
 	 */
-	(void)vm_map_insert(m, NULL, 0, (vm_offset_t)vm_page_array,
+	(void)vm_map_insert(kernel_map, NULL, 0, (vm_offset_t)vm_page_array,
 	    (vm_offset_t)vm_page_array + round_2mpage(vm_page_array_size *
 	    sizeof(struct vm_page)),
 	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
 #endif
-	vm_map_unlock(m);
+	vm_map_unlock(kernel_map);
 
 	/*
 	 * Use a large import quantum on NUMA systems.  This helps minimize
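
Note: the rewritten kmem_subinit() keeps the same three steps as kmem_suballoc() (reserve a range in the parent map, set up the child map over that range, convert the range to a submap), but since the child map is caller-supplied there is no allocation that can fail, and the "cannot create submap" panic disappears. A simplified, standalone model of the contract; struct map, map_init(), and subinit() here are stand-ins, not kernel API:

#include <assert.h>

struct map { unsigned long min, max; };	/* stand-in for struct vm_map */

static void
map_init(struct map *m, unsigned long min, unsigned long max)
{
	m->min = min;
	m->max = max;
}

/* The caller passes storage; initialization itself cannot fail. */
static void
subinit(struct map *child, const struct map *parent, unsigned long off,
    unsigned long size)
{
	assert(off >= parent->min && off + size <= parent->max);
	map_init(child, off, off + size);
}

int
main(void)
{
	struct map parent, child;

	map_init(&parent, 0x1000, 0x100000);
	subinit(&child, &parent, 0x2000, 0x1000);
	return (0);
}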
(-)b/sys/vm/vm_kern.h (-3 / +6 lines)
@@ -66,9 +66,12 @@
 #define	_VM_VM_KERN_H_
 
 /* Kernel memory management definitions. */
-extern vm_map_t kernel_map;
-extern vm_map_t exec_map;
-extern vm_map_t pipe_map;
+extern struct vm_map kernel_map_store;
+#define	kernel_map	(&kernel_map_store)
+extern struct vm_map exec_map_store;
+#define	exec_map	(&exec_map_store)
+extern struct vm_map pipe_map_store;
+#define	pipe_map	(&pipe_map_store)
 extern struct vmem *kernel_arena;
 extern struct vmem *kmem_arena;
 extern struct vmem *buffer_arena;
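
Note: the header now exports static storage plus a macro instead of a pointer variable, so existing code that uses kernel_map, exec_map, or pipe_map as a pointer expression compiles unchanged, and the maps exist before any allocator is up. A standalone illustration of the pattern, with a deliberately simplified struct (the real struct vm_map has many more fields):

#include <stdio.h>

struct vm_map {
	unsigned long min_offset, max_offset;	/* stand-in fields */
};

struct vm_map kernel_map_store;		/* static storage, no allocation */
#define	kernel_map	(&kernel_map_store)	/* pointer-style uses unchanged */

int
main(void)
{
	kernel_map->min_offset = 0x1000;	/* reads like the old vm_map_t */
	printf("min=%#lx\n", kernel_map->min_offset);
	return (0);
}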
(-)b/sys/vm/vm_map.c (-58 / +12 lines)
@@ -128,10 +128,8 @@ __FBSDID("$FreeBSD$");
 static struct mtx map_sleep_mtx;
 static uma_zone_t mapentzone;
 static uma_zone_t kmapentzone;
-static uma_zone_t mapzone;
 static uma_zone_t vmspace_zone;
 static int vmspace_zinit(void *mem, int size, int flags);
-static int vm_map_zinit(void *mem, int ize, int flags);
 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
     vm_offset_t max);
 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
@@ -142,7 +140,6 @@ static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
     vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
 #ifdef INVARIANTS
-static void vm_map_zdtor(void *mem, int size, void *arg);
 static void vmspace_zdtor(void *mem, int size, void *arg);
 #endif
 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
@@ -198,13 +195,6 @@ void
 vm_map_startup(void)
 {
 	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
-	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
-#ifdef INVARIANTS
-	    vm_map_zdtor,
-#else
-	    NULL,
-#endif
-	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
@@ -223,24 +213,16 @@ static int
 vmspace_zinit(void *mem, int size, int flags)
 {
 	struct vmspace *vm;
+	vm_map_t map;
 
 	vm = (struct vmspace *)mem;
+	map = &vm->vm_map;
 
-	vm->vm_map.pmap = NULL;
-	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
-	PMAP_LOCK_INIT(vmspace_pmap(vm));
-	return (0);
-}
-
-static int
-vm_map_zinit(void *mem, int size, int flags)
-{
-	vm_map_t map;
-
-	map = (vm_map_t)mem;
 	memset(map, 0, sizeof(*map));
-	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
+	mtx_init(&map->system_mtx, "vm map (system)", NULL,
+	    MTX_DEF | MTX_DUPOK);
 	sx_init(&map->lock, "vm map (user)");
+	PMAP_LOCK_INIT(vmspace_pmap(vm));
 	return (0);
 }
 
@@ -251,21 +233,10 @@ vmspace_zdtor(void *mem, int size, void *arg)
 	struct vmspace *vm;
 
 	vm = (struct vmspace *)mem;
-
-	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
-}
-static void
-vm_map_zdtor(void *mem, int size, void *arg)
-{
-	vm_map_t map;
-
-	map = (vm_map_t)mem;
-	KASSERT(map->nentries == 0,
-	    ("map %p nentries == %d on free.",
-	    map, map->nentries));
-	KASSERT(map->size == 0,
-	    ("map %p size == %lu on free.",
-	    map, (unsigned long)map->size));
+	KASSERT(vm->vm_map.nentries == 0,
+	    ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
+	KASSERT(vm->vm_map.size == 0,
+	    ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
 }
 #endif	/* INVARIANTS */
 
@@ -869,24 +840,6 @@ vmspace_resident_count(struct vmspace *vmspace)
 	return pmap_resident_count(vmspace_pmap(vmspace));
 }
 
-/*
- *	vm_map_create:
- *
- *	Creates and returns a new empty VM map with
- *	the given physical map structure, and having
- *	the given lower and upper address bounds.
- */
-vm_map_t
-vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
-{
-	vm_map_t result;
-
-	result = uma_zalloc(mapzone, M_WAITOK);
-	CTR1(KTR_VM, "vm_map_create: %p", result);
-	_vm_map_init(result, pmap, min, max);
-	return (result);
-}
-
 /*
  * Initialize an existing vm_map structure
  * such as that in the vmspace structure.
@@ -917,8 +870,9 @@ vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
 {
 
 	_vm_map_init(map, pmap, min, max);
-	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
-	sx_init(&map->lock, "user map");
+	mtx_init(&map->system_mtx, "vm map (system)", NULL,
+	    MTX_DEF | MTX_DUPOK);
+	sx_init(&map->lock, "vm map (user)");
 }
 
 /*
(-)b/sys/vm/vm_map.h (-1 lines)
@@ -455,7 +455,6 @@ vm_map_entry_read_succ(void *token, struct vm_map_entry *const clone,
 
 #ifdef _KERNEL
 boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
-vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t);
 int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t);
 int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t,
     vm_offset_t, int, vm_prot_t, vm_prot_t, int);
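
Note: with this prototype removed, vm_map_create() is gone from the kernel API; maps are either statically allocated or embedded (as in struct vmspace) and set up with vm_map_init(), which remains exported. A hypothetical out-of-tree consumer would change roughly as follows; my_map_store and my_init are illustrative names only:

static struct vm_map my_map_store;	/* storage replaces the allocation */

static void
my_init(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	/* Before: my_map = vm_map_create(pmap, min, max); */
	vm_map_init(&my_map_store, pmap, min, max);
}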
