--- b/sys/kern/subr_vmem.c
+++ b/sys/kern/subr_vmem.c
@@ -636,7 +636,7 @@ vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
 	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
 	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
 		if (kmem_back_domain(domain, kernel_object, addr, bytes,
-		    M_NOWAIT | M_USE_RESERVE) == 0) {
+		    M_NOWAIT | M_USE_RESERVE, false) == 0) {
 			mtx_unlock(&vmem_bt_lock);
 			return ((void *)addr);
 		}
@@ -682,7 +682,7 @@ vmem_startup(void)
 	    UMA_ALIGN_PTR, UMA_ZONE_VM);
 	vmem_bt_zone = uma_zcreate("vmem btag",
 	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
-	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE | UMA_ZONE_NOBUCKET);
 #ifndef UMA_MD_SMALL_ALLOC
 	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
 	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
--- b/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -490,6 +490,7 @@ do { \
 #define	TDP_UIOHELD	0x10000000 /* Current uio has pages held in td_ma */
 #define	TDP_FORKING	0x20000000 /* Thread is being created through fork() */
 #define	TDP_EXECVMSPC	0x40000000 /* Execve destroyed old vmspace */
+#define	TDP_MEMGUARD	0x80000000 /* Thread is in memguard kmem_back/unback */
 
 /*
  * Reasons that the current thread can not be run yet.
--- b/sys/vm/memguard.c
+++ b/sys/vm/memguard.c
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/proc.h>
 #include
 #include
 #include
@@ -286,13 +287,19 @@ v2sizev(vm_offset_t va)
 void *
 memguard_alloc(unsigned long req_size, int flags)
 {
+	struct thread *td;
 	vm_offset_t addr, origaddr;
 	u_long size_p, size_v;
 	int do_guard, rv;
 
+	td = curthread;
+	if ((td->td_pflags & TDP_MEMGUARD) != 0)
+		return (NULL);
+
 	size_p = round_page(req_size);
 	if (size_p == 0)
 		return (NULL);
+
 	/*
 	 * To ensure there are holes on both sides of the allocation,
 	 * request 2 extra pages of KVA.  We will only actually add a
@@ -348,7 +355,13 @@ memguard_alloc(unsigned long req_size, int flags)
 	addr = origaddr;
 	if (do_guard)
 		addr += PAGE_SIZE;
-	rv = kmem_back(kernel_object, addr, size_p, flags);
+	if ((td->td_pflags & TDP_MEMGUARD) != 0)
+		rv = kmem_back_locked(kernel_object, addr, size_p, flags);
+	else {
+		td->td_pflags |= TDP_MEMGUARD;
+		rv = kmem_back(kernel_object, addr, size_p, flags);
+		td->td_pflags &= ~TDP_MEMGUARD;
+	}
 	if (rv != KERN_SUCCESS) {
 		vmem_xfree(memguard_arena, origaddr, size_v);
 		memguard_fail_pgs++;
@@ -389,11 +402,14 @@ is_memguard_addr(void *addr)
 void
 memguard_free(void *ptr)
 {
+	struct thread *td;
 	vm_offset_t addr;
 	u_long req_size, size, sizev;
 	char *temp;
 	int i;
 
+	td = curthread;
+
 	addr = trunc_page((uintptr_t)ptr);
 	req_size = *v2sizep(addr);
 	sizev = *v2sizev(addr);
@@ -418,12 +434,19 @@ memguard_free(void *ptr)
 	 * vm_map lock to serialize updates to memguard_wasted, since
 	 * we had the lock at increment.
 	 */
-	kmem_unback(kernel_object, addr, size);
+	if ((td->td_pflags & TDP_MEMGUARD) != 0)
+		kmem_unback_locked(kernel_object, addr, size);
+	else {
+		td->td_pflags |= TDP_MEMGUARD;
+		kmem_unback(kernel_object, addr, size);
+		td->td_pflags &= ~TDP_MEMGUARD;
+	}
 	if (sizev > size)
 		addr -= PAGE_SIZE;
 	vmem_xfree(memguard_arena, addr, sizev);
 	if (req_size < PAGE_SIZE)
 		memguard_wasted -= (PAGE_SIZE - req_size);
+
 }
--- b/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -72,8 +72,10 @@ void kmem_free(vm_offset_t addr, vm_size_t size);
 
 /* This provides memory for previously allocated address space. */
 int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
-int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int);
+int kmem_back_locked(vm_object_t, vm_offset_t, vm_size_t, int);
+int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int, bool);
 void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
+void kmem_unback_locked(vm_object_t, vm_offset_t, vm_size_t);
 
 /* Bootstrapping. */
 void kmem_bootstrap_free(vm_offset_t, vm_size_t);
--- b/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -410,7 +410,7 @@ kmem_malloc_domain(int domain, vm_size_t size, int flags)
 	if (vmem_alloc(arena, size, flags | M_BESTFIT, &addr))
 		return (0);
-	rv = kmem_back_domain(domain, kernel_object, addr, size, flags);
+	rv = kmem_back_domain(domain, kernel_object, addr, size, flags, false);
 	if (rv != KERN_SUCCESS) {
 		vmem_free(arena, addr, size);
 		return (0);
 	}
@@ -450,7 +450,7 @@ kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags)
  */
 int
 kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
-    vm_size_t size, int flags)
+    vm_size_t size, int flags, bool locked)
 {
 	vm_offset_t offset, i;
 	vm_page_t m, mpred;
@@ -468,7 +468,8 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
 
 	i = 0;
-	VM_OBJECT_WLOCK(object);
+	if (!locked)
+		VM_OBJECT_WLOCK(object);
 retry:
 	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
 	for (; i < size; i += PAGE_SIZE, mpred = m) {
@@ -483,8 +484,12 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 		if (m == NULL) {
 			if ((flags & M_NOWAIT) == 0)
 				goto retry;
-			VM_OBJECT_WUNLOCK(object);
-			kmem_unback(object, addr, i);
+			if (locked)
+				kmem_unback_locked(object, addr, i);
+			else {
+				VM_OBJECT_WUNLOCK(object);
+				kmem_unback(object, addr, i);
+			}
 			return (KERN_NO_SPACE);
 		}
 		KASSERT(vm_phys_domain(m) == domain,
@@ -502,7 +507,8 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 			m->oflags |= VPO_KMEM_EXEC;
 #endif
 	}
-	VM_OBJECT_WUNLOCK(object);
+	if (!locked)
+		VM_OBJECT_WUNLOCK(object);
 
 	return (KERN_SUCCESS);
 }
@@ -512,8 +518,9 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
  *
  *	Allocate physical pages for the specified virtual address range.
  */
-int
-kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
+static int
+_kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags,
+    bool locked)
 {
 	vm_offset_t end, next, start;
 	int domain, rv;
@@ -537,15 +544,33 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
 			domain = 0;
 			next = end;
 		}
-		rv = kmem_back_domain(domain, object, addr, next - addr, flags);
+		rv = kmem_back_domain(domain, object, addr, next - addr, flags,
+		    locked);
 		if (rv != KERN_SUCCESS) {
-			kmem_unback(object, start, addr - start);
+			if (locked)
+				kmem_unback_locked(object, start, addr - start);
+			else
+				kmem_unback(object, start, addr - start);
 			break;
 		}
 	}
 	return (rv);
 }
 
+int
+kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
+{
+
+	return (_kmem_back(object, addr, size, flags, false));
+}
+
+int
+kmem_back_locked(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
+{
+
+	return (_kmem_back(object, addr, size, flags, true));
+}
+
 /*
  * kmem_unback:
  *
@@ -556,7 +581,7 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
  *	that is being unmapped.
  */
 static struct vmem *
-_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
+_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size, bool locked)
 {
 	struct vmem *arena;
 	vm_page_t m, next;
@@ -568,10 +593,11 @@ _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 	if (size == 0)
 		return (NULL);
 
+	if (!locked)
+		VM_OBJECT_WLOCK(object);
 	pmap_remove(kernel_pmap, addr, addr + size);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	end = offset + size;
-	VM_OBJECT_WLOCK(object);
 	m = vm_page_lookup(object, atop(offset));
 	domain = vm_phys_domain(m);
 #if VM_NRESERVLEVEL > 0
@@ -587,7 +613,8 @@ _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 		vm_page_unwire(m, PQ_NONE);
 		vm_page_free(m);
 	}
-	VM_OBJECT_WUNLOCK(object);
+	if (!locked)
+		VM_OBJECT_WUNLOCK(object);
 
 	return (arena);
 }
@@ -596,9 +623,17 @@
 void
 kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 {
 
-	(void)_kmem_unback(object, addr, size);
+	(void)_kmem_unback(object, addr, size, false);
 }
+
+void
+kmem_unback_locked(vm_object_t object, vm_offset_t addr, vm_size_t size)
+{
+
+	(void)_kmem_unback(object, addr, size, true);
+}
+
 /*
  * kmem_free:
  *
@@ -611,7 +646,7 @@ kmem_free(vm_offset_t addr, vm_size_t size)
 {
 	struct vmem *arena;
 	size = round_page(size);
-	arena = _kmem_unback(kernel_object, addr, size);
+	arena = _kmem_unback(kernel_object, addr, size, false);
 	if (arena != NULL)
 		vmem_free(arena, addr, size);
 }
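
For reference, the memguard.c changes above amount to a per-thread recursion guard: TDP_MEMGUARD is set around the unlocked kmem_back()/kmem_unback() calls so that a nested allocation on the same thread (for example, for the vmem boundary tags that back the guarded mapping) either bails out early or takes the *_locked variants instead of re-entering kmem(9) and deadlocking on the kernel object lock. Below is a minimal sketch of that pattern, condensed from the memguard_alloc() hunk; the helper name memguard_back_one() is illustrative only and is not part of the change.

/*
 * Illustrative sketch only, condensed from the memguard_alloc() hunk above:
 * mark the thread with TDP_MEMGUARD for the duration of the unlocked
 * kmem_back() call; a re-entrant caller on the same thread sees the flag
 * and uses the already-locked variant instead of recursing.
 */
static int
memguard_back_one(vm_offset_t addr, vm_size_t size, int flags)
{
	struct thread *td;
	int rv;

	td = curthread;
	if ((td->td_pflags & TDP_MEMGUARD) != 0)
		return (kmem_back_locked(kernel_object, addr, size, flags));

	td->td_pflags |= TDP_MEMGUARD;
	rv = kmem_back(kernel_object, addr, size, flags);
	td->td_pflags &= ~TDP_MEMGUARD;
	return (rv);
}

Because td_pflags is private to the owning thread, the flag can be set and cleared without any additional locking.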