
(-)b/sys/kern/subr_vmem.c (-2 / +2 lines)
Lines 636-642 vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
 	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
 	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
 		if (kmem_back_domain(domain, kernel_object, addr, bytes,
-		    M_NOWAIT | M_USE_RESERVE) == 0) {
+		    M_NOWAIT | M_USE_RESERVE, false) == 0) {
 			mtx_unlock(&vmem_bt_lock);
 			return ((void *)addr);
 		}
Lines 682-688 vmem_startup(void)
 	    UMA_ALIGN_PTR, UMA_ZONE_VM);
 	vmem_bt_zone = uma_zcreate("vmem btag",
 	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
-	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
+	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE | UMA_ZONE_NOBUCKET);
 #ifndef UMA_MD_SMALL_ALLOC
 	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
 	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
(-)b/sys/sys/proc.h (+1 lines)
Lines 490-495 do { \
 #define	TDP_UIOHELD	0x10000000 /* Current uio has pages held in td_ma */
 #define	TDP_FORKING	0x20000000 /* Thread is being created through fork() */
 #define	TDP_EXECVMSPC	0x40000000 /* Execve destroyed old vmspace */
+#define	TDP_MEMGUARD	0x80000000
 
 /*
  * Reasons that the current thread can not be run yet.
(-)b/sys/vm/memguard.c (-2 / +25 lines)
Lines 49-54 __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/malloc.h>
+#include <sys/proc.h>
 #include <sys/sysctl.h>
 #include <sys/vmem.h>
 #include <sys/vmmeter.h>
Lines 286-298 v2sizev(vm_offset_t va)
 void *
 memguard_alloc(unsigned long req_size, int flags)
 {
+	struct thread *td;
 	vm_offset_t addr, origaddr;
 	u_long size_p, size_v;
 	int do_guard, rv;
 
+	td = curthread;
+	if ((td->td_pflags & TDP_MEMGUARD) != 0)
+		return (NULL);
+
 	size_p = round_page(req_size);
 	if (size_p == 0)
 		return (NULL);
+
 	/*
 	 * To ensure there are holes on both sides of the allocation,
 	 * request 2 extra pages of KVA.  We will only actually add a
Lines 348-354 memguard_alloc(unsigned long req_size, int flags)
 	addr = origaddr;
 	if (do_guard)
 		addr += PAGE_SIZE;
-	rv = kmem_back(kernel_object, addr, size_p, flags);
+	if ((td->td_pflags & TDP_MEMGUARD) != 0)
+		rv = kmem_back_locked(kernel_object, addr, size_p, flags);
+	else {
+		td->td_pflags |= TDP_MEMGUARD;
+		rv = kmem_back(kernel_object, addr, size_p, flags);
+		td->td_pflags &= ~TDP_MEMGUARD;
+	}
 	if (rv != KERN_SUCCESS) {
 		vmem_xfree(memguard_arena, origaddr, size_v);
 		memguard_fail_pgs++;
Lines 389-399 is_memguard_addr(void *addr)
 void
 memguard_free(void *ptr)
 {
+	struct thread *td;
 	vm_offset_t addr;
 	u_long req_size, size, sizev;
 	char *temp;
 	int i;
 
+	td = curthread;
+
 	addr = trunc_page((uintptr_t)ptr);
 	req_size = *v2sizep(addr);
 	sizev = *v2sizev(addr);
Lines 418-429 memguard_free(void *ptr)
 	 * vm_map lock to serialize updates to memguard_wasted, since
 	 * we had the lock at increment.
 	 */
-	kmem_unback(kernel_object, addr, size);
+	if ((td->td_pflags & TDP_MEMGUARD) != 0)
+		kmem_unback_locked(kernel_object, addr, size);
+	else {
+		td->td_pflags |= TDP_MEMGUARD;
+		kmem_unback(kernel_object, addr, size);
+		td->td_pflags &= ~TDP_MEMGUARD;
+	}
 	if (sizev > size)
 		addr -= PAGE_SIZE;
 	vmem_xfree(memguard_arena, addr, sizev);
 	if (req_size < PAGE_SIZE)
 		memguard_wasted -= (PAGE_SIZE - req_size);
+
 }
 
 /*
(-)b/sys/vm/vm_extern.h (-1 / +3 lines)
Lines 72-79 void kmem_free(vm_offset_t addr, vm_size_t size);
 
 /* This provides memory for previously allocated address space. */
 int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
-int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int);
+int kmem_back_locked(vm_object_t, vm_offset_t, vm_size_t, int);
+int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int, bool);
 void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
+void kmem_unback_locked(vm_object_t, vm_offset_t, vm_size_t);
 
 /* Bootstrapping. */
 void kmem_bootstrap_free(vm_offset_t, vm_size_t);
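
Editor's note, not part of the patch: the sketch below only illustrates how a caller is expected to pair the existing entry points with the new *_locked variants declared above, modeled on the memguard_alloc() hunk earlier in this patch. The function example_back() and its exact include list are hypothetical; TDP_MEMGUARD, kmem_back() and kmem_back_locked() are the names introduced by this patch. Since td_pflags is private to the owning thread, setting and clearing TDP_MEMGUARD needs no locking.

/*
 * Hypothetical usage sketch only -- mirrors the memguard_alloc() change.
 * example_back() is not a function added by this patch.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static int
example_back(vm_offset_t addr, vm_size_t size, int flags)
{
	struct thread *td;
	int rv;

	td = curthread;
	if ((td->td_pflags & TDP_MEMGUARD) != 0) {
		/*
		 * Recursive entry: a kmem_back() already in progress on
		 * this thread holds the kernel_object write lock, so use
		 * the _locked variant and leave the lock alone.
		 */
		rv = kmem_back_locked(kernel_object, addr, size, flags);
	} else {
		/* Flag the thread so a nested call takes the branch above. */
		td->td_pflags |= TDP_MEMGUARD;
		rv = kmem_back(kernel_object, addr, size, flags);
		td->td_pflags &= ~TDP_MEMGUARD;
	}
	return (rv);
}
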
(-)b/sys/vm/vm_kern.c (-15 / +50 lines)
Lines 410-416 kmem_malloc_domain(int domain, vm_size_t size, int flags)
 	if (vmem_alloc(arena, size, flags | M_BESTFIT, &addr))
 		return (0);
 
-	rv = kmem_back_domain(domain, kernel_object, addr, size, flags);
+	rv = kmem_back_domain(domain, kernel_object, addr, size, flags, false);
 	if (rv != KERN_SUCCESS) {
 		vmem_free(arena, addr, size);
 		return (0);
Lines 450-456 kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags)
  */
 int
 kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
-    vm_size_t size, int flags)
+    vm_size_t size, int flags, bool locked)
 {
 	vm_offset_t offset, i;
 	vm_page_t m, mpred;
Lines 468-474 kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
 
 	i = 0;
-	VM_OBJECT_WLOCK(object);
+	if (!locked)
+		VM_OBJECT_WLOCK(object);
 retry:
 	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
 	for (; i < size; i += PAGE_SIZE, mpred = m) {
Lines 483-490 kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 		if (m == NULL) {
 			if ((flags & M_NOWAIT) == 0)
 				goto retry;
-			VM_OBJECT_WUNLOCK(object);
-			kmem_unback(object, addr, i);
+			if (locked)
+				kmem_unback_locked(object, addr, i);
+			else {
+				VM_OBJECT_WUNLOCK(object);
+				kmem_unback(object, addr, i);
+			}
 			return (KERN_NO_SPACE);
 		}
 		KASSERT(vm_phys_domain(m) == domain,
Lines 502-508 kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 			m->oflags |= VPO_KMEM_EXEC;
 #endif
 	}
-	VM_OBJECT_WUNLOCK(object);
+	if (!locked)
+		VM_OBJECT_WUNLOCK(object);
 
 	return (KERN_SUCCESS);
 }
Lines 512-519 kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
  *
  *	Allocate physical pages for the specified virtual address range.
  */
-int
-kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
+static int
+_kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags,
+    bool locked)
 {
 	vm_offset_t end, next, start;
 	int domain, rv;
Lines 537-551 kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
 			domain = 0;
 			next = end;
 		}
-		rv = kmem_back_domain(domain, object, addr, next - addr, flags);
+		rv = kmem_back_domain(domain, object, addr, next - addr, flags,
+		    locked);
 		if (rv != KERN_SUCCESS) {
-			kmem_unback(object, start, addr - start);
+			if (locked)
+				kmem_unback_locked(object, start, addr - start);
+			else
+				kmem_unback(object, start, addr - start);
 			break;
 		}
 	}
 	return (rv);
 }
 
+int
+kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
+{
+
+	return _kmem_back(object, addr, size, flags, false);
+}
+
+int
+kmem_back_locked(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
+{
+
+	return _kmem_back(object, addr, size, flags, true);
+}
+
 /*
Lines 556-562 kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
  *	that is being unmapped.
  */
 static struct vmem *
-_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
+_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size, bool locked)
 {
 	struct vmem *arena;
 	vm_page_t m, next;
Lines 568-577 _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 
 	if (size == 0)
 		return (NULL);
+	if (!locked)
+		VM_OBJECT_WLOCK(object);
 	pmap_remove(kernel_pmap, addr, addr + size);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	end = offset + size;
-	VM_OBJECT_WLOCK(object);
 	m = vm_page_lookup(object, atop(offset));
 	domain = vm_phys_domain(m);
 #if VM_NRESERVLEVEL > 0
Lines 587-593 _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 		vm_page_unwire(m, PQ_NONE);
 		vm_page_free(m);
 	}
-	VM_OBJECT_WUNLOCK(object);
+	if (!locked)
+		VM_OBJECT_WUNLOCK(object);
 
 	return (arena);
 }
Lines 596-604 void
 kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 {
 
-	(void)_kmem_unback(object, addr, size);
+	(void)_kmem_unback(object, addr, size, false);
 }
 
+void
+kmem_unback_locked(vm_object_t object, vm_offset_t addr, vm_size_t size)
+{
+
+	(void)_kmem_unback(object, addr, size, true);
+}
+
+
 /*
Lines 611-617 kmem_free(vm_offset_t addr, vm_size_t size)
 	struct vmem *arena;
 
 	size = round_page(size);
-	arena = _kmem_unback(kernel_object, addr, size);
+	arena = _kmem_unback(kernel_object, addr, size, false);
 	if (arena != NULL)
 		vmem_free(arena, addr, size);
 }
