
(-)amd64/include/vmparam.h (-6 / +14 lines)
@@ -101,16 +101,24 @@
 #define	VM_FREEPOOL_DIRECT	1
 
 /*
- * Create two free page lists: VM_FREELIST_DEFAULT is for physical
- * pages that are above the largest physical address that is
- * accessible by ISA DMA and VM_FREELIST_ISADMA is for physical pages
- * that are below that address.
+ * Create up to three free page lists: VM_FREELIST_DMA32 is for physical pages
+ * that have physical addresses below 4G but are not accessible by ISA DMA,
+ * and VM_FREELIST_ISADMA is for physical pages that are accessible by ISA
+ * DMA.
  */
-#define	VM_NFREELIST		2
+#define	VM_NFREELIST		3
 #define	VM_FREELIST_DEFAULT	0
-#define	VM_FREELIST_ISADMA	1
+#define	VM_FREELIST_DMA32	1
+#define	VM_FREELIST_ISADMA	2
 
 /*
+ * Create the DMA32 free list only if the number of physical pages above
+ * physical address 4G is at least 16M, which amounts to 64GB of physical
+ * memory.
+ */
+#define	VM_DMA32_THRESHOLD	16777216
+
+/*
  * An allocation size of 16MB is supported in order to optimize the
  * use of the direct map by UMA.  Specifically, a cache line contains
  * at most 8 PDEs, collectively mapping 16MB of physical memory.  By
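
Note: the threshold arithmetic in the new comment checks out: 16777216 pages
is 2^24 pages, and at 4096 bytes per page that is 2^24 * 2^12 = 2^36 bytes =
64GB.  The DMA32 list is therefore only created once more than 64GB of
physical memory sits above the 4G boundary.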
(-)mips/include/vmparam.h (-11 / +9 lines)
@@ -160,13 +160,11 @@
 #define	VM_FREEPOOL_DIRECT	1
 
 /*
- * we support 2 free lists:
- *
- *	- DEFAULT for direct mapped (KSEG0) pages.
- *	  Note: This usage of DEFAULT may be misleading because we use
- *	  DEFAULT for allocating direct mapped pages. The normal page
- *	  allocations use HIGHMEM if available, and then DEFAULT.
- *	- HIGHMEM for other pages
+ * Create up to two free lists on !__mips_n64: VM_FREELIST_DEFAULT is for
+ * physical pages that are above the largest physical address that is
+ * accessible through the direct map (KSEG0) and VM_FREELIST_LOWMEM is for
+ * physical pages that are below that address.  VM_LOWMEM_BOUNDARY is the
+ * physical address for the end of the direct map (KSEG0).
  */
 #ifdef __mips_n64
 #define	VM_NFREELIST		1
@@ -174,10 +172,10 @@
 #define	VM_FREELIST_DIRECT	VM_FREELIST_DEFAULT
 #else
 #define	VM_NFREELIST		2
-#define	VM_FREELIST_DEFAULT	1
-#define	VM_FREELIST_HIGHMEM	0
-#define	VM_FREELIST_DIRECT	VM_FREELIST_DEFAULT
-#define	VM_HIGHMEM_ADDRESS	((vm_paddr_t)0x20000000)
+#define	VM_FREELIST_DEFAULT	0
+#define	VM_FREELIST_LOWMEM	1
+#define	VM_FREELIST_DIRECT	VM_FREELIST_LOWMEM
+#define	VM_LOWMEM_BOUNDARY	((vm_paddr_t)0x20000000)
 #endif
 
 /*
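
Note: the renumbering here (DEFAULT moves from index 1 to 0, and LOWMEM takes
over from HIGHMEM with the opposite sense) matches the new
CTASSERT(VM_FREELIST_DEFAULT == 0) in vm_phys.c, which lets vm_phys_init()
treat DEFAULT as the fallback case of its cascaded #ifdef blocks.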
(-)vm/vm_phys.c (-46 / +144 lines)
@@ -101,8 +101,22 @@ MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fict
 static struct vm_freelist
     vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
 
-static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;
+static int vm_nfreelists;
 
+/*
+ * Mapping from VM_FREELIST_* to flind
+ */
+static int vm_freelist_to_flind[VM_NFREELIST];
+
+CTASSERT(VM_FREELIST_DEFAULT == 0);
+
+#if defined(VM_FREELIST_ISADMA) && defined(VM_FREELIST_LOWMEM)
+CTASSERT(16777216 < VM_LOWMEM_BOUNDARY);
+#endif
+#if defined(VM_FREELIST_LOWMEM) && defined(VM_FREELIST_DMA32)
+CTASSERT(VM_LOWMEM_BOUNDARY < ((vm_paddr_t)1 << 32));
+#endif
+
 static int cnt_prezero;
 SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
     &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");
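
Note: the two conditional CTASSERTs pin down the required ordering of the
free list boundaries, 16777216 (ISA DMA) < VM_LOWMEM_BOUNDARY <
(vm_paddr_t)1 << 32, on any configuration that defines adjacent pairs of
them; the splitting cascade added to vm_phys_add_seg() below relies on the
cuts happening in ascending address order.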
@@ -120,9 +134,8 @@ SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
 
 static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
     int order);
-static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind,
-    int domain);
-static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
+static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
+static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
 static int vm_phys_paddr_to_segind(vm_paddr_t pa);
 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
     int order);
@@ -298,7 +311,7 @@ vm_freelist_rem(struct vm_freelist *fl, vm_page_t
  * Create a physical memory segment.
  */
 static void
-_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
+_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
 {
 	struct vm_phys_seg *seg;
 
@@ -314,16 +327,15 @@
 	seg->start = start;
 	seg->end = end;
 	seg->domain = domain;
-	seg->free_queues = &vm_phys_free_queues[domain][flind];
 }
 
 static void
-vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
+vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
 {
 	int i;
 
 	if (mem_affinity == NULL) {
-		_vm_phys_create_seg(start, end, flind, 0);
+		_vm_phys_create_seg(start, end, 0);
 		return;
 	}
 
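
Note: seg->free_queues can no longer be assigned at segment-creation time,
because a segment's flind is not known until every segment has been added
(the DMA32 decision depends on the total page count above 4G).  The
assignment moves into vm_phys_init().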
@@ -336,11 +348,11 @@
 			panic("No affinity info for start %jx",
 			    (uintmax_t)start);
 		if (mem_affinity[i].end >= end) {
-			_vm_phys_create_seg(start, end, flind,
+			_vm_phys_create_seg(start, end,
 			    mem_affinity[i].domain);
 			break;
 		}
-		_vm_phys_create_seg(start, mem_affinity[i].end, flind,
+		_vm_phys_create_seg(start, mem_affinity[i].end,
 		    mem_affinity[i].domain);
 		start = mem_affinity[i].end;
 	}
@@ -352,41 +364,43 @@
 void
 vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
 {
+	vm_paddr_t paddr;
 
 	KASSERT((start & PAGE_MASK) == 0,
 	    ("vm_phys_define_seg: start is not page aligned"));
 	KASSERT((end & PAGE_MASK) == 0,
 	    ("vm_phys_define_seg: end is not page aligned"));
+
+	/*
+	 * Split the physical memory segment if it spans two or more free
+	 * list boundaries.
+	 */
+	paddr = start;
 #ifdef	VM_FREELIST_ISADMA
-	if (start < 16777216) {
-		if (end > 16777216) {
-			vm_phys_create_seg(start, 16777216,
-			    VM_FREELIST_ISADMA);
-			vm_phys_create_seg(16777216, end, VM_FREELIST_DEFAULT);
-		} else
-			vm_phys_create_seg(start, end, VM_FREELIST_ISADMA);
-		if (VM_FREELIST_ISADMA >= vm_nfreelists)
-			vm_nfreelists = VM_FREELIST_ISADMA + 1;
-	} else
+	if (paddr < 16777216 && end > 16777216) {
+		vm_phys_create_seg(paddr, 16777216);
+		paddr = 16777216;
+	}
 #endif
-#ifdef	VM_FREELIST_HIGHMEM
-	if (end > VM_HIGHMEM_ADDRESS) {
-		if (start < VM_HIGHMEM_ADDRESS) {
-			vm_phys_create_seg(start, VM_HIGHMEM_ADDRESS,
-			    VM_FREELIST_DEFAULT);
-			vm_phys_create_seg(VM_HIGHMEM_ADDRESS, end,
-			    VM_FREELIST_HIGHMEM);
-		} else
-			vm_phys_create_seg(start, end, VM_FREELIST_HIGHMEM);
-		if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
-			vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
-	} else
+#ifdef	VM_FREELIST_LOWMEM
+	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
+		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
+		paddr = VM_LOWMEM_BOUNDARY;
+	}
 #endif
-	vm_phys_create_seg(start, end, VM_FREELIST_DEFAULT);
+#ifdef	VM_FREELIST_DMA32
+	if (paddr < ((vm_paddr_t)1 << 32) && end > ((vm_paddr_t)1 << 32)) {
+		vm_phys_create_seg(paddr, ((vm_paddr_t)1 << 32));
+		paddr = (vm_paddr_t)1 << 32;
+	}
+#endif
+	vm_phys_create_seg(paddr, end);
 }
 
 /*
  * Initialize the physical memory allocator.
+ *
+ * Requires that vm_page_array is initialized!
  */
 void
 vm_phys_init(void)
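
Note: the splitting logic is now a uniform cascade: paddr tracks how much of
[start, end) has already been carved off, and each boundary that the segment
straddles produces one cut.  For example, on amd64 a segment spanning
[0, 8G) is cut at 16777216 and again at 4G, yielding three segments; the old
code could split a segment at most once.  The vm_nfreelists bookkeeping also
disappears from this function and is instead derived in vm_phys_init().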
@@ -393,23 +407,102 @@ vm_phys_init(void)
 {
 	struct vm_freelist *fl;
 	struct vm_phys_seg *seg;
-#ifdef VM_PHYSSEG_SPARSE
-	long pages;
+	u_long npages;
+	int dom, flind, freelist, oind, pind, segind;
+
+	/*
+	 * Compute the number of free lists, and generate the mapping from the
+	 * manifest constants VM_FREELIST_* to the free list indices.
+	 */
+	npages = 0;
+	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
+		seg = &vm_phys_segs[segind];
+#ifdef	VM_FREELIST_ISADMA
+		if (seg->end <= 16777216)
+			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
+		else
 #endif
-	int dom, flind, oind, pind, segind;
+#ifdef	VM_FREELIST_LOWMEM
+		if (seg->end <= VM_LOWMEM_BOUNDARY)
+			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
+		else
+#endif
+#ifdef	VM_FREELIST_DMA32
+		if (
+#ifdef	VM_DMA32_THRESHOLD
+		    /*
+		     * Create the DMA32 free list only if the amount of
+		     * physical memory above physical address 4G exceeds the
+		     * given threshold.
+		     */
+		    npages > VM_DMA32_THRESHOLD &&
+#endif
+		    seg->end <= ((vm_paddr_t)1 << 32))
+			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
+		else
+#endif
+		{
+			npages += atop(seg->end - seg->start);
+			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
+		}
+	}
+	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
+		vm_freelist_to_flind[freelist] +=
+		    vm_freelist_to_flind[freelist - 1];
+	}
+	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
+	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
+	/* Convert the running count of free lists to a free list index. */
+	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
+		vm_freelist_to_flind[freelist]--;
 
+	/*
+	 * Initialize the first_page and free_queues fields of each physical
+	 * memory segment.
+	 */
 #ifdef VM_PHYSSEG_SPARSE
-	pages = 0;
+	npages = 0;
 #endif
 	for (segind = 0; segind < vm_phys_nsegs; segind++) {
 		seg = &vm_phys_segs[segind];
 #ifdef VM_PHYSSEG_SPARSE
-		seg->first_page = &vm_page_array[pages];
-		pages += atop(seg->end - seg->start);
+		seg->first_page = &vm_page_array[npages];
+		npages += atop(seg->end - seg->start);
 #else
 		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
 #endif
+#ifdef	VM_FREELIST_ISADMA
+		if (seg->end <= 16777216) {
+			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
+			KASSERT(flind >= 0,
+			    ("vm_phys_init: ISADMA flind < 0"));
+		} else
+#endif
+#ifdef	VM_FREELIST_LOWMEM
+		if (seg->end <= VM_LOWMEM_BOUNDARY) {
+			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
+			KASSERT(flind >= 0,
+			    ("vm_phys_init: LOWMEM flind < 0"));
+		} else
+#endif
+#ifdef	VM_FREELIST_DMA32
+		if (seg->end <= ((vm_paddr_t)1 << 32)) {
+			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
+			KASSERT(flind >= 0,
+			    ("vm_phys_init: DMA32 flind < 0"));
+		} else
+#endif
+		{
+			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
+			KASSERT(flind >= 0,
+			    ("vm_phys_init: DEFAULT flind < 0"));
+		}
+		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
 	}
+
+	/*
+	 * Initialize the free queues.
+	 */
 	for (dom = 0; dom < vm_ndomains; dom++) {
 		for (flind = 0; flind < vm_nfreelists; flind++) {
 			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
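
Note: the mapping pass is a running count that is then converted into
indices.  Below is a standalone sketch of the computation (not part of the
patch; the initial array is a hypothetical pass-1 result), using the amd64
constants DEFAULT=0, DMA32=1, ISADMA=2 on a machine whose memory above 4G
stays under VM_DMA32_THRESHOLD, so the DMA32 slot is never set:

	#include <stdio.h>

	#define	NFREELIST	3	/* DEFAULT=0, DMA32=1, ISADMA=2 */

	int
	main(void)
	{
		int flind[NFREELIST] = { 1, 0, 1 };	/* pass-1 result */
		int i, nfree;

		for (i = 1; i < NFREELIST; i++)		/* running count */
			flind[i] += flind[i - 1];	/* -> { 1, 1, 2 } */
		nfree = flind[NFREELIST - 1];		/* vm_nfreelists == 2 */
		for (i = 0; i < NFREELIST; i++)		/* count -> index */
			flind[i]--;			/* -> { 0, 0, 1 } */
		printf("nfreelists=%d map={%d,%d,%d}\n", nfree,
		    flind[0], flind[1], flind[2]);
		return (0);
	}

The net effect is that a list that was never created aliases the list with
the next lower index: here VM_FREELIST_DMA32 maps to the same flind as
VM_FREELIST_DEFAULT, so a lookup through vm_freelist_to_flind never hits a
hole in the table.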
@@ -419,6 +512,7 @@ vm_phys_init(void)
 			}
 		}
 	}
+
 	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
 }
 
@@ -498,25 +592,29 @@ vm_phys_alloc_pages(int pool, int order)
 }
 
 /*
- * Find and dequeue a free page on the given free list, with the
- * specified pool and order
+ * Allocate a contiguous, power of two-sized set of physical pages from the
+ * specified free list.  The free list must be specified using one of the
+ * manifest constants VM_FREELIST_*.
+ *
+ * The free page queues must be locked.
  */
 vm_page_t
-vm_phys_alloc_freelist_pages(int flind, int pool, int order)
+vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
 {
 	vm_page_t m;
 	int dom, domain;
 
-	KASSERT(flind < VM_NFREELIST,
-	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind));
+	KASSERT(freelist < VM_NFREELIST,
+	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
+	    freelist));
 	KASSERT(pool < VM_NFREEPOOL,
 	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
 	KASSERT(order < VM_NFREEORDER,
 	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
-
 	for (dom = 0; dom < vm_ndomains; dom++) {
 		domain = vm_rr_selectdomain();
-		m = vm_phys_alloc_domain_pages(domain, flind, pool, order);
+		m = vm_phys_alloc_domain_pages(domain,
+		    vm_freelist_to_flind[freelist], pool, order);
 		if (m != NULL)
 			return (m);
 	}
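
Note: callers now pass a VM_FREELIST_* manifest constant instead of a raw
table index, and the translation to flind happens inside the allocator.  A
hypothetical call site:

	m = vm_phys_alloc_freelist_pages(VM_FREELIST_DMA32,
	    VM_FREEPOOL_DEFAULT, 0);

On a machine where the DMA32 list was never created,
vm_freelist_to_flind[VM_FREELIST_DMA32] names the same queues as
VM_FREELIST_DEFAULT, so the request degrades gracefully rather than indexing
a nonexistent free list.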
(-)vm/vm_phys.h (-1 / +1 lines)
@@ -72,7 +72,7 @@ void vm_phys_add_page(vm_paddr_t pa);
 void vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end);
 vm_page_t vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary);
-vm_page_t vm_phys_alloc_freelist_pages(int flind, int pool, int order);
+vm_page_t vm_phys_alloc_freelist_pages(int freelist, int pool, int order);
 vm_page_t vm_phys_alloc_pages(int pool, int order);
 boolean_t vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high);
 int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
