Lines 101-108 (now 101-122): MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fict

 static struct vm_freelist
     vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
 
-static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;
+static int vm_nfreelists;
 
+/*
+ * Mapping from VM_FREELIST_* to flind
+ */
+static int vm_freelist_to_flind[VM_NFREELIST];
+
+CTASSERT(VM_FREELIST_DEFAULT == 0);
+
+#if defined(VM_FREELIST_ISADMA) && defined(VM_FREELIST_LOWMEM)
+CTASSERT(16777216 < VM_LOWMEM_BOUNDARY);
+#endif
+#if defined(VM_FREELIST_LOWMEM) && defined(VM_FREELIST_DMA32)
+CTASSERT(VM_LOWMEM_BOUNDARY < ((vm_paddr_t)1 << 32));
+#endif
+
 static int cnt_prezero;
 SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
     &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");
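This hunk stops maintaining vm_nfreelists incrementally (it was seeded with VM_FREELIST_DEFAULT + 1 and bumped as segments were registered) and leaves it to be computed once in vm_phys_init(). The new vm_freelist_to_flind[] table maps the machine-dependent manifest constants VM_FREELIST_* to the dense free-list indices ("flinds") actually in use. The CTASSERTs pin down the ordering the rest of the patch assumes: the 16 MB ISA DMA boundary (16777216) lies below VM_LOWMEM_BOUNDARY, which lies below the 4 GB DMA32 boundary. As a minimal sketch of what a CTASSERT buys here (hypothetical macro name; the kernel's own CTASSERT is the real thing):

/*
 * Classic compile-time assertion: the typedef'd array has a negative
 * size, and therefore fails to compile, exactly when the condition is
 * false.
 */
#define MY_CTASSERT(x)  typedef char my_ctassert_failed[(x) ? 1 : -1]

/* With a hypothetical 32 MB low-memory boundary this compiles ... */
MY_CTASSERT(16777216 < 32 * 1024 * 1024);
/* ... while an 8 MB boundary would be rejected at build time. */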
|
Lines 120-128 (now 134-141): SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,

 
 static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
     int order);
-static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind,
-    int domain);
-static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
+static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
+static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
 static int vm_phys_paddr_to_segind(vm_paddr_t pa);
 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
     int order);
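Because the free-list index is now derived from each segment's end address inside vm_phys_init(), the flind parameter drops out of both segment-creation prototypes: _vm_phys_create_seg() keeps only the address range and the NUMA domain, and vm_phys_create_seg() only the range.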
|
Lines 298-304 (now 311-317): vm_freelist_rem(struct vm_freelist *fl, vm_page_t

  * Create a physical memory segment.
  */
 static void
-_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
+_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
 {
         struct vm_phys_seg *seg;
 
Lines 314-329 (now 327-341): static void

         seg->start = start;
         seg->end = end;
         seg->domain = domain;
-        seg->free_queues = &vm_phys_free_queues[domain][flind];
 }
 
 static void
-vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
+vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
 {
         int i;
 
         if (mem_affinity == NULL) {
-                _vm_phys_create_seg(start, end, flind, 0);
+                _vm_phys_create_seg(start, end, 0);
                 return;
         }
 
|
Lines 336-346 (now 348-358): static void

                         panic("No affinity info for start %jx",
                             (uintmax_t)start);
                 if (mem_affinity[i].end >= end) {
-                        _vm_phys_create_seg(start, end, flind,
+                        _vm_phys_create_seg(start, end,
                             mem_affinity[i].domain);
                         break;
                 }
-                _vm_phys_create_seg(start, mem_affinity[i].end, flind,
+                _vm_phys_create_seg(start, mem_affinity[i].end,
                     mem_affinity[i].domain);
                 start = mem_affinity[i].end;
         }
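The three hunks above are the mechanical fallout of the prototype change: _vm_phys_create_seg() no longer fills in seg->free_queues (that assignment moves to vm_phys_init(), where the flind is finally known), and the mem_affinity loop simply stops forwarding a flind while still splitting the range at NUMA domain boundaries.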
|
Lines 352-392 (now 364-406): static void

 void
 vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
 {
+        vm_paddr_t paddr;
 
         KASSERT((start & PAGE_MASK) == 0,
             ("vm_phys_define_seg: start is not page aligned"));
         KASSERT((end & PAGE_MASK) == 0,
             ("vm_phys_define_seg: end is not page aligned"));
+
+        /*
+         * Split the physical memory segment if it spans two or more free
+         * list boundaries.
+         */
+        paddr = start;
 #ifdef VM_FREELIST_ISADMA
-        if (start < 16777216) {
-                if (end > 16777216) {
-                        vm_phys_create_seg(start, 16777216,
-                            VM_FREELIST_ISADMA);
-                        vm_phys_create_seg(16777216, end, VM_FREELIST_DEFAULT);
-                } else
-                        vm_phys_create_seg(start, end, VM_FREELIST_ISADMA);
-                if (VM_FREELIST_ISADMA >= vm_nfreelists)
-                        vm_nfreelists = VM_FREELIST_ISADMA + 1;
-        } else
+        if (paddr < 16777216 && end > 16777216) {
+                vm_phys_create_seg(paddr, 16777216);
+                paddr = 16777216;
+        }
 #endif
-#ifdef VM_FREELIST_HIGHMEM
-        if (end > VM_HIGHMEM_ADDRESS) {
-                if (start < VM_HIGHMEM_ADDRESS) {
-                        vm_phys_create_seg(start, VM_HIGHMEM_ADDRESS,
-                            VM_FREELIST_DEFAULT);
-                        vm_phys_create_seg(VM_HIGHMEM_ADDRESS, end,
-                            VM_FREELIST_HIGHMEM);
-                } else
-                        vm_phys_create_seg(start, end, VM_FREELIST_HIGHMEM);
-                if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
-                        vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
-        } else
+#ifdef VM_FREELIST_LOWMEM
+        if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
+                vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
+                paddr = VM_LOWMEM_BOUNDARY;
+        }
 #endif
-        vm_phys_create_seg(start, end, VM_FREELIST_DEFAULT);
+#ifdef VM_FREELIST_DMA32
+        if (paddr < ((vm_paddr_t)1 << 32) && end > ((vm_paddr_t)1 << 32)) {
+                vm_phys_create_seg(paddr, ((vm_paddr_t)1 << 32));
+                paddr = (vm_paddr_t)1 << 32;
+        }
+#endif
+        vm_phys_create_seg(paddr, end);
 }
 
 /*
  * Initialize the physical memory allocator.
+ *
+ * Requires that vm_page_array is initialized!
  */
 void
 vm_phys_init(void)
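With the free-list choice gone, vm_phys_add_seg() reduces to a cursor walk: paddr starts at start, and each boundary the range straddles (16 MB ISA DMA, VM_LOWMEM_BOUNDARY, 4 GB) carves off a segment and advances the cursor, replacing the old per-list if/else ladders (including the retired VM_FREELIST_HIGHMEM one). A standalone userland sketch of the pattern; the boundary values and every name here are illustrative stand-ins, not the MD constants:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t paddr_t;               /* stand-in for vm_paddr_t */

static void
create_seg(paddr_t start, paddr_t end)
{
        printf("segment [%#llx, %#llx)\n", (unsigned long long)start,
            (unsigned long long)end);
}

/* Carve [start, end) at every ascending boundary it straddles, then
 * emit whatever remains above the last boundary crossed. */
static void
add_seg(paddr_t start, paddr_t end)
{
        static const paddr_t bounds[] = {
                16777216,               /* 16 MB ISA DMA boundary */
                (paddr_t)1 << 32        /* 4 GB DMA32 boundary */
        };
        paddr_t paddr = start;

        for (size_t i = 0; i < sizeof(bounds) / sizeof(bounds[0]); i++)
                if (paddr < bounds[i] && end > bounds[i]) {
                        create_seg(paddr, bounds[i]);
                        paddr = bounds[i];
                }
        create_seg(paddr, end);
}

int
main(void)
{
        add_seg(0x100000, (paddr_t)5 << 30);    /* 1 MB .. 5 GB */
        return (0);
}

A range lying entirely inside one band never matches a carve condition and yields a single segment; the ascending boundary order the walk depends on is exactly what the new CTASSERTs enforce at compile time.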
|
Lines 393-415 (now 407-508): vm_phys_init(void)

 {
         struct vm_freelist *fl;
         struct vm_phys_seg *seg;
-#ifdef VM_PHYSSEG_SPARSE
-        long pages;
+        u_long npages;
+        int dom, flind, freelist, oind, pind, segind;
+
+        /*
+         * Compute the number of free lists, and generate the mapping from the
+         * manifest constants VM_FREELIST_* to the free list indices.
+         */
+        npages = 0;
+        for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
+                seg = &vm_phys_segs[segind];
+#ifdef VM_FREELIST_ISADMA
+                if (seg->end <= 16777216)
+                        vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
+                else
 #endif
-        int dom, flind, oind, pind, segind;
+#ifdef VM_FREELIST_LOWMEM
+                if (seg->end <= VM_LOWMEM_BOUNDARY)
+                        vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
+                else
+#endif
+#ifdef VM_FREELIST_DMA32
+                if (
+#ifdef VM_DMA32_THRESHOLD
+                    /*
+                     * Create the DMA32 free list only if the amount of
+                     * physical memory above physical address 4G exceeds the
+                     * given threshold.
+                     */
+                    npages > VM_DMA32_THRESHOLD &&
+#endif
+                    seg->end <= ((vm_paddr_t)1 << 32))
+                        vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
+                else
+#endif
+                {
+                        npages += atop(seg->end - seg->start);
+                        vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
+                }
+        }
+        for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
+                vm_freelist_to_flind[freelist] +=
+                    vm_freelist_to_flind[freelist - 1];
+        }
+        vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
+        KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
+        /* Convert the running count of free lists to a free list index. */
+        for (freelist = 0; freelist < VM_NFREELIST; freelist++)
+                vm_freelist_to_flind[freelist]--;
 
+        /*
+         * Initialize the first_page and free_queues fields of each physical
+         * memory segment.
+         */
 #ifdef VM_PHYSSEG_SPARSE
-        pages = 0;
+        npages = 0;
 #endif
         for (segind = 0; segind < vm_phys_nsegs; segind++) {
                 seg = &vm_phys_segs[segind];
 #ifdef VM_PHYSSEG_SPARSE
-                seg->first_page = &vm_page_array[pages];
-                pages += atop(seg->end - seg->start);
+                seg->first_page = &vm_page_array[npages];
+                npages += atop(seg->end - seg->start);
 #else
                 seg->first_page = PHYS_TO_VM_PAGE(seg->start);
 #endif
+#ifdef VM_FREELIST_ISADMA
+                if (seg->end <= 16777216) {
+                        flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
+                        KASSERT(flind >= 0,
+                            ("vm_phys_init: ISADMA flind < 0"));
+                } else
+#endif
+#ifdef VM_FREELIST_LOWMEM
+                if (seg->end <= VM_LOWMEM_BOUNDARY) {
+                        flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
+                        KASSERT(flind >= 0,
+                            ("vm_phys_init: LOWMEM flind < 0"));
+                } else
+#endif
+#ifdef VM_FREELIST_DMA32
+                if (seg->end <= ((vm_paddr_t)1 << 32)) {
+                        flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
+                        KASSERT(flind >= 0,
+                            ("vm_phys_init: DMA32 flind < 0"));
+                } else
+#endif
+                {
+                        flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
+                        KASSERT(flind >= 0,
+                            ("vm_phys_init: DEFAULT flind < 0"));
+                }
+                seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
         }
+
+        /*
+         * Initialize the free queues.
+         */
         for (dom = 0; dom < vm_ndomains; dom++) {
                 for (flind = 0; flind < vm_nfreelists; flind++) {
                         for (pind = 0; pind < VM_NFREEPOOL; pind++) {
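The first new loop in vm_phys_init() only marks which free lists are populated; the running sum then converts those marks into counts, the last count becomes vm_nfreelists, and the final decrement turns counts into 0-based flinds. Walking segind downward means npages has already accumulated the memory above 4 GB by the time the VM_DMA32_THRESHOLD test runs for lower segments. A userland sketch of the index arithmetic, assuming a hypothetical three-list layout where only DEFAULT and DMA32 end up populated:

#include <assert.h>

int
main(void)
{
        /* Hypothetical layout: DEFAULT == 0, DMA32 == 1, LOWMEM == 2. */
        int flind[3] = { 0 };
        int nfreelists, i;

        /* Pass 1: mark the lists populated by at least one segment. */
        flind[0] = 1;                   /* DEFAULT has segments */
        flind[1] = 1;                   /* DMA32 has segments */
        /* flind[2] stays 0: nothing ends below the LOWMEM boundary. */

        /* Pass 2: a running sum turns marks into 1-based counts ... */
        for (i = 1; i < 3; i++)
                flind[i] += flind[i - 1];
        nfreelists = flind[2];          /* vm_nfreelists == 2 */

        /* ... and the final decrement makes them 0-based flinds. */
        for (i = 0; i < 3; i++)
                flind[i]--;

        assert(nfreelists == 2);
        assert(flind[0] == 0);          /* DEFAULT -> flind 0 */
        assert(flind[1] == 1);          /* DMA32   -> flind 1 */
        assert(flind[2] == 1);          /* unpopulated; never consulted */
        return (0);
}

Because VM_FREELIST_DEFAULT == 0 (the new CTASSERT), a populated default list always lands at flind 0, and the flind >= 0 KASSERTs in the second loop catch any segment that maps to a list the first pass never marked.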
Lines 419-424 (now 512-518): vm_phys_init(void)

                         }
                 }
         }
+
         rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
 }
 
|
Lines 498-522 (now 592-620): vm_phys_alloc_pages(int pool, int order)

 }
 
 /*
- * Find and dequeue a free page on the given free list, with the
- * specified pool and order
+ * Allocate a contiguous, power of two-sized set of physical pages from the
+ * specified free list.  The free list must be specified using one of the
+ * manifest constants VM_FREELIST_*.
+ *
+ * The free page queues must be locked.
  */
 vm_page_t
-vm_phys_alloc_freelist_pages(int flind, int pool, int order)
+vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
 {
         vm_page_t m;
         int dom, domain;
 
-        KASSERT(flind < VM_NFREELIST,
-            ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind));
+        KASSERT(freelist < VM_NFREELIST,
+            ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
+            freelist));
         KASSERT(pool < VM_NFREEPOOL,
             ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
         KASSERT(order < VM_NFREEORDER,
             ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
-
         for (dom = 0; dom < vm_ndomains; dom++) {
                 domain = vm_rr_selectdomain();
-                m = vm_phys_alloc_domain_pages(domain, flind, pool, order);
+                m = vm_phys_alloc_domain_pages(domain,
+                    vm_freelist_to_flind[freelist], pool, order);
                 if (m != NULL)
                         return (m);
         }
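vm_phys_alloc_freelist_pages() now takes the manifest constant rather than a raw flind and translates it internally through vm_freelist_to_flind[], so callers no longer need to know how many lists this machine actually created. A hypothetical call, assuming the MD VM_FREEPOOL_DEFAULT pool constant:

        vm_page_t m;

        /*
         * Ask for a single (order 0) page from whatever flind this
         * machine assigned to the default free list; NULL if every
         * domain's queues are empty.
         */
        m = vm_phys_alloc_freelist_pages(VM_FREELIST_DEFAULT,
            VM_FREEPOOL_DEFAULT, 0);

Keeping the translation inside the allocator makes the dense flind numbering a private detail of vm_phys.c.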