
sys/vm/vm_phys.c (-62 / +72 lines)
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -173,9 +173,6 @@
 SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
     &vm_ndomains, 0, "Number of physical memory domains available.");
 
-static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
-    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
-    vm_paddr_t boundary);
 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
@@ -1351,63 +1348,16 @@
 }
 
 /*
- * Allocate a contiguous set of physical pages of the given size
- * "npages" from the free lists.  All of the physical pages must be at
- * or above the given physical address "low" and below the given
- * physical address "high".  The given value "alignment" determines the
- * alignment of the first physical page in the set.  If the given value
- * "boundary" is non-zero, then the set of physical pages cannot cross
- * any physical address boundary that is a multiple of that value.  Both
- * "alignment" and "boundary" must be a power of two.
+ * Allocate a run of contiguous physical pages from the specified free list
+ * table.
  */
-vm_page_t
-vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
+static vm_page_t
+vm_phys_alloc_queues_contig(
+    struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX],
+    u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary)
 {
-	vm_paddr_t pa_end, pa_start;
-	vm_page_t m_run;
 	struct vm_phys_seg *seg;
-	int segind;
-
-	KASSERT(npages > 0, ("npages is 0"));
-	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
-	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
-	vm_domain_free_assert_locked(VM_DOMAIN(domain));
-	if (low >= high)
-		return (NULL);
-	m_run = NULL;
-	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
-		seg = &vm_phys_segs[segind];
-		if (seg->start >= high || seg->domain != domain)
-			continue;
-		if (low >= seg->end)
-			break;
-		if (low <= seg->start)
-			pa_start = seg->start;
-		else
-			pa_start = low;
-		if (high < seg->end)
-			pa_end = high;
-		else
-			pa_end = seg->end;
-		if (pa_end - pa_start < ptoa(npages))
-			continue;
-		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
-		    alignment, boundary);
-		if (m_run != NULL)
-			break;
-	}
-	return (m_run);
-}
-
-/*
- * Allocate a run of contiguous physical pages from the free list for the
- * specified segment.
- */
-static vm_page_t
-vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
-    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
-{
 	struct vm_freelist *fl;
 	vm_paddr_t pa, pa_end, size;
 	vm_page_t m, m_ret;
@@ -1417,7 +1367,6 @@
 	KASSERT(npages > 0, ("npages is 0"));
 	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
 	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
-	vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
 	/* Compute the queue that is the best fit for npages. */
 	order = flsl(npages - 1);
 	/* Search for a run satisfying the specified conditions. */
@@ -1425,7 +1374,7 @@
 	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
 	    oind++) {
 		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
-			fl = (*seg->free_queues)[pind];
+			fl = (*queues)[pind];
 			TAILQ_FOREACH(m_ret, &fl[oind].pl, listq) {
 				/*
 				 * Determine if the address range starting at pa
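A note on the queues parameter used in the hunk above: it is a pointer to the entire two-dimensional free-list table, so (*queues)[pind] selects one pool's row of lists, which is then indexed by order (fl[oind]). The standalone sketch below only illustrates that C indexing pattern; the struct name, dimensions, and values are hypothetical stand-ins, not the kernel's vm_freelist table.

/*
 * Standalone illustration (hypothetical types, not kernel code) of the
 * pointer-to-2D-array parameter style used by vm_phys_alloc_queues_contig().
 */
#include <stdio.h>

#define NPOOL	2
#define NORDER	4

struct freelist { int count; };		/* stand-in for struct vm_freelist */

/* Receives a pointer to the whole table, like the queues argument above. */
static void
dump_table(struct freelist (*queues)[NPOOL][NORDER])
{
	struct freelist *fl;
	int pind, oind;

	for (pind = 0; pind < NPOOL; pind++) {
		fl = (*queues)[pind];	/* one pool's row of per-order lists */
		for (oind = 0; oind < NORDER; oind++)
			printf("pool %d order %d: %d\n", pind, oind,
			    fl[oind].count);
	}
}

int
main(void)
{
	struct freelist table[NPOOL][NORDER] = {{{ 0 }}};

	table[1][2].count = 7;
	dump_table(&table);
	return (0);
}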
@@ -1451,8 +1400,8 @@
 				 * (without overflow in pa_end calculation)
 				 * and fits within the segment.
 				 */
-				if (pa_end < pa ||
-				    pa < seg->start || seg->end < pa_end)
+				seg = &vm_phys_segs[m_ret->segind];
+				if (pa_end < pa || seg->end < pa_end)
 					continue;
 
 				/*
@@ -1473,7 +1422,7 @@
 	return (NULL);
 done:
 	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
-		fl = (*seg->free_queues)[m->pool];
+		fl = (*queues)[m->pool];
 		vm_freelist_rem(fl, m, oind);
 		if (m->pool != VM_FREEPOOL_DEFAULT)
 			vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
@@ -1481,12 +1430,73 @@
 	/* Return excess pages to the free lists. */
 	npages_end = roundup2(npages, 1 << oind);
 	if (npages < npages_end) {
-		fl = (*seg->free_queues)[VM_FREEPOOL_DEFAULT];
+		fl = (*queues)[VM_FREEPOOL_DEFAULT];
 		vm_phys_enq_range(&m_ret[npages], npages_end - npages, fl, 0);
 	}
 	return (m_ret);
 }
 
+/*
+ * Allocate a contiguous set of physical pages of the given size
+ * "npages" from the free lists.  All of the physical pages must be at
+ * or above the given physical address "low" and below the given
+ * physical address "high".  The given value "alignment" determines the
+ * alignment of the first physical page in the set.  If the given value
+ * "boundary" is non-zero, then the set of physical pages cannot cross
+ * any physical address boundary that is a multiple of that value.  Both
+ * "alignment" and "boundary" must be a power of two.
+ */
+vm_page_t
+vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
+    u_long alignment, vm_paddr_t boundary)
+{
+	vm_paddr_t pa_end, pa_start;
+	vm_page_t m_run;
+	struct vm_phys_seg *seg;
+	struct vm_freelist (*queues)[VM_NFREEPOOL][VM_NFREEORDER_MAX];
+	int segind;
+
+	KASSERT(npages > 0, ("npages is 0"));
+	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
+	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
+	vm_domain_free_assert_locked(VM_DOMAIN(domain));
+	if (low >= high)
+		return (NULL);
+	queues = NULL;
+	m_run = NULL;
+	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
+		seg = &vm_phys_segs[segind];
+		if (seg->start >= high || seg->domain != domain)
+			continue;
+		if (low >= seg->end)
+			break;
+		if (low <= seg->start)
+			pa_start = seg->start;
+		else
+			pa_start = low;
+		if (high < seg->end)
+			pa_end = high;
+		else
+			pa_end = seg->end;
+		if (pa_end - pa_start < ptoa(npages))
+			continue;
+		/*
+		 * If a previous segment led to a search using
+		 * the same free lists as would this segment, then
+		 * we've actually already searched within this
+		 * too.  So skip it.
+		 */
+		if (seg->free_queues == queues)
+			continue;
+		queues = seg->free_queues;
+		m_run = vm_phys_alloc_queues_contig(queues, npages,
+		    low, high, alignment, boundary);
+		if (m_run != NULL)
+			break;
+	}
+	return (m_run);
+}
+
 /*
  * Return the index of the first unused slot which may be the terminating
  * entry.
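For readers following the new vm_phys_alloc_contig() loop above: segments that share a free-queue table are now searched only once, via whichever segment first supplies that table. The sketch below is a minimal userland model of that deduplication step only; the structure and function names are hypothetical stand-ins and this is not the kernel code.

/*
 * Minimal model (hypothetical types, not kernel code) of the
 * "skip segments whose free-queue table was already searched" step
 * added to vm_phys_alloc_contig() in this patch.
 */
#include <stdio.h>

struct queue_table { const char *name; };	/* stand-in for the free-list table */
struct segment {				/* stand-in for struct vm_phys_seg */
	const char *name;
	struct queue_table *free_queues;	/* may be shared between segments */
};

/* Stand-in for vm_phys_alloc_queues_contig(); pretend nothing is found. */
static int
search_queues(struct queue_table *q)
{
	printf("searching %s\n", q->name);
	return (0);
}

int
main(void)
{
	struct queue_table qa = { "table A" }, qb = { "table B" };
	struct segment segs[] = {
		{ "seg0", &qa }, { "seg1", &qa }, { "seg2", &qb },
	};
	struct queue_table *queues;
	int i;

	/*
	 * Walk the segments; if the previous search already used this
	 * segment's free-queue table, the same result would be found
	 * again, so skip it.  "table A" is searched only once here.
	 */
	queues = NULL;
	for (i = 0; i < 3; i++) {
		if (segs[i].free_queues == queues)
			continue;
		queues = segs[i].free_queues;
		if (search_queues(queues) != 0)
			break;
	}
	return (0);
}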
