(-)sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c (-7 / +39 lines)
Lines 34-39 __FBSDID("$FreeBSD$");
 #include <sys/kmem.h>
 #include <sys/debug.h>
 #include <sys/mutex.h>
+#include <sys/sdt.h>
 
 #include <vm/vm_page.h>
 #include <vm/vm_object.h>
Lines 133-145 kmem_size(void)
 	return (kmem_size_val);
 }
 
-uint64_t
-kmem_used(void)
-{
-
-	return (vmem_size(kmem_arena, VMEM_ALLOC));
-}
-
 static int
 kmem_std_constructor(void *mem, int size __unused, void *private, int flags)
 {
Lines 228-239 kmem_cache_reap_now(kmem_cache_t *cache)
 }
 
 void
+kmem_cache_reap(kmem_cache_t *cache, uint64_t maxfree)
+{
+
+	if (cache->kc_zone != NULL &&
+	    uma_zone_free_size(cache->kc_zone) > maxfree)
+		zone_drain(cache->kc_zone);
+}
+
+void
 kmem_reap(void)
 {
 	uma_reclaim();
 }
+
+uint64_t
+kmem_cache_free_size(kmem_cache_t *cache)
+{
+	uint64_t cachefree;
+
+	cachefree = (cache->kc_zone == NULL) ? 0 :
+	    uma_zone_free_size(cache->kc_zone);
+
+	/*
+	 * Manual probe as the return fbt probe never fires due to
+	 * compiler tail call optimisation.
+	 */
+	DTRACE_PROBE2(kmem_cache_free_size, char *, cache->kc_name, uint64_t,
+	    cachefree);
+
+	return (cachefree);
+}
+
 #else
 void
+kmem_cache_reap(kmem_cache_t *cache, uint64_t maxfree)
+{
+}
+
+void
 kmem_cache_reap_now(kmem_cache_t *cache __unused)
 {
 }
Lines 242-247 void
 kmem_reap(void)
 {
 }
+
+uint64_t
+kmem_cache_free_size(kmem_cache_t *cache)
+{
+}
 #endif
 
 int
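For reference, the policy behind the new kmem_cache_reap() shim above is a simple threshold check: the backing UMA zone is drained only once the bytes sitting idle in its free buckets exceed the caller-supplied maxfree. The following is a minimal userland sketch of that decision; free_bytes() and drain() are hypothetical stand-ins for uma_zone_free_size() and zone_drain(), not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for uma_zone_free_size(): idle items times item size. */
static uint64_t
free_bytes(uint64_t items_free, uint64_t item_size)
{
	return (items_free * item_size);
}

/* Hypothetical stand-in for zone_drain(). */
static void
drain(const char *name)
{
	printf("draining %s\n", name);
}

/* Mirrors the kmem_cache_reap() policy: drain only past the threshold. */
static void
cache_reap(const char *name, uint64_t items_free, uint64_t item_size,
    uint64_t maxfree)
{
	if (free_bytes(items_free, item_size) > maxfree)
		drain(name);
}

int
main(void)
{
	cache_reap("zio_buf_16384", 2048, 16384, 1ULL << 24);	/* drained */
	cache_reap("zio_buf_512", 100, 512, 1ULL << 24);	/* left alone */
	return (0);
}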
(-)sys/cddl/compat/opensolaris/sys/kmem.h (-2 / +6 lines)
Lines 44-50 MALLOC_DECLARE(M_SOLARIS);
 #define	POINTER_INVALIDATE(pp)	(*(pp) = (void *)((uintptr_t)(*(pp)) | 0x1))
 
 #define	KM_SLEEP		M_WAITOK
-#define	KM_PUSHPAGE		M_WAITOK
+#define	KM_PUSHPAGE		M_WAITOK|M_USE_RESERVE
 #define	KM_NOSLEEP		M_NOWAIT
 #define	KM_NODEBUG		M_NODUMP
 #define	KM_NORMALPRI		0
Lines 66-72 typedef struct kmem_cache {
 void *zfs_kmem_alloc(size_t size, int kmflags);
 void zfs_kmem_free(void *buf, size_t size);
 uint64_t kmem_size(void);
-uint64_t kmem_used(void);
 kmem_cache_t *kmem_cache_create(char *name, size_t bufsize, size_t align,
     int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
     void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags);
Lines 74-83 void kmem_cache_destroy(kmem_cache_t *cache);
 void *kmem_cache_alloc(kmem_cache_t *cache, int flags);
 void kmem_cache_free(kmem_cache_t *cache, void *buf);
 void kmem_cache_reap_now(kmem_cache_t *cache);
+void kmem_cache_reap(kmem_cache_t *cache, uint64_t maxfree);
 void kmem_reap(void);
+uint64_t kmem_cache_free_size(kmem_cache_t *cache);
 int kmem_debugging(void);
 void *calloc(size_t n, size_t s);
 
+#define	freemem				(cnt.v_free_count + cnt.v_cache_count)
+#define	minfree				cnt.v_free_min
+#define	heap_arena			kmem_arena
 #define	kmem_alloc(size, kmflags)	zfs_kmem_alloc((size), (kmflags))
 #define	kmem_zalloc(size, kmflags)	zfs_kmem_alloc((size), (kmflags) | M_ZERO)
 #define	kmem_free(buf, size)		zfs_kmem_free((buf), (size))
(-)sys/cddl/compat/opensolaris/sys/param.h (+1 lines)
Lines 36-41
 
 #ifdef _KERNEL
 #define	ptob(x)		((uint64_t)(x) << PAGE_SHIFT)
+#define	btop(x)		((uint64_t)(x) >> PAGE_SHIFT)
 #endif
 
 #endif
(-)sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c (-45 / +314 lines)
Lines 138-143
 #include <sys/sdt.h>
 
 #include <vm/vm_pageout.h>
+#include <machine/vmparam.h>
 
 #ifdef illumos
 #ifndef _KERNEL
Lines 159-164 typedef enum arc_reclaim_strategy {
 	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
 } arc_reclaim_strategy_t;
 
+typedef enum arc_cache_reclaim_strategy {
+	ARC_CACHE_RECLAIM_NOW,		/* Immediate reclaim strategy */
+	ARC_CACHE_RECLAIM_SIZE,		/* Free size reclaim strategy */
+	ARC_CACHE_RECLAIM_FORCE,	/* Forced immediate reclaim strategy */
+} arc_cache_reclaim_strategy_t;
+
+/* When the last cache reclaim was processed. */
+static clock_t cache_reclaim_last = 0;
+
 /*
  * The number of iterations through arc_evict_*() before we
  * drop & reacquire the lock.
Lines 193-201 extern int zfs_prefetch_disable;
  */
 static boolean_t arc_warm;
 
-/*
- * These tunables are for performance analysis.
- */
 uint64_t zfs_arc_max;
 uint64_t zfs_arc_min;
 uint64_t zfs_arc_meta_limit = 0;
Lines 204-210 int zfs_arc_shrink_shift = 0;
 int zfs_arc_p_min_shift = 0;
 int zfs_disable_dup_eviction = 0;
 uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
+u_int zfs_arc_free_target = 0;
+u_int zfs_arc_cache_target = 0;
+int zfs_arc_cache_period = 10;
+int zfs_arc_cache_partial = 0;
+int zfs_arc_cache_free_period = 300;
+uint64_t zfs_arc_cache_free_max = (1 << 24); /* 16MB */
 
+#ifdef _KERNEL
+static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS);
+static int sysctl_vfs_zfs_arc_cache_target(SYSCTL_HANDLER_ARGS);
+
+static void
+arc_target_init(void *unused __unused)
+{
+
+	zfs_arc_free_target = vm_pageout_wakeup_thresh;
+	zfs_arc_cache_target = (vm_pageout_wakeup_thresh / 2) * 3;
+}
+SYSINIT(arc_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
+    arc_target_init, NULL);
+
 TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
 TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
 TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
Lines 217-223 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_blocksize, CTLFLAG_RDTUN,
     &zfs_arc_average_blocksize, 0,
     "ARC average blocksize");
+SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_cache_reclaim_period, CTLFLAG_RWTUN,
+    &zfs_arc_cache_period, 0,
+    "Min number of seconds between ARC cache reclaims");
+SYSCTL_UINT(_vfs_zfs, OID_AUTO, arc_cache_reclaim_partial, CTLFLAG_RWTUN,
+    &zfs_arc_cache_partial, 0,
+    "Enable ARC to perform partial cache reclaims");
+SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_cache_free_max, CTLFLAG_RWTUN,
+    &zfs_arc_cache_free_max, 0,
+    "Maximum free bytes in an ARC cache zone before reclaim will be triggered");
+SYSCTL_UINT(_vfs_zfs, OID_AUTO, arc_cache_free_period, CTLFLAG_RWTUN,
+    &zfs_arc_cache_free_period, 0,
+    "Min number of seconds between ARC free size based cache reclaims");
+/*
+ * We don't have a tunable for these sysctls due to their dependency on
+ * pagedaemon initialisation.
+ */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
+    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(u_int),
+    sysctl_vfs_zfs_arc_free_target, "IU",
+    "Desired number of free pages below which ARC triggers reclaim");
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_cache_target,
+    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(u_int),
+    sysctl_vfs_zfs_arc_cache_target, "IU",
+    "Desired number of free pages below which ARC triggers cache reclaim");
 
+
+static int
+sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
+{
+	u_int val;
+	int err;
+
+	val = zfs_arc_free_target;
+	err = sysctl_handle_int(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	if (val < minfree)
+		return (EINVAL);
+	if (val > cnt.v_page_count)
+		return (EINVAL);
+
+	zfs_arc_free_target = val;
+
+	return (0);
+}
+
+static int
+sysctl_vfs_zfs_arc_cache_target(SYSCTL_HANDLER_ARGS)
+{
+	u_int val;
+	int err;
+
+	val = zfs_arc_cache_target;
+	err = sysctl_handle_int(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	if (val < minfree)
+		return (EINVAL);
+	if (val > cnt.v_page_count)
+		return (EINVAL);
+
+	zfs_arc_cache_target = val;
+
+	return (0);
+}
+#endif
+
 /*
  * Note that buffers can be in one of 6 states:
  *	ARC_anon	- anonymous (discussed below)
Lines 592-597 static void arc_evict_ghost(arc_state_t *state, ui
 static void arc_buf_watch(arc_buf_t *buf);
 #endif /* illumos */
 
+static uint64_t arc_cache_free(void);
+static boolean_t arc_cache_reclaim_needed(uint64_t size);
+static boolean_t arc_cache_reclaim(uint64_t size,
+    arc_cache_reclaim_strategy_t strat);
+static boolean_t arc_cache_reclaim_strat(kmem_cache_t *cache, uint64_t size,
+    arc_cache_reclaim_strategy_t strat);
+
 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
 
 #define	GHOST_STATE(state)	\
Lines 2421-2426 arc_flush(spa_t *spa)
 void
 arc_shrink(void)
 {
+
 	if (arc_c > arc_c_min) {
 		uint64_t to_free;
 
Lines 2429-2434 arc_shrink(void)
 #else
 		to_free = arc_c >> arc_shrink_shift;
 #endif
+		DTRACE_PROBE4(arc__shrink, uint64_t, arc_c, uint64_t,
+			arc_c_min, uint64_t, arc_p, uint64_t, to_free);
+
 		if (arc_c > arc_c_min + to_free)
 			atomic_add_64(&arc_c, -to_free);
 		else
Lines 2439-2450 arc_shrink(void)
 			arc_c = MAX(arc_size, arc_c_min);
 		if (arc_p > arc_c)
 			arc_p = (arc_c >> 1);
+
+		DTRACE_PROBE2(arc__shrunk, uint64_t, arc_c, uint64_t,
+			arc_p);
+
 		ASSERT(arc_c >= arc_c_min);
 		ASSERT((int64_t)arc_p >= 0);
 	}
 
-	if (arc_size > arc_c)
+	if (arc_size > arc_c) {
+		DTRACE_PROBE2(arc__shrink_adjust, uint64_t, arc_size,
+			uint64_t, arc_c);
 		arc_adjust();
+	}
 }
 
 static int needfree = 0;
Lines 2454-2469 arc_reclaim_needed(void)
 {
 
 #ifdef _KERNEL
+	if (arc_size <= arc_c_min) {
+		DTRACE_PROBE2(arc__reclaim_min, uint64_t, arc_size,
+		    uint64_t, arc_c_min);
+		return (0);
+	}
 
-	if (needfree)
+	if (needfree) {
+		DTRACE_PROBE(arc__reclaim_needfree);
 		return (1);
+	}
 
 	/*
 	 * Cooperate with pagedaemon when it's time for it to scan
 	 * and reclaim some pages.
 	 */
-	if (vm_paging_needed())
+	if (freemem < zfs_arc_free_target) {
+		DTRACE_PROBE2(arc__reclaim_freemem, uint64_t,
+		    freemem, uint64_t, zfs_arc_free_target);
 		return (1);
+	}
 
 #ifdef sun
 	/*
Lines 2491-2498 arc_reclaim_needed(void)
 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
 		return (1);
 
-#if defined(__i386)
 	/*
+	 * Check that we have enough availrmem that memory locking (e.g., via
+	 * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
+	 * stores the number of pages that cannot be locked; when availrmem
+	 * drops below pages_pp_maximum, page locking mechanisms such as
+	 * page_pp_lock() will fail.)
+	 */
+	if (availrmem <= pages_pp_maximum)
+		return (1);
+
+#endif	/* sun */
+#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
+	/*
 	 * If we're on an i386 platform, it's possible that we'll exhaust the
 	 * kernel heap space before we ever run out of available physical
 	 * memory.  Most checks of the size of the heap_area compare against
Lines 2503-2528 arc_reclaim_needed(void)
 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
 	 * free)
 	 */
-	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
-	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
+	if (vmem_size(heap_arena, VMEM_FREE) <
+	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2)) {
+		DTRACE_PROBE2(arc__reclaim_used, uint64_t,
+		    vmem_size(heap_arena, VMEM_FREE), uint64_t,
+		    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2);
 		return (1);
+	}
 #endif
-#else	/* !sun */
-	if (kmem_used() > (kmem_size() * 3) / 4)
+#ifdef sun
+	/*
+	 * If zio data pages are being allocated out of a separate heap segment,
+	 * then enforce that the size of available vmem for this arena remains
+	 * above about 1/16th free.
+	 *
+	 * Note: The 1/16th arena free requirement was put in place
+	 * to aggressively evict memory from the arc in order to avoid
+	 * memory fragmentation issues.
+	 */
+	if (zio_arena != NULL &&
+	    vmem_size(zio_arena, VMEM_FREE) <
+	    (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
 		return (1);
 #endif	/* sun */
-
-#else
+#else	/* _KERNEL */
 	if (spa_get_random(100) == 0)
 		return (1);
-#endif
+#endif	/* _KERNEL */
+	DTRACE_PROBE(arc__reclaim_no);
+
 	return (0);
 }
 
 extern kmem_cache_t	*zio_buf_cache[];
 extern kmem_cache_t	*zio_data_buf_cache[];
+extern kmem_cache_t	*range_seg_cache;
 
-static void
+static void __noinline
 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
 {
 	size_t			i;
Lines 2529-2534 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
 	kmem_cache_t		*prev_cache = NULL;
 	kmem_cache_t		*prev_data_cache = NULL;
 
+	DTRACE_PROBE(arc__kmem_reap_start);
 #ifdef _KERNEL
 	if (arc_meta_used >= arc_meta_limit) {
 		/*
Lines 2537-2543 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
 		 */
 		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
 	}
-#if defined(__i386)
+#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
 	/*
 	 * Reclaim unused memory from all kmem caches.
 	 */
Lines 2552-2571 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
 	if (strat == ARC_RECLAIM_AGGR)
 		arc_shrink();
 
+	(void) arc_cache_reclaim(0, ARC_CACHE_RECLAIM_FORCE);
+
+#ifdef sun
+	/*
+	 * Ask the vmem arena to reclaim unused memory from its
+	 * quantum caches.
+	 */
+	if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
+		vmem_qcache_reap(zio_arena);
+#endif
+out:
+	DTRACE_PROBE(arc__kmem_reap_end);
+}
+
+
+static boolean_t
+arc_cache_reclaim_needed(uint64_t size)
+{
+
+	if (zfs_arc_cache_target && freemem < zfs_arc_cache_target + btop(size)) {
+		DTRACE_PROBE1(arc_cache_reclaim_needed, int, B_TRUE);
+		return (B_TRUE);
+	}
+
+	DTRACE_PROBE1(arc_cache_reclaim_needed, int, B_FALSE);
+	return (B_FALSE);
+}
+
+static boolean_t
+arc_cache_reclaim_strat(kmem_cache_t *cache, uint64_t size,
+    arc_cache_reclaim_strategy_t strat)
+{
+
+	switch(strat) {
+	case ARC_CACHE_RECLAIM_NOW:
+	case ARC_CACHE_RECLAIM_FORCE:
+		kmem_cache_reap_now(cache);
+		if (zfs_arc_cache_partial && !arc_cache_reclaim_needed(size))
+			return (B_TRUE);
+		break;
+	default:
+		kmem_cache_reap(cache, zfs_arc_cache_free_max);
+	}
+
+	return (B_FALSE);
+}
+
+static boolean_t
+arc_cache_reclaim(uint64_t size, arc_cache_reclaim_strategy_t strat)
+{
+	int i;
+	clock_t now;
+	kmem_cache_t *prev_cache, *prev_data_cache;
+
+	now = ddi_get_lbolt();
+	DTRACE_PROBE3(arc_cache_reclaim_test, int, strat, int64_t, now,
+	    int64_t, cache_reclaim_last);
+	if (now - cache_reclaim_last > (zfs_arc_cache_period * hz))
+		return (B_FALSE);
+
+	DTRACE_PROBE1(arc_cache_reclaim, int, strat);
+
 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
 		if (zio_buf_cache[i] != prev_cache) {
 			prev_cache = zio_buf_cache[i];
-			kmem_cache_reap_now(zio_buf_cache[i]);
+			if (arc_cache_reclaim_strat(zio_buf_cache[i], size,
+			    strat)) {
+				return (B_TRUE);
+			}
+
 		}
 		if (zio_data_buf_cache[i] != prev_data_cache) {
 			prev_data_cache = zio_data_buf_cache[i];
-			kmem_cache_reap_now(zio_data_buf_cache[i]);
+			if (arc_cache_reclaim_strat(zio_data_buf_cache[i],
+			    size, strat))
+				return (B_TRUE);
 		}
 	}
-	kmem_cache_reap_now(buf_cache);
-	kmem_cache_reap_now(hdr_cache);
+	if (arc_cache_reclaim_strat(range_seg_cache, size, strat))
+		return (B_TRUE);
+
+	if (arc_cache_reclaim_strat(buf_cache, size, strat))
+		return (B_TRUE);
+
+	arc_cache_reclaim_strat(hdr_cache, size, strat);
+
+	cache_reclaim_last = ddi_get_lbolt();
+
+	if (arc_cache_reclaim_needed(size))
+		return (B_FALSE);
+
+	return (B_TRUE);
 }
 
+static uint64_t
+arc_cache_free(void)
+{
+	int i;
+	uint64_t cachefree;
+	kmem_cache_t *prev_cache, *prev_data_cache;
+
+	cachefree = kmem_cache_free_size(buf_cache) +
+	    kmem_cache_free_size(hdr_cache);
+
+	prev_cache = NULL;
+	prev_data_cache = NULL;
+
+	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
+		if (zio_buf_cache[i] != prev_cache) {
+			prev_cache = zio_buf_cache[i];
+			cachefree += kmem_cache_free_size(zio_buf_cache[i]);
+		}
+		if (zio_data_buf_cache[i] != prev_data_cache) {
+			prev_data_cache = zio_data_buf_cache[i];
+			cachefree += kmem_cache_free_size(zio_data_buf_cache[i]);
+		}
+	}
+
+	return (cachefree);
+}
+
 static void
 arc_reclaim_thread(void *dummy __unused)
 {
Lines 2577-2586 arc_reclaim_thread(void *dummy __unused)
 
 	mutex_enter(&arc_reclaim_thr_lock);
 	while (arc_thread_exit == 0) {
+		DTRACE_PROBE(arc__reclaim_thread);
+		if (arc_cache_reclaim_needed(0)) {
+			(void) arc_cache_reclaim(0, ARC_CACHE_RECLAIM_NOW);
+		} else {
+			clock_t now;
+
+			now = ddi_get_lbolt();
+			DTRACE_PROBE2(arc__reclaim_check, int64_t, now,
+				int64_t, cache_reclaim_last);
+			if (now - cache_reclaim_last >
+			    (zfs_arc_cache_free_period * hz)) {
+				(void) arc_cache_reclaim(0,
+				    ARC_CACHE_RECLAIM_SIZE);
+			}
+		}
+
 		if (arc_reclaim_needed()) {
-
+			DTRACE_PROBE1(arc__caches_free, uint64_t,
+			    arc_cache_free());
 			if (arc_no_grow) {
 				if (last_reclaim == ARC_RECLAIM_CONS) {
+					DTRACE_PROBE(arc__reclaim_aggr_no_grow);
 					last_reclaim = ARC_RECLAIM_AGGR;
 				} else {
 					last_reclaim = ARC_RECLAIM_CONS;
Lines 2588-2593 arc_reclaim_thread(void *dummy __unused)
 			} else {
 				arc_no_grow = TRUE;
 				last_reclaim = ARC_RECLAIM_AGGR;
+				DTRACE_PROBE(arc__reclaim_aggr);
 				membar_producer();
 			}
 
Lines 2602-2607 arc_reclaim_thread(void *dummy __unused)
 				 */
 				arc_no_grow = TRUE;
 				last_reclaim = ARC_RECLAIM_AGGR;
+				DTRACE_PROBE(arc__reclaim_aggr_needfree);
 			}
 			arc_kmem_reap_now(last_reclaim);
 			arc_warm = B_TRUE;
Lines 2618-2623 arc_reclaim_thread(void *dummy __unused)
 #ifdef _KERNEL
 		if (needfree) {
 			needfree = 0;
+			DTRACE_PROBE(arc__clear_needfree);
 			wakeup(&needfree);
 		}
 #endif
Lines 2692-2697 arc_adapt(int bytes, arc_state_t *state)
 	 * cache size, increment the target cache size
 	 */
 	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
+		DTRACE_PROBE1(arc__inc_adapt, int, bytes);
 		atomic_add_64(&arc_c, (int64_t)bytes);
 		if (arc_c > arc_c_max)
 			arc_c = arc_c_max;
Lines 2713-2732 arc_evict_needed(arc_buf_contents_t type)
 	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
 		return (1);
 
-#ifdef sun
-#ifdef _KERNEL
-	/*
-	 * If zio data pages are being allocated out of a separate heap segment,
-	 * then enforce that the size of available vmem for this area remains
-	 * above about 1/32nd free.
-	 */
-	if (type == ARC_BUFC_DATA && zio_arena != NULL &&
-	    vmem_size(zio_arena, VMEM_FREE) <
-	    (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
-		return (1);
-#endif
-#endif	/* sun */
-
 	if (arc_reclaim_needed())
 		return (1);
 
Lines 2763-2768 arc_get_data_buf(arc_buf_t *buf)
 	uint64_t		size = buf->b_hdr->b_size;
 	arc_buf_contents_t	type = buf->b_hdr->b_type;
 
+	if (arc_cache_reclaim_needed(size))
+		(void) arc_cache_reclaim(size, ARC_CACHE_RECLAIM_NOW);
+
 	arc_adapt(size, state);
 
 	/*
Lines 3885-3904 static int
 arc_memory_throttle(uint64_t reserve, uint64_t txg)
 {
 #ifdef _KERNEL
-	uint64_t available_memory =
-	    ptoa((uintmax_t)cnt.v_free_count + cnt.v_cache_count);
+	uint64_t available_memory = ptob(freemem);
 	static uint64_t page_load = 0;
 	static uint64_t last_txg = 0;
 
-#ifdef sun
-#if defined(__i386)
+#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
 	available_memory =
-	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
+	    MIN(available_memory, ptob(vmem_size(heap_arena, VMEM_FREE)));
 #endif
-#endif	/* sun */
 
-	if (cnt.v_free_count + cnt.v_cache_count >
-	    (uint64_t)physmem * arc_lotsfree_percent / 100)
+	if (freemem > (uint64_t)physmem * arc_lotsfree_percent / 100)
 		return (0);
 
 	if (txg > last_txg) {
Lines 3911-3917 arc_memory_throttle(uint64_t reserve, uint64_t txg
 	 * continue to let page writes occur as quickly as possible.
 	 */
 	if (curproc == pageproc) {
-		if (page_load > available_memory / 4)
+		if (page_load > MAX(ptob(minfree), available_memory) / 4)
 			return (SET_ERROR(ERESTART));
 		/* Note: reserve is inflated, so we deflate */
 		page_load += reserve / 8;
Lines 3939-3946 arc_tempreserve_space(uint64_t reserve, uint64_t t
 	int error;
 	uint64_t anon_size;
 
-	if (reserve > arc_c/4 && !arc_no_grow)
+	if (reserve > arc_c/4 && !arc_no_grow) {
 		arc_c = MIN(arc_c_max, reserve * 4);
+		DTRACE_PROBE1(arc__set_reserve, uint64_t, arc_c);
+	}
 	if (reserve > arc_c)
 		return (SET_ERROR(ENOMEM));
 
Lines 3994-3999 arc_lowmem(void *arg __unused, int howto __unused)
 	mutex_enter(&arc_lowmem_lock);
 	mutex_enter(&arc_reclaim_thr_lock);
 	needfree = 1;
+	DTRACE_PROBE(arc__needfree);
 	cv_signal(&arc_reclaim_thr_cv);
 
 	/*
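To illustrate the new trigger used by arc_get_data_buf() and the reclaim thread above: arc_cache_reclaim_needed() works in pages, converting the pending allocation with the btop() macro added in param.h and firing when freemem would drop below zfs_arc_cache_target plus that amount. Below is a small userland sketch of the same arithmetic, assuming a 4 KB page size and made-up numbers for the free page count and target (illustration only, not the patch's code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KB pages */
#define btop(x)		((uint64_t)(x) >> PAGE_SHIFT)

/*
 * Mirrors arc_cache_reclaim_needed(): reclaim when the free page count
 * would fall below the target once the pending allocation is counted.
 */
static bool
cache_reclaim_needed(uint64_t freemem_pages, uint64_t target_pages,
    uint64_t alloc_bytes)
{
	return (target_pages != 0 &&
	    freemem_pages < target_pages + btop(alloc_bytes));
}

int
main(void)
{
	/* 40000 free pages, 32768 page target, 128 MB pending allocation. */
	printf("%d\n", cache_reclaim_needed(40000, 32768, 128ULL << 20));
	/* Same target, but only a 16 KB allocation pending. */
	printf("%d\n", cache_reclaim_needed(40000, 32768, 16384));
	return (0);
}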
(-)sys/cddl/contrib/opensolaris/uts/common/fs/zfs/range_tree.c (-1 / +1 lines)
Lines 33-39
 #include <sys/zio.h>
 #include <sys/range_tree.h>
 
-static kmem_cache_t *range_seg_cache;
+kmem_cache_t *range_seg_cache;
 
 void
 range_tree_init(void)
(-)sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_queue.c (+2 lines)
Lines 312-317 vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
 	ASSERT(MUTEX_HELD(&vq->vq_lock));
 	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 	avl_add(&vq->vq_class[zio->io_priority].vqc_queued_tree, zio);
+	DTRACE_PROBE3(vdev_queue, vdev_queue_t *, vq, zio_t *, zio, uint64_t,
+	    avl_numnodes(&vq->vq_class[zio->io_priority].vqc_queued_tree));
 
 #ifdef illumos
 	mutex_enter(&spa->spa_iokstat_lock);
(-)sys/vm/uma.h (+11 lines)
Lines 636-641 int uma_zone_exhausted(uma_zone_t zone);
 int uma_zone_exhausted_nolock(uma_zone_t zone);
 
 /*
+ * Used to determine the amount of memory consumed by a zone's free space.
+ *
+ * Arguments:
+ *	zone	The zone to determine the free space of.
+ *
+ * Returns:
+ *	uint64_t The amount of memory consumed by the zone's free space.
+ */
+uint64_t uma_zone_free_size(uma_zone_t zone);
+
+/*
  * Common UMA_ZONE_PCPU zones.
  */
 extern uma_zone_t pcpu_zone_64;
(-)sys/vm/uma_core.c (+27 lines)
Lines 3312-3317 uma_print_zone(uma_zone_t zone)
 	}
 }
 
+uint64_t
+uma_zone_free_size(uma_zone_t zone)
+{
+	int cpu;
+	uint64_t cachefree;
+	uma_bucket_t bucket;
+	uma_cache_t cache;
+
+	cachefree = 0;
+
+	ZONE_LOCK(zone);
+	LIST_FOREACH(bucket, &zone->uz_buckets, ub_link)
+		cachefree += (uint64_t)bucket->ub_cnt;
+
+	CPU_FOREACH(cpu) {
+		cache = &zone->uz_cpu[cpu];
+		if (cache->uc_allocbucket != NULL)
+			cachefree += (uint64_t)cache->uc_allocbucket->ub_cnt;
+		if (cache->uc_freebucket != NULL)
+			cachefree += (uint64_t)cache->uc_freebucket->ub_cnt;
+	}
+	cachefree *= zone->uz_size;
+	ZONE_UNLOCK(zone);
+
+	return (cachefree);
+}
+
 #ifdef DDB
 /*
  * Generate statistics across both the zone and its per-cpu cache's.  Return
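The accounting in uma_zone_free_size() above is: sum the item counts of the zone's full buckets and of each CPU's alloc and free buckets, then multiply by the zone's item size. The following is a self-contained userland sketch of that calculation, using a toy bucket struct in place of the UMA structures (illustrative only; the real function does this under the zone lock):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for a UMA bucket: just a count of cached free items. */
struct bucket {
	int	cnt;
};

/* Sum cached items across zone and per-CPU buckets, then convert to bytes. */
static uint64_t
zone_free_size(const struct bucket *zone_buckets, int nzone,
    const struct bucket *cpu_buckets, int ncpu, uint64_t item_size)
{
	uint64_t cachefree = 0;
	int i;

	for (i = 0; i < nzone; i++)
		cachefree += (uint64_t)zone_buckets[i].cnt;
	for (i = 0; i < ncpu; i++)
		cachefree += (uint64_t)cpu_buckets[i].cnt;
	return (cachefree * item_size);
}

int
main(void)
{
	struct bucket zone[] = { { 128 }, { 64 } };
	struct bucket cpu[] = { { 16 }, { 32 } };	/* alloc + free buckets */

	printf("%ju bytes idle\n",
	    (uintmax_t)zone_free_size(zone, 2, cpu, 2, 16384));
	return (0);
}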
(-)sys/vm/vm_pageout.c (-8 / +27 lines)
Lines 76-81
 __FBSDID("$FreeBSD$");
 
 #include "opt_vm.h"
+#include "opt_kdtrace.h"
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
Lines 89-94 __FBSDID("$FreeBSD$");
 #include <sys/racct.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
+#include <sys/sdt.h>
 #include <sys/signalvar.h>
 #include <sys/smp.h>
 #include <sys/vnode.h>
Lines 115-124 __FBSDID("$FreeBSD$");
 
 /* the kernel process "vm_pageout"*/
 static void vm_pageout(void);
+static void vm_pageout_init(void);
 static int vm_pageout_clean(vm_page_t);
 static void vm_pageout_scan(struct vm_domain *vmd, int pass);
 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);
 
+SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
+    NULL);
+
 struct proc *pageproc;
 
 static struct kproc_desc page_kp = {
Lines 126-134 static struct kproc_desc page_kp = {
 	vm_pageout,
 	&pageproc
 };
-SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
+SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
     &page_kp);
 
+SDT_PROVIDER_DEFINE(vm);
+SDT_PROBE_DEFINE(vm, , , vm__lowmem_cache);
+SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
+
 #if !defined(NO_SWAPPING)
 /* the kernel process "vm_daemon"*/
 static void vm_daemon(void);
Lines 663-668 vm_pageout_grow_cache(int tries, vm_paddr_t low, v
 		 * may acquire locks and/or sleep, so they can only be invoked
 		 * when "tries" is greater than zero.
 		 */
+		SDT_PROBE0(vm, , , vm__lowmem_cache);
 		EVENTHANDLER_INVOKE(vm_lowmem, 0);
 
 		/*
Lines 921-930 vm_pageout_scan(struct vm_domain *vmd, int pass)
 	 * some.  We rate limit to avoid thrashing.
 	 */
 	if (vmd == &vm_dom[0] && pass > 0 &&
-	    lowmem_ticks + (lowmem_period * hz) < ticks) {
+	    (ticks - lowmem_ticks) / hz >= lowmem_period) {
 		/*
 		 * Decrease registered cache sizes.
 		 */
+		SDT_PROBE0(vm, , , vm__lowmem_scan);
 		EVENTHANDLER_INVOKE(vm_lowmem, 0);
 		/*
 		 * We do this explicitly after the caches have been
Lines 1650-1664 vm_pageout_worker(void *arg)
 }
 
 /*
- *	vm_pageout is the high level pageout daemon.
+ *	vm_pageout_init initialises basic pageout daemon settings.
  */
 static void
-vm_pageout(void)
+vm_pageout_init(void)
 {
-#if MAXMEMDOM > 1
-	int error, i;
-#endif
-
 	/*
 	 * Initialize some paging parameters.
 	 */
Lines 1704-1710 static void
 	/* XXX does not really belong here */
 	if (vm_page_max_wired == 0)
 		vm_page_max_wired = cnt.v_free_count / 3;
+}
 
+/*
+ *     vm_pageout is the high level pageout daemon.
+ */
+static void
+vm_pageout(void)
+{
+#if MAXMEMDOM > 1
+	int error, i;
+#endif
+
 	swap_pager_swap_init();
 #if MAXMEMDOM > 1
 	for (i = 1; i < vm_ndomains; i++) {
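One detail worth calling out in the vm_pageout_scan() hunk above: the lowmem rate limit changes from "lowmem_ticks + (lowmem_period * hz) < ticks" to "(ticks - lowmem_ticks) / hz >= lowmem_period". The subtraction form keeps giving the elapsed interval even after the tick counter wraps, while the addition form can overflow and then stop firing. A small userland sketch of the difference near a wrap point follows; hz, the period, and the use of 32-bit unsigned counters are stand-ins chosen for the demo, not kernel values:

#include <stdint.h>
#include <stdio.h>

#define HZ	1000U		/* assumed tick rate for the demo */
#define PERIOD	10U		/* assumed lowmem period, in seconds */

/* Old style check: the addition can wrap and then compares garbage. */
static int
due_old(uint32_t ticks, uint32_t lowmem_ticks)
{
	return (lowmem_ticks + (PERIOD * HZ) < ticks);
}

/* New style check: unsigned subtraction still yields the elapsed ticks. */
static int
due_new(uint32_t ticks, uint32_t lowmem_ticks)
{
	return ((ticks - lowmem_ticks) / HZ >= PERIOD);
}

int
main(void)
{
	uint32_t last = UINT32_MAX - 15U * HZ + 1;	/* shortly before wrap */
	uint32_t now = last + 20U * HZ;			/* 20s later, wrapped */

	/* 20 seconds have passed, so both checks should report 1. */
	printf("old: %d  new: %d\n", due_old(now, last), due_new(now, last));
	return (0);
}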
