
(-)sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c (-43 lines)
Lines 126-167 kmem_size_init(void *unused __unused)
 }
 SYSINIT(kmem_size_init, SI_SUB_KMEM, SI_ORDER_ANY, kmem_size_init, NULL);
 
-/*
- * The return values from kmem_free_* are only valid once the pagedaemon
- * has been initialised, before then they return 0.
- *
- * To ensure the returns are valid the caller can use a SYSINIT with
- * subsystem set to SI_SUB_KTHREAD_PAGE and an order of at least
- * SI_ORDER_SECOND.
- */
-u_int
-kmem_free_target(void)
-{
-
-	return (vm_cnt.v_free_target);
-}
-
-u_int
-kmem_free_min(void)
-{
-
-	return (vm_cnt.v_free_min);
-}
-
-u_int
-kmem_free_count(void)
-{
-
-	return (vm_cnt.v_free_count + vm_cnt.v_cache_count);
-}
-
-u_int
-kmem_page_count(void)
-{
-
-	return (vm_cnt.v_page_count);
-}
-
 uint64_t
 kmem_size(void)
 {

Lines 169-181 kmem_size(void)
 	return (kmem_size_val);
 }
 
-uint64_t
-kmem_used(void)
-{
-
-	return (vmem_size(kmem_arena, VMEM_ALLOC));
-}
-
 static int
 kmem_std_constructor(void *mem, int size __unused, void *private, int flags)
 {
(-)sys/cddl/compat/opensolaris/sys/kmem.h (-11 / +3 lines)
Lines 66-82 typedef struct kmem_cache {
 void *zfs_kmem_alloc(size_t size, int kmflags);
 void zfs_kmem_free(void *buf, size_t size);
 uint64_t kmem_size(void);
-uint64_t kmem_used(void);
-u_int kmem_page_count(void);
-
-/*
- * The return values from kmem_free_* are only valid once the pagedaemon
- * has been initialised, before then they return 0.
- */
-u_int kmem_free_count(void);
-u_int kmem_free_target(void);
-u_int kmem_free_min(void);
-
 kmem_cache_t *kmem_cache_create(char *name, size_t bufsize, size_t align,
     int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
     void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags);

Lines 88-93 void kmem_reap(void);
 int kmem_debugging(void);
 void *calloc(size_t n, size_t s);
 
+#define	freemem				(vm_cnt.v_free_count + vm_cnt.v_cache_count)
+#define	minfree				vm_cnt.v_free_min
+#define	heap_arena			kmem_arena
 #define	kmem_alloc(size, kmflags)	zfs_kmem_alloc((size), (kmflags))
 #define	kmem_zalloc(size, kmflags)	zfs_kmem_alloc((size), (kmflags) | M_ZERO)
 #define	kmem_free(buf, size)		zfs_kmem_free((buf), (size))
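
Side note, for illustration only and not part of the patch: the three macros added above map the illumos names onto FreeBSD's VM counters and kmem arena, so shared OpenSolaris code can test free memory directly instead of calling the FreeBSD-only kmem_free_*() wrappers that the earlier revision introduced. A minimal sketch of the substitution, assuming the caller includes this compat kmem.h (which in turn relies on vm_cnt from <sys/vmmeter.h>); the function name is hypothetical:

	/*
	 * Sketch only -- shows what a caller sees after this header change.
	 */
	static int
	memory_is_low(u_int target)
	{

		/* Old spelling (removed above): kmem_free_count() < target */
		/* New spelling: the illumos name expands to the VM counters. */
		return (freemem < target);	/* vm_cnt.v_free_count + vm_cnt.v_cache_count */
	}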
(-)sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c (-53 / +71 lines)
Lines 138-143
 #include <sys/sdt.h>
 
 #include <vm/vm_pageout.h>
+#include <machine/vmparam.h>
 
 #ifdef illumos
 #ifndef _KERNEL

Lines 201-207 int zfs_arc_shrink_shift = 0;
 int zfs_arc_p_min_shift = 0;
 int zfs_disable_dup_eviction = 0;
 uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
-u_int zfs_arc_free_target = (1 << 19); /* default before pagedaemon init only */
+u_int zfs_arc_free_target = (1 << 16); /* default before pagedaemon init only */
 
 static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS);
 

Lines 210-220 static void
 arc_free_target_init(void *unused __unused)
 {
 
-	zfs_arc_free_target = kmem_free_target();
+	zfs_arc_free_target = (vm_pageout_wakeup_thresh / 2) * 3;
 }
 SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
     arc_free_target_init, NULL);
-#endif
 
 TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
 SYSCTL_DECL(_vfs_zfs);

Lines 245-253 sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS
 	if (err != 0 || req->newptr == NULL)
 		return (err);
 
-	if (val < kmem_free_min())
+	if (val < minfree)
 		return (EINVAL);
-	if (val > kmem_page_count())
+	if (val > vm_cnt.v_page_count)
 		return (EINVAL);
 
 	zfs_arc_free_target = val;

Lines 254-259 sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS
 
 	return (0);
 }
+#endif
 
 /*
  * Note that buffers can be in one of 6 states:

Lines 2462-2469 arc_shrink(void)
 	if (arc_c > arc_c_min) {
 		uint64_t to_free;
 
-		DTRACE_PROBE2(arc__shrink, uint64_t, arc_c, uint64_t,
-			arc_c_min);
+		DTRACE_PROBE4(arc__shrink, uint64_t, arc_c, uint64_t,
+			arc_c_min, uint64_t, arc_p, uint64_t, to_free);
 #ifdef _KERNEL
 		to_free = arc_c >> arc_shrink_shift;
 #else

Lines 2479-2484 arc_shrink(void)
 			arc_c = MAX(arc_size, arc_c_min);
 		if (arc_p > arc_c)
 			arc_p = (arc_c >> 1);
+
+		DTRACE_PROBE2(arc__shrunk, uint64_t, arc_c, uint64_t,
+			arc_p);
+
 		ASSERT(arc_c >= arc_c_min);
 		ASSERT((int64_t)arc_p >= 0);
 	}

Lines 2503-2520 arc_reclaim_needed(void)
 		return (1);
 	}
 
-	if (kmem_free_count() < zfs_arc_free_target) {
-		DTRACE_PROBE2(arc__reclaim_freetarget, uint64_t,
-		    kmem_free_count(), uint64_t, zfs_arc_free_target);
-		return (1);
-	}
-
 	/*
 	 * Cooperate with pagedaemon when it's time for it to scan
 	 * and reclaim some pages.
 	 */
-	if (vm_paging_needed()) {
-		DTRACE_PROBE(arc__reclaim_paging);
+	if (freemem < zfs_arc_free_target) {
+		DTRACE_PROBE2(arc__reclaim_freemem, uint64_t,
+		    freemem, uint64_t, zfs_arc_free_target);
 		return (1);
 	}
 

Lines 2544-2551 arc_reclaim_needed(void)
 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
 		return (1);
 
-#if defined(__i386)
 	/*
+	 * Check that we have enough availrmem that memory locking (e.g., via
+	 * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
+	 * stores the number of pages that cannot be locked; when availrmem
+	 * drops below pages_pp_maximum, page locking mechanisms such as
+	 * page_pp_lock() will fail.)
+	 */
+	if (availrmem <= pages_pp_maximum)
+		return (1);
+
+#endif	/* sun */
+#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
+	/*
 	 * If we're on an i386 platform, it's possible that we'll exhaust the
 	 * kernel heap space before we ever run out of available physical
 	 * memory.  Most checks of the size of the heap_area compare against

Lines 2556-2580 arc_reclaim_needed(void)
 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
 	 * free)
 	 */
-	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
-	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
-		return (1);
-#endif
-#else	/* sun */
-#ifdef __i386__
-	/* i386 has KVA limits that the raw page counts above don't consider */
-	if (kmem_used() > (kmem_size() * 3) / 4) {
+	if (vmem_size(heap_arena, VMEM_FREE) <
+	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2)) {
 		DTRACE_PROBE2(arc__reclaim_used, uint64_t,
-		    kmem_used(), uint64_t, (kmem_size() * 3) / 4);
+		    vmem_size(heap_arena, VMEM_FREE), uint64_t,
+		    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2);
 		return (1);
 	}
 #endif
+#ifdef sun
+	/*
+	 * If zio data pages are being allocated out of a separate heap segment,
+	 * then enforce that the size of available vmem for this arena remains
+	 * above about 1/16th free.
+	 *
+	 * Note: The 1/16th arena free requirement was put in place
+	 * to aggressively evict memory from the arc in order to avoid
+	 * memory fragmentation issues.
+	 */
+	if (zio_arena != NULL &&
+	    vmem_size(zio_arena, VMEM_FREE) <
+	    (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
+		return (1);
 #endif	/* sun */
 
-#else
+#else	/* _KERNEL */
 	if (spa_get_random(100) == 0)
 		return (1);
-#endif
+#endif	/* _KERNEL */
 	DTRACE_PROBE(arc__reclaim_no);
 
 	return (0);

Lines 2583-2589 arc_reclaim_needed(void)
 extern kmem_cache_t	*zio_buf_cache[];
 extern kmem_cache_t	*zio_data_buf_cache[];
 
-static void
+static void __used
 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
 {
 	size_t			i;

Lines 2590-2595 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
 	kmem_cache_t		*prev_cache = NULL;
 	kmem_cache_t		*prev_data_cache = NULL;
 
+	DTRACE_PROBE(arc__kmem_reap_start);
 #ifdef _KERNEL
 	if (arc_meta_used >= arc_meta_limit) {
 		/*

Lines 2625-2630 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
 	}
 	kmem_cache_reap_now(buf_cache);
 	kmem_cache_reap_now(hdr_cache);
+
+#ifdef sun
+	/*
+	 * Ask the vmem arena to reclaim unused memory from its
+	 * quantum caches.
+	 */
+	if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
+		vmem_qcache_reap(zio_arena);
+#endif
+	DTRACE_PROBE(arc__kmem_reap_end);
 }
 
 static void

Lines 2642-2647 arc_reclaim_thread(void *dummy __unused)
 
 			if (arc_no_grow) {
 				if (last_reclaim == ARC_RECLAIM_CONS) {
+					DTRACE_PROBE(arc__reclaim_aggr_no_grow);
 					last_reclaim = ARC_RECLAIM_AGGR;
 				} else {
 					last_reclaim = ARC_RECLAIM_CONS;

Lines 2649-2654 arc_reclaim_thread(void *dummy __unused)
 			} else {
 				arc_no_grow = TRUE;
 				last_reclaim = ARC_RECLAIM_AGGR;
+				DTRACE_PROBE(arc__reclaim_aggr);
 				membar_producer();
 			}
 

Lines 2753-2758 arc_adapt(int bytes, arc_state_t *state)
 	 * cache size, increment the target cache size
 	 */
 	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
+		DTRACE_PROBE1(arc__inc_adapt, int, bytes);
 		atomic_add_64(&arc_c, (int64_t)bytes);
 		if (arc_c > arc_c_max)
 			arc_c = arc_c_max;

Lines 2774-2793 arc_evict_needed(arc_buf_contents_t type)
 	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
 		return (1);
 
-#ifdef sun
-#ifdef _KERNEL
-	/*
-	 * If zio data pages are being allocated out of a separate heap segment,
-	 * then enforce that the size of available vmem for this area remains
-	 * above about 1/32nd free.
-	 */
-	if (type == ARC_BUFC_DATA && zio_arena != NULL &&
-	    vmem_size(zio_arena, VMEM_FREE) <
-	    (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
-		return (1);
-#endif
-#endif	/* sun */
-
 	if (arc_reclaim_needed())
 		return (1);
 

Lines 3946-3965 static int
 arc_memory_throttle(uint64_t reserve, uint64_t txg)
 {
 #ifdef _KERNEL
-	uint64_t available_memory =
-	    ptoa((uintmax_t)vm_cnt.v_free_count + vm_cnt.v_cache_count);
+	uint64_t available_memory = ptob(freemem);
 	static uint64_t page_load = 0;
 	static uint64_t last_txg = 0;
 
-#ifdef sun
-#if defined(__i386)
+#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
 	available_memory =
-	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
+	    MIN(available_memory, ptob(vmem_size(heap_arena, VMEM_FREE)));
 #endif
-#endif	/* sun */
 
-	if (vm_cnt.v_free_count + vm_cnt.v_cache_count >
-	    (uint64_t)physmem * arc_lotsfree_percent / 100)
+	if (freemem > (uint64_t)physmem * arc_lotsfree_percent / 100)
 		return (0);
 
 	if (txg > last_txg) {

Lines 3972-3978 arc_memory_throttle(uint64_t reserve, uint64_t txg
 	 * continue to let page writes occur as quickly as possible.
 	 */
 	if (curproc == pageproc) {
-		if (page_load > available_memory / 4)
+		if (page_load > MAX(ptob(minfree), available_memory) / 4)
 			return (SET_ERROR(ERESTART));
 		/* Note: reserve is inflated, so we deflate */
 		page_load += reserve / 8;

Lines 4000-4007 arc_tempreserve_space(uint64_t reserve, uint64_t t
 	int error;
 	uint64_t anon_size;
 
-	if (reserve > arc_c/4 && !arc_no_grow)
+	if (reserve > arc_c/4 && !arc_no_grow) {
 		arc_c = MIN(arc_c_max, reserve * 4);
+		DTRACE_PROBE1(arc__set_reserve, uint64_t, arc_c);
+	}
 	if (reserve > arc_c)
 		return (SET_ERROR(ENOMEM));
 

Lines 4055-4060 arc_lowmem(void *arg __unused, int howto __unused)
 	mutex_enter(&arc_lowmem_lock);
 	mutex_enter(&arc_reclaim_thr_lock);
 	needfree = 1;
+	DTRACE_PROBE(arc__needfree);
 	cv_signal(&arc_reclaim_thr_cv);
 
 	/*
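
For readability only (a sketch reconstructed from the arc.c hunks above, not an additional change): with this revision the kernel-side free-memory test in arc_reclaim_needed() reduces to roughly the following, with the page-count check driven by zfs_arc_free_target and the kernel-heap check confined to i386 and platforms without UMA_MD_SMALL_ALLOC.

	/* Reclaim when the free page count drops below the target... */
	if (freemem < zfs_arc_free_target) {
		DTRACE_PROBE2(arc__reclaim_freemem, uint64_t,
		    freemem, uint64_t, zfs_arc_free_target);
		return (1);
	}

#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
	/* ...or when less than 1/4 of the kernel heap arena is free. */
	if (vmem_size(heap_arena, VMEM_FREE) <
	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
		return (1);
#endif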
(-)sys/vm/vm_pageout.c (-1 / +9 lines)
Lines 76-81
 __FBSDID("$FreeBSD$");
 
 #include "opt_vm.h"
+#include "opt_kdtrace.h"
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>

Lines 89-94 __FBSDID("$FreeBSD$");
 #include <sys/racct.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
+#include <sys/sdt.h>
 #include <sys/signalvar.h>
 #include <sys/smp.h>
 #include <sys/vnode.h>

Lines 133-138 static struct kproc_desc page_kp = {
 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
     &page_kp);
 
+SDT_PROVIDER_DEFINE(vm);
+SDT_PROBE_DEFINE(vm, , , vm__lowmem_cache);
+SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
+
 #if !defined(NO_SWAPPING)
 /* the kernel process "vm_daemon"*/
 static void vm_daemon(void);

Lines 667-672 vm_pageout_grow_cache(int tries, vm_paddr_t low, v
 		 * may acquire locks and/or sleep, so they can only be invoked
 		 * when "tries" is greater than zero.
 		 */
+		SDT_PROBE0(vm, , , vm__lowmem_cache);
 		EVENTHANDLER_INVOKE(vm_lowmem, 0);
 
 		/*

Lines 899-905 vm_pageout_map_deactivate_pages(map, desired)
  *	pass 1 - Move inactive to cache or free
  *	pass 2 - Launder dirty pages
  */
-static void
+static void __used
 vm_pageout_scan(struct vm_domain *vmd, int pass)
 {
 	vm_page_t m, next;

Lines 920-925 vm_pageout_scan(struct vm_domain *vmd, int pass)
 		/*
 		 * Decrease registered cache sizes.
 		 */
+		SDT_PROBE0(vm, , , vm__lowmem_scan);
 		EVENTHANDLER_INVOKE(vm_lowmem, 0);
 		/*
 		 * We do this explicitly after the caches have been
