--- sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c	(-5 / +34 lines)
+++ sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c
@@ -126,18 +126,47 @@ kmem_size_init(void *unused __unused)
 }
 SYSINIT(kmem_size_init, SI_SUB_KMEM, SI_ORDER_ANY, kmem_size_init, NULL);
 
-uint64_t
-kmem_size(void)
+/*
+ * The return values from kmem_free_* are only valid once the pagedaemon
+ * has been initialised, before then they return 0.
+ *
+ * To ensure the returns are valid the caller can use a SYSINIT with
+ * subsystem set to SI_SUB_KTHREAD_PAGE and an order of at least
+ * SI_ORDER_SECOND.
+ */
+u_int
+kmem_free_target(void)
 {
 
-	return (kmem_size_val);
+	return (cnt.v_free_target);
 }
 
+u_int
+kmem_free_min(void)
+{
+
+	return (cnt.v_free_min);
+}
+
+u_int
+kmem_free_count(void)
+{
+
+	return (cnt.v_free_count);
+}
+
+u_int
+kmem_page_count(void)
+{
+
+	return (cnt.v_page_count);
+}
+
 uint64_t
-kmem_used(void)
+kmem_size(void)
 {
 
-	return (vmem_size(kmem_arena, VMEM_ALLOC));
+	return (kmem_size_val);
 }
 
 static int
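Note on the ordering contract documented above: a consumer that needs a valid value caches it from a SYSINIT registered at SI_SUB_KTHREAD_PAGE with an order of SI_ORDER_SECOND or later. The sketch below is illustrative only (the my_free_target names are not part of the patch, and it assumes the ZFS/opensolaris compat include paths so that <sys/kmem.h> resolves to the header patched below); the arc.c hunk further down registers an equivalent SYSINIT named arc_free_target_init.

/* Hypothetical consumer of the new accessors (not part of the patch). */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

static u_int my_free_target;		/* illustrative name */

static void
my_free_target_init(void *unused __unused)
{

	/* Non-zero here because the pagedaemon targets are set by now. */
	my_free_target = kmem_free_target();
}
SYSINIT(my_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
    my_free_target_init, NULL);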
--- sys/cddl/compat/opensolaris/sys/kmem.h	(-1 / +10 lines)
+++ sys/cddl/compat/opensolaris/sys/kmem.h
@@ -66,7 +66,16 @@ typedef struct kmem_cache {
 void *zfs_kmem_alloc(size_t size, int kmflags);
 void zfs_kmem_free(void *buf, size_t size);
 uint64_t kmem_size(void);
-uint64_t kmem_used(void);
+u_int kmem_page_count(void);
+
+/*
+ * The return values from kmem_free_* are only valid once the pagedaemon
+ * has been initialised, before then they return 0.
+ */
+u_int kmem_free_count(void);
+u_int kmem_free_target(void);
+u_int kmem_free_min(void);
+
 kmem_cache_t *kmem_cache_create(char *name, size_t bufsize, size_t align,
     int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
     void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags);
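Note the unit change in this header: the removed kmem_used() reported bytes allocated from the kmem arena, while the new accessors report page counts taken from the vm counters (hence u_int rather than uint64_t). A minimal sketch of bridging the two, should a caller still need bytes; the helper name kmem_free_bytes is illustrative and not part of the patch:

#include <sys/param.h>		/* PAGE_SIZE */
#include <sys/kmem.h>		/* the compat declarations above */

/*
 * Express the free page count in bytes so it can be compared with
 * kmem_size(), which returns a byte count.
 */
static uint64_t
kmem_free_bytes(void)
{

	return ((uint64_t)kmem_free_count() * PAGE_SIZE);
}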
--- sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c	(-9 / +64 lines)
+++ sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
@@ -193,9 +193,6 @@ extern int zfs_prefetch_disable;
  */
 static boolean_t arc_warm;
 
-/*
- * These tunables are for performance analysis.
- */
 uint64_t zfs_arc_max;
 uint64_t zfs_arc_min;
 uint64_t zfs_arc_meta_limit = 0;
@@ -204,10 +201,24 @@ int zfs_arc_shrink_shift = 0;
 int zfs_arc_p_min_shift = 0;
 int zfs_disable_dup_eviction = 0;
 uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
+u_int zfs_arc_free_target = (1 << 19); /* default before pagedaemon init only */
 
+static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS);
+
+#ifdef _KERNEL
+static void
+arc_free_target_init(void *unused __unused)
+{
+
+	zfs_arc_free_target = kmem_free_target();
+}
+SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
+    arc_free_target_init, NULL);
+#endif
+
 TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
 TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
 TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
 TUNABLE_QUAD("vfs.zfs.arc_average_blocksize", &zfs_arc_average_blocksize);
 SYSCTL_DECL(_vfs_zfs);
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
@@ -217,7 +228,36 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_blocksize, CTLFLAG_RDTUN,
     &zfs_arc_average_blocksize, 0,
     "ARC average blocksize");
+/*
+ * We don't have a tunable for arc_free_target due to the dependency on
+ * pagedaemon initialisation.
+ */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
+    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(u_int),
+    sysctl_vfs_zfs_arc_free_target, "IU",
+    "Desired number of free pages below which ARC triggers reclaim");
 
+static int
+sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
+{
+	u_int val;
+	int err;
+
+	val = zfs_arc_free_target;
+	err = sysctl_handle_int(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	if (val < kmem_free_min())
+		return (EINVAL);
+	if (val > kmem_page_count())
+		return (EINVAL);
+
+	zfs_arc_free_target = val;
+
+	return (0);
+}
+
 /*
  * Note that buffers can be in one of 6 states:
  *	ARC_anon	- anonymous (discussed below)
@@ -2421,9 +2461,12 @@ arc_flush(spa_t *spa)
 void
 arc_shrink(void)
 {
+
 	if (arc_c > arc_c_min) {
 		uint64_t to_free;
 
+		DTRACE_PROBE2(arc__shrink, uint64_t, arc_c, uint64_t,
+			arc_c_min);
 #ifdef _KERNEL
 		to_free = arc_c >> arc_shrink_shift;
 #else
@@ -2443,8 +2486,11 @@ arc_shrink(void)
 		ASSERT((int64_t)arc_p >= 0);
 	}
 
-	if (arc_size > arc_c)
+	if (arc_size > arc_c) {
+		DTRACE_PROBE2(arc__shrink_adjust, uint64_t, arc_size,
+			uint64_t, arc_c);
 		arc_adjust();
+	}
 }
 
 static int needfree = 0;
@@ -2455,15 +2501,25 @@ arc_reclaim_needed(void)
 
 #ifdef _KERNEL
 
-	if (needfree)
+	if (needfree) {
+		DTRACE_PROBE(arc__reclaim_needfree);
 		return (1);
+	}
 
+	if (kmem_free_count() < zfs_arc_free_target) {
+		DTRACE_PROBE2(arc__reclaim_freetarget, uint64_t,
+		    kmem_free_count(), uint64_t, zfs_arc_free_target);
+		return (1);
+	}
+
 	/*
 	 * Cooperate with pagedaemon when it's time for it to scan
 	 * and reclaim some pages.
 	 */
-	if (vm_paging_needed())
+	if (vm_paging_needed()) {
+		DTRACE_PROBE(arc__reclaim_paging);
 		return (1);
+	}
 
 #ifdef sun
 	/*
@@ -2507,9 +2563,6 @@ arc_reclaim_needed(void)
 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
 		return (1);
 #endif
-#else	/* !sun */
-	if (kmem_used() > (kmem_size() * 3) / 4)
-		return (1);
 #endif	/* sun */
 
 #else
@@ -2516,6 +2569,8 @@ arc_reclaim_needed(void)
 	if (spa_get_random(100) == 0)
 		return (1);
 #endif
+	DTRACE_PROBE(arc__reclaim_no);
+
 	return (0);
 }
 
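The new vfs.zfs.arc_free_target OID is an unsigned integer (sysctl format "IU") counted in pages and is writable at run time; the handler rejects values below kmem_free_min() or above kmem_page_count() with EINVAL. A userland sketch of reading and raising it via sysctlbyname(3) follows; it is illustrative only (setting the value requires root, and the +1024 adjustment is arbitrary):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	u_int target;
	size_t len = sizeof(target);

	/* Read the current target (in pages). */
	if (sysctlbyname("vfs.zfs.arc_free_target", &target, &len, NULL, 0) != 0)
		err(1, "read vfs.zfs.arc_free_target");
	printf("arc_free_target: %u pages\n", target);

	/* Write a new value; out-of-range values are rejected with EINVAL. */
	target += 1024;
	if (sysctlbyname("vfs.zfs.arc_free_target", NULL, NULL,
	    &target, sizeof(target)) != 0)
		err(1, "write vfs.zfs.arc_free_target");
	return (0);
}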
--- sys/vm/vm_pageout.c	(-7 / +18 lines)
+++ sys/vm/vm_pageout.c
@@ -115,10 +115,14 @@ __FBSDID("$FreeBSD$");
 
 /* the kernel process "vm_pageout"*/
 static void vm_pageout(void);
+static void vm_pageout_init(void);
 static int vm_pageout_clean(vm_page_t);
 static void vm_pageout_scan(struct vm_domain *vmd, int pass);
 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);
 
+SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
+    NULL);
+
 struct proc *pageproc;
 
 static struct kproc_desc page_kp = {
@@ -126,7 +130,7 @@ static struct kproc_desc page_kp = {
 	vm_pageout,
 	&pageproc
 };
-SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
+SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
     &page_kp);
 
 #if !defined(NO_SWAPPING)
@@ -1637,15 +1641,11 @@ vm_pageout_worker(void *arg)
 }
 
 /*
- *	vm_pageout is the high level pageout daemon.
+ *	vm_pageout_init initialises basic pageout daemon settings.
 */
 static void
-vm_pageout(void)
+vm_pageout_init(void)
 {
-#if MAXMEMDOM > 1
-	int error, i;
-#endif
-
 	/*
 	 * Initialize some paging parameters.
 	 */
@@ -1691,7 +1691,18 @@ static void
 	/* XXX does not really belong here */
 	if (vm_page_max_wired == 0)
 		vm_page_max_wired = cnt.v_free_count / 3;
+}
 
+/*
+ *     vm_pageout is the high level pageout daemon.
+ */
+static void
+vm_pageout(void)
+{
+#if MAXMEMDOM > 1
+	int error, i;
+#endif
+
 	swap_pager_swap_init();
 #if MAXMEMDOM > 1
 	for (i = 1; i < vm_ndomains; i++) {
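With vm_pageout_init() registered at SI_ORDER_FIRST and the pagedaemon kproc moved to SI_ORDER_SECOND, the paging targets are computed before anything later in SI_SUB_KTHREAD_PAGE runs, which is what makes the kmem_free_*() ordering contract in the opensolaris_kmem.c hunk hold. A rough sketch of code that relies on that ordering; the function name free_target_check is illustrative and the KASSERT only has effect with INVARIANTS:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vmmeter.h>

static void
free_target_check(void *unused __unused)
{

	/* vm_pageout_init() has already run at SI_ORDER_FIRST. */
	KASSERT(cnt.v_free_target != 0, ("pageout targets not initialised"));
	printf("v_free_target = %u pages\n", cnt.v_free_target);
}
SYSINIT(free_target_check, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
    free_target_check, NULL);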
