Differences between the previous attachment and this patch (bug 187594)

(-)sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c (-4 / +26 lines)
Lines 126-145
 }
 SYSINIT(kmem_size_init, SI_SUB_KMEM, SI_ORDER_ANY, kmem_size_init, NULL);
 
+/*
+ * The returns from kmem_free_*size are only valid once the pagedaemon
+ * has been initialised; before then they return 0.
+ *
+ * To ensure the returns are valid, the caller can use a SYSINIT with
+ * subsystem set to SI_SUB_KTHREAD_PAGE and order of at least
+ * SI_ORDER_SECOND.
+ */
 uint64_t
-kmem_size(void)
+kmem_free_target_size(void)
 {
 
-	return (kmem_size_val);
+	return ((uint64_t)cnt.v_free_target * PAGE_SIZE);
 }
 
 uint64_t
-kmem_used(void)
+kmem_free_min_size(void)
 {
 
-	return (vmem_size(kmem_arena, VMEM_ALLOC));
+	return ((uint64_t)cnt.v_free_min * PAGE_SIZE);
 }
 
+uint64_t
+kmem_free_size(void)
+{
+
+	return ((uint64_t)cnt.v_free_count * PAGE_SIZE);
+}
+
+uint64_t
+kmem_size(void)
+{
+
+	return (kmem_size_val);
+}
+
 static int
 kmem_std_constructor(void *mem, int size __unused, void *private, int flags)
 {
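Note (not part of the patch): per the comment above, kmem_free_target_size() and friends only return meaningful values once the pagedaemon has been initialised. A minimal hypothetical consumer, modelled on the arc_free_target_init() hook this patch adds to arc.c further below, would defer the read to a SYSINIT in SI_SUB_KTHREAD_PAGE at SI_ORDER_SECOND or later; the names here are illustrative only:

#include <sys/param.h>
#include <sys/kernel.h>
/* kmem_free_target_size() is declared in the compat sys/kmem.h changed below. */

static uint64_t example_free_target;	/* hypothetical variable */

static void
example_free_target_init(void *unused __unused)
{

	/* The pagedaemon SYSINITs have already run by SI_ORDER_ANY. */
	example_free_target = kmem_free_target_size();
}
SYSINIT(example_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
    example_free_target_init, NULL);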
(-)sys/cddl/compat/opensolaris/sys/kmem.h (-1 / +6 lines)
Lines 66-72
 void *zfs_kmem_alloc(size_t size, int kmflags);
 void zfs_kmem_free(void *buf, size_t size);
 uint64_t kmem_size(void);
-uint64_t kmem_used(void);
+
+/* Return vals from kmem_free_*size are only valid after the pagedaemon init. */
+uint64_t kmem_free_size(void);
+uint64_t kmem_free_target_size(void);
+uint64_t kmem_free_min_size(void);
+
 kmem_cache_t *kmem_cache_create(char *name, size_t bufsize, size_t align,
     int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
     void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags);
(-)sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c (-9 / +63 lines)
Lines 193-201
  */
 static boolean_t arc_warm;
 
-/*
- * These tunables are for performance analysis.
- */
 uint64_t zfs_arc_max;
 uint64_t zfs_arc_min;
 uint64_t zfs_arc_meta_limit = 0;

Lines 204-210
 int zfs_arc_p_min_shift = 0;
 int zfs_disable_dup_eviction = 0;
 uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
+uint64_t zfs_arc_free_target = (1 << 30); /* 1GB */
 
+static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS);
+
+#ifdef _KERNEL
+static void
+arc_free_target_init(void *unused __unused)
+{
+
+	zfs_arc_free_target = kmem_free_target_size() * 3;
+}
+SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
+    arc_free_target_init, NULL);
+#endif
+
+
 TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
 TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
 TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);

Lines 217-223
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_blocksize, CTLFLAG_RDTUN,
     &zfs_arc_average_blocksize, 0,
     "ARC average blocksize");
+/*
+ * We don't have a tunable for arc_free_target due to the dependency on
+ * pagedaemon initialisation.
+ */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
+    CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
+    sysctl_vfs_zfs_arc_free_target, "QU",
+    "Desired amount of free memory below which ARC triggers reclaim");
 
+static int
+sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
+{
+	uint64_t val;
+	int err;
+
+	val = zfs_arc_free_target;
+	err = sysctl_handle_64(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	if (val < kmem_free_min_size())
+		return (EINVAL);
+
+	zfs_arc_free_target = val;
+
+	return (0);
+}
+
 /*
  * Note that buffers can be in one of 6 states:
  *	ARC_anon	- anonymous (discussed below)

Lines 2421-2429
 void
 arc_shrink(void)
 {
+
 	if (arc_c > arc_c_min) {
 		uint64_t to_free;
 
+		DTRACE_PROBE2(arc__shrink, uint64_t, arc_c, uint64_t,
+			arc_c_min);
 #ifdef _KERNEL
 		to_free = arc_c >> arc_shrink_shift;
 #else

Lines 2443-2450
 		ASSERT((int64_t)arc_p >= 0);
 	}
 
-	if (arc_size > arc_c)
+	if (arc_size > arc_c) {
+		DTRACE_PROBE2(arc__shrink_adjust, uint64_t, arc_size,
+			uint64_t, arc_c);
 		arc_adjust();
+	}
 }
 
 static int needfree = 0;

Lines 2455-2469
 
 #ifdef _KERNEL
 
-	if (needfree)
+	if (needfree) {
+		DTRACE_PROBE(arc__reclaim_needfree);
 		return (1);
+	}
 
+	if (kmem_free_size() < zfs_arc_free_target) {
+		DTRACE_PROBE2(arc__reclaim_freetarget, uint64_t,
+		    kmem_free_size(), uint64_t, zfs_arc_free_target);
+		return (1);
+	}
+
 	/*
 	 * Cooperate with pagedaemon when it's time for it to scan
 	 * and reclaim some pages.
 	 */
-	if (vm_paging_needed())
+	if (vm_paging_needed()) {
+		DTRACE_PROBE(arc__reclaim_paging);
 		return (1);
+	}
 
 #ifdef sun
 	/*

Lines 2507-2515
 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
 		return (1);
 #endif
-#else	/* !sun */
-	if (kmem_used() > (kmem_size() * 3) / 4)
-		return (1);
 #endif	/* sun */
 
 #else

Lines 2516-2521
 	if (spa_get_random(100) == 0)
 		return (1);
 #endif
+	DTRACE_PROBE(arc__reclaim_no);
+
 	return (0);
 }
 
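Note (not part of the patch): the new vfs.zfs.arc_free_target OID is read/write, and the handler above rejects values below kmem_free_min_size() with EINVAL. In this version of the patch the value is a byte count, since the kmem_free_*size() helpers return page counters multiplied by PAGE_SIZE. A hypothetical userland check of the value using sysctlbyname(3) could look like this:

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t target;
	size_t len = sizeof(target);

	/* Read the ARC free-memory target introduced by this patch. */
	if (sysctlbyname("vfs.zfs.arc_free_target", &target, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("vfs.zfs.arc_free_target: %ju\n", (uintmax_t)target);
	return (0);
}

Attempting to set the OID below the minimum (for example with sysctl(8)) should fail with "Invalid argument", matching the EINVAL check in the handler.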
(-)sys/vm/vm_pageout.c (-7 / +18 lines)
Lines 115-124
 
 /* the kernel process "vm_pageout"*/
 static void vm_pageout(void);
+static void vm_pageout_init(void);
 static int vm_pageout_clean(vm_page_t);
 static void vm_pageout_scan(struct vm_domain *vmd, int pass);
 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);
 
+SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
+    NULL);
+
 struct proc *pageproc;
 
 static struct kproc_desc page_kp = {

Lines 126-132
 	vm_pageout,
 	&pageproc
 };
-SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
+SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
     &page_kp);
 
 #if !defined(NO_SWAPPING)

Lines 1647-1661
 }
 
 /*
- *	vm_pageout is the high level pageout daemon.
+ *	vm_pageout_init initialises basic pageout daemon settings.
  */
 static void
-vm_pageout(void)
+vm_pageout_init(void)
 {
-#if MAXMEMDOM > 1
-	int error, i;
-#endif
-
 	/*
 	 * Initialize some paging parameters.
 	 */

Lines 1701-1707
 	/* XXX does not really belong here */
 	if (vm_page_max_wired == 0)
 		vm_page_max_wired = cnt.v_free_count / 3;
+}
 
+/*
+ *	vm_pageout is the high level pageout daemon.
+ */
+static void
+vm_pageout(void)
+{
+#if MAXMEMDOM > 1
+	int error, i;
+#endif
+
 	swap_pager_swap_init();
 #if MAXMEMDOM > 1
 	for (i = 1; i < vm_ndomains; i++) {
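Note (not part of the patch): the vm_pageout.c changes rely on SYSINIT ordering within SI_SUB_KTHREAD_PAGE: vm_pageout_init() now runs at SI_ORDER_FIRST, the pagedaemon kproc is started at SI_ORDER_SECOND, and consumers such as the arc_free_target_init() hook above (SI_ORDER_ANY) run after both, which is what makes the kmem_free_*size() values valid for them. A small compile-time sketch of that ordering assumption, using the constants from <sys/kernel.h>:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

/* Lower SI_ORDER_* values run earlier within the same SI_SUB_* subsystem. */
CTASSERT(SI_ORDER_FIRST < SI_ORDER_SECOND);
CTASSERT(SI_ORDER_SECOND < SI_ORDER_ANY);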
