Patch for FreeBSD bug 187594

(-)sys/cddl/compat/opensolaris/kern/opensolaris_kmem.c (-7 lines)
Lines 133-145:
 	return (kmem_size_val);
 }
 
-uint64_t
-kmem_used(void)
-{
-
-	return (vmem_size(kmem_arena, VMEM_ALLOC));
-}
-
 static int
 kmem_std_constructor(void *mem, int size __unused, void *private, int flags)
 {
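For context, a sketch only (kernel symbols, not compilable standalone): the removed kmem_used() reported bytes allocated from the kmem arena, and the old ARC heuristic reclaimed when more than 3/4 of that arena was allocated. The arc.c changes below replace it with a page-based check against the new vfs.zfs.arc_free_target.

/*
 * Sketch of the old check (removed) versus the replacement added in
 * arc.c later in this patch; the wrapper functions are hypothetical.
 */
static int
old_pressure(void)
{
	/* old: reclaim when >3/4 of the kmem arena is allocated */
	return (kmem_used() > (kmem_size() * 3) / 4);
}

static int
new_pressure(void)
{
	/* new: reclaim when free pages drop below the configured target */
	return (freemem < zfs_arc_free_target);
}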
(-)sys/cddl/compat/opensolaris/sys/kmem.h (-1 / +3 lines)
Lines 66-72:
 void *zfs_kmem_alloc(size_t size, int kmflags);
 void zfs_kmem_free(void *buf, size_t size);
 uint64_t kmem_size(void);
-uint64_t kmem_used(void);
 kmem_cache_t *kmem_cache_create(char *name, size_t bufsize, size_t align,
     int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
     void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags);
Lines 78-83:
 int kmem_debugging(void);
 void *calloc(size_t n, size_t s);
 
+#define	freemem				(cnt.v_free_count + cnt.v_cache_count)
+#define	minfree				cnt.v_free_min
+#define	heap_arena			kmem_arena
 #define	kmem_alloc(size, kmflags)	zfs_kmem_alloc((size), (kmflags))
 #define	kmem_zalloc(size, kmflags)	zfs_kmem_alloc((size), (kmflags) | M_ZERO)
 #define	kmem_free(buf, size)		zfs_kmem_free((buf), (size))
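The three new compat macros map illumos names onto FreeBSD's VM counters and kernel arena, so the illumos-derived ARC code can test paging state directly. A hypothetical illustration (not part of the patch) of how code reads once they expand:

static int
memory_is_tight(void)
{
	/*
	 * freemem expands to (cnt.v_free_count + cnt.v_cache_count);
	 * minfree expands to cnt.v_free_min.
	 */
	return (freemem < minfree);
}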
(-)sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c (-39 / +140 lines)
Lines 138-143:
 #include <sys/sdt.h>
 
 #include <vm/vm_pageout.h>
+#include <machine/vmparam.h>
 
 #ifdef illumos
 #ifndef _KERNEL
Lines 193-201:
  */
 static boolean_t arc_warm;
 
-/*
- * These tunables are for performance analysis.
- */
 uint64_t zfs_arc_max;
 uint64_t zfs_arc_min;
 uint64_t zfs_arc_meta_limit = 0;
Lines 204-210:
 int zfs_arc_p_min_shift = 0;
 int zfs_disable_dup_eviction = 0;
 uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
+u_int zfs_arc_free_target = (1 << 16); /* default before pagedaemon init only */
+int zfs_arc_reclaim_cache_free = 1;
 
+static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS);
+
+#ifdef _KERNEL
+static void
+arc_free_target_init(void *unused __unused)
+{
+
+	zfs_arc_free_target = (vm_pageout_wakeup_thresh + ((cnt.v_free_target - vm_pageout_wakeup_thresh) / 2));
+}
+SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
+    arc_free_target_init, NULL);
+
 TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
 TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
 TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
Lines 217-223:
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_blocksize, CTLFLAG_RDTUN,
     &zfs_arc_average_blocksize, 0,
     "ARC average blocksize");
+SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_reclaim_cache_free, CTLFLAG_RWTUN,
+    &zfs_arc_reclaim_cache_free, 0,
+    "ARC treats cached pages as free blocksize");
+/*
+ * We don't have a tunable for arc_free_target due to the dependency on
+ * pagedaemon initialisation.
+ */
+SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
+    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(u_int),
+    sysctl_vfs_zfs_arc_free_target, "IU",
+    "Desired number of free pages below which ARC triggers reclaim");
 
+static int
+sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
+{
+	u_int val;
+	int err;
+
+	val = zfs_arc_free_target;
+	err = sysctl_handle_int(oidp, &val, 0, req);
+	if (err != 0 || req->newptr == NULL)
+		return (err);
+
+	if (val < minfree)
+		return (EINVAL);
+	if (val > cnt.v_page_count)
+		return (EINVAL);
+
+	zfs_arc_free_target = val;
+
+	return (0);
+}
+#endif
+
 /*
  * Note that buffers can be in one of 6 states:
  *	ARC_anon	- anonymous (discussed below)
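The handler clamps writes to the range [minfree, cnt.v_page_count]. A minimal userland sketch (not part of the patch; the 1024-page increment is arbitrary and illustrative) of reading and raising the new OID via sysctlbyname(3):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	u_int val;
	size_t len = sizeof(val);

	/* Read the current target (in pages). */
	if (sysctlbyname("vfs.zfs.arc_free_target", &val, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("arc_free_target: %u pages\n", val);

	/*
	 * Raise it by a hypothetical 1024 pages.  Requires root; the
	 * handler rejects values below minfree or above cnt.v_page_count
	 * with EINVAL.
	 */
	val += 1024;
	if (sysctlbyname("vfs.zfs.arc_free_target", NULL, NULL, &val,
	    sizeof(val)) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	return (0);
}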
Lines 2421-2426:
 void
 arc_shrink(void)
 {
+
 	if (arc_c > arc_c_min) {
 		uint64_t to_free;
 
Lines 2429-2434:
 #else
 		to_free = arc_c >> arc_shrink_shift;
 #endif
+		DTRACE_PROBE4(arc__shrink, uint64_t, arc_c, uint64_t,
+			arc_c_min, uint64_t, arc_p, uint64_t, to_free);
+
 		if (arc_c > arc_c_min + to_free)
 			atomic_add_64(&arc_c, -to_free);
 		else
Lines 2439-2450:
 			arc_c = MAX(arc_size, arc_c_min);
 		if (arc_p > arc_c)
 			arc_p = (arc_c >> 1);
+
+		DTRACE_PROBE2(arc__shrunk, uint64_t, arc_c, uint64_t,
+			arc_p);
+
 		ASSERT(arc_c >= arc_c_min);
 		ASSERT((int64_t)arc_p >= 0);
 	}
 
-	if (arc_size > arc_c)
+	if (arc_size > arc_c) {
+		DTRACE_PROBE2(arc__shrink_adjust, uint64_t, arc_size,
+			uint64_t, arc_c);
 		arc_adjust();
+	}
 }
 
 static int needfree = 0;
Lines 2452-2469:
 static int
 arc_reclaim_needed(void)
 {
+	u_int fm;
 
 #ifdef _KERNEL
+	if (arc_size <= arc_c_min) {
+		DTRACE_PROBE2(arc__reclaim_min, uint64_t, arc_size,
+		    uint64_t, arc_c_min);
+		return (0);
+	}
 
-	if (needfree)
+	if (needfree) {
+		DTRACE_PROBE(arc__reclaim_needfree);
 		return (1);
+	}
 
 	/*
 	 * Cooperate with pagedaemon when it's time for it to scan
 	 * and reclaim some pages.
 	 */
-	if (vm_paging_needed())
+	if (zfs_arc_reclaim_cache_free == 0)
+		fm = cnt.v_free_count;
+	else
+		fm = freemem;
+
+	if (fm < zfs_arc_free_target) {
+		DTRACE_PROBE3(arc__reclaim_freemem, uint64_t,
+		    fm, uint64_t, zfs_arc_free_target,
+		    int, zfs_arc_reclaim_cache_free);
 		return (1);
+	}
 
 #ifdef sun
 	/*
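In short, arc_reclaim_needed() now returns 0 while the ARC is already at its floor, and otherwise triggers on a direct page-count comparison rather than on vm_paging_needed(). A hypothetical userland model of the new trigger (not kernel code); with reclaim_cache_free nonzero, the default, cached pages count as free:

static int
arc_reclaim_needed_model(unsigned int v_free_count, unsigned int v_cache_count,
    unsigned int arc_free_target, int reclaim_cache_free)
{
	unsigned int fm;

	fm = reclaim_cache_free ? v_free_count + v_cache_count : v_free_count;
	return (fm < arc_free_target);
}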
Lines 2491-2498:
 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
 		return (1);
 
-#if defined(__i386)
 	/*
+	 * Check that we have enough availrmem that memory locking (e.g., via
+	 * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
+	 * stores the number of pages that cannot be locked; when availrmem
+	 * drops below pages_pp_maximum, page locking mechanisms such as
+	 * page_pp_lock() will fail.)
+	 */
+	if (availrmem <= pages_pp_maximum)
+		return (1);
+
+#endif	/* sun */
+#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
+	/*
 	 * If we're on an i386 platform, it's possible that we'll exhaust the
 	 * kernel heap space before we ever run out of available physical
 	 * memory.  Most checks of the size of the heap_area compare against
Lines 2503-2534:
 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
 	 * free)
 	 */
-	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
-	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
+	if (vmem_size(heap_arena, VMEM_FREE) <
+	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2)) {
+		DTRACE_PROBE2(arc__reclaim_used, uint64_t,
+		    vmem_size(heap_arena, VMEM_FREE), uint64_t,
+		    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2);
 		return (1);
+	}
 #endif
-#else	/* !sun */
-	if (kmem_used() > (kmem_size() * 3) / 4)
+#ifdef sun
+	/*
+	 * If zio data pages are being allocated out of a separate heap segment,
+	 * then enforce that the size of available vmem for this arena remains
+	 * above about 1/16th free.
+	 *
+	 * Note: The 1/16th arena free requirement was put in place
+	 * to aggressively evict memory from the arc in order to avoid
+	 * memory fragmentation issues.
+	 */
+	if (zio_arena != NULL &&
+	    vmem_size(zio_arena, VMEM_FREE) <
+	    (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
 		return (1);
 #endif	/* sun */
-
-#else
+#else	/* _KERNEL */
 	if (spa_get_random(100) == 0)
 		return (1);
-#endif
+#endif	/* _KERNEL */
+	DTRACE_PROBE(arc__reclaim_no);
+
 	return (0);
 }
 
 extern kmem_cache_t	*zio_buf_cache[];
 extern kmem_cache_t	*zio_data_buf_cache[];
 
-static void
+static void __used
 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
 {
 	size_t			i;
 	kmem_cache_t		*prev_cache = NULL;
 	kmem_cache_t		*prev_data_cache = NULL;
 
+	DTRACE_PROBE(arc__kmem_reap_start);
 #ifdef _KERNEL
 	if (arc_meta_used >= arc_meta_limit) {
 		/*
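Both guards above keep a minimum fraction of an arena free: 1/4 of the kernel heap arena, and (under illumos) 1/16 of the zio arena, to head off fragmentation-driven allocation failures. A simplified sketch of the shape of these guards; the real code compares against VMEM_ALLOC or VMEM_FREE|VMEM_ALLOC totals as shown in the diff, so the single-total form here is an approximation:

#include <stdint.h>

/* shift 2 => 1/4 free floor; shift 4 => 1/16 free floor */
static int
arena_needs_reclaim(uint64_t free_bytes, uint64_t total_bytes, int shift)
{
	return (free_bytes < (total_bytes >> shift));
}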
Lines 2564-2569:
 	}
 	kmem_cache_reap_now(buf_cache);
 	kmem_cache_reap_now(hdr_cache);
+
+#ifdef sun
+	/*
+	 * Ask the vmem areana to reclaim unused memory from its
+	 * quantum caches.
+	 */
+	if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
+		vmem_qcache_reap(zio_arena);
+#endif
+	DTRACE_PROBE(arc__kmem_reap_end);
 }
 
 static void
Lines 2581-2586:
 
 			if (arc_no_grow) {
 				if (last_reclaim == ARC_RECLAIM_CONS) {
+					DTRACE_PROBE(arc__reclaim_aggr_no_grow);
 					last_reclaim = ARC_RECLAIM_AGGR;
 				} else {
 					last_reclaim = ARC_RECLAIM_CONS;
Lines 2588-2593:
 			} else {
 				arc_no_grow = TRUE;
 				last_reclaim = ARC_RECLAIM_AGGR;
+				DTRACE_PROBE(arc__reclaim_aggr);
 				membar_producer();
 			}
 
Lines 2602-2607:
 				 */
 				arc_no_grow = TRUE;
 				last_reclaim = ARC_RECLAIM_AGGR;
+				DTRACE_PROBE(arc__reclaim_aggr_needfree);
 			}
 			arc_kmem_reap_now(last_reclaim);
 			arc_warm = B_TRUE;
Lines 2618-2623:
 #ifdef _KERNEL
 		if (needfree) {
 			needfree = 0;
+			DTRACE_PROBE(arc__clear_needfree);
 			wakeup(&needfree);
 		}
 #endif
Lines 2692-2697:
 	 * cache size, increment the target cache size
 	 */
 	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
+		DTRACE_PROBE1(arc__inc_adapt, int, bytes);
 		atomic_add_64(&arc_c, (int64_t)bytes);
 		if (arc_c > arc_c_max)
 			arc_c = arc_c_max;
Lines 2713-2732:
 	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
 		return (1);
 
-#ifdef sun
-#ifdef _KERNEL
-	/*
-	 * If zio data pages are being allocated out of a separate heap segment,
-	 * then enforce that the size of available vmem for this area remains
-	 * above about 1/32nd free.
-	 */
-	if (type == ARC_BUFC_DATA && zio_arena != NULL &&
-	    vmem_size(zio_arena, VMEM_FREE) <
-	    (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
-		return (1);
-#endif
-#endif	/* sun */
-
 	if (arc_reclaim_needed())
 		return (1);
 
Lines 3885-3904:
 arc_memory_throttle(uint64_t reserve, uint64_t txg)
 {
 #ifdef _KERNEL
-	uint64_t available_memory =
-	    ptoa((uintmax_t)cnt.v_free_count + cnt.v_cache_count);
+	uint64_t available_memory = ptob(freemem);
 	static uint64_t page_load = 0;
 	static uint64_t last_txg = 0;
 
-#ifdef sun
-#if defined(__i386)
+#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
 	available_memory =
-	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
+	    MIN(available_memory, ptob(vmem_size(heap_arena, VMEM_FREE)));
 #endif
-#endif	/* sun */
 
-	if (cnt.v_free_count + cnt.v_cache_count >
-	    (uint64_t)physmem * arc_lotsfree_percent / 100)
+	if (freemem > (uint64_t)physmem * arc_lotsfree_percent / 100)
 		return (0);
 
 	if (txg > last_txg) {
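The throttle gate now works in pages via freemem and converts to bytes with ptob() where needed. A pure-function sketch of the rewritten gate with hypothetical numbers: physmem of 2097152 pages (8 GB at 4 KB/page) and arc_lotsfree_percent = 10 give a cutoff of 209715 pages, above which arc_memory_throttle() returns 0 immediately:

#include <stdint.h>

static int
may_throttle(uint64_t freemem_pages, uint64_t physmem_pages, int lotsfree_pct)
{
	/* Throttling is only considered at or below the lotsfree cutoff. */
	return (freemem_pages <= physmem_pages * (uint64_t)lotsfree_pct / 100);
}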
Lines 3911-3917:
 	 * continue to let page writes occur as quickly as possible.
 	 */
 	if (curproc == pageproc) {
-		if (page_load > available_memory / 4)
+		if (page_load > MAX(ptob(minfree), available_memory) / 4)
 			return (SET_ERROR(ERESTART));
 		/* Note: reserve is inflated, so we deflate */
 		page_load += reserve / 8;
Lines 3939-3946:
 	int error;
 	uint64_t anon_size;
 
-	if (reserve > arc_c/4 && !arc_no_grow)
+	if (reserve > arc_c/4 && !arc_no_grow) {
 		arc_c = MIN(arc_c_max, reserve * 4);
+		DTRACE_PROBE1(arc__set_reserve, uint64_t, arc_c);
+	}
 	if (reserve > arc_c)
 		return (SET_ERROR(ENOMEM));
 
Lines 3994-3999:
 	mutex_enter(&arc_lowmem_lock);
 	mutex_enter(&arc_reclaim_thr_lock);
 	needfree = 1;
+	DTRACE_PROBE(arc__needfree);
 	cv_signal(&arc_reclaim_thr_cv);
 
 	/*
(-)sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c (-1 / +1 lines)
Lines 43-49:
 SYSCTL_DECL(_vfs_zfs);
 SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
 #if defined(__amd64__)
-static int zio_use_uma = 1;
+static int zio_use_uma = 0;	/* KD 2014-09-18 - UMA off; broken! */
 #else
 static int zio_use_uma = 0;
 #endif
(-)sys/vm/vm_pageout.c (-8 / +27 lines)
Lines 76-81:
 __FBSDID("$FreeBSD$");
 
 #include "opt_vm.h"
+#include "opt_kdtrace.h"
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
Lines 89-94:
 #include <sys/racct.h>
 #include <sys/resourcevar.h>
 #include <sys/sched.h>
+#include <sys/sdt.h>
 #include <sys/signalvar.h>
 #include <sys/smp.h>
 #include <sys/vnode.h>
Lines 115-124:
 
 /* the kernel process "vm_pageout"*/
 static void vm_pageout(void);
+static void vm_pageout_init(void);
 static int vm_pageout_clean(vm_page_t);
 static void vm_pageout_scan(struct vm_domain *vmd, int pass);
 static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);
 
+SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
+    NULL);
+
 struct proc *pageproc;
 
 static struct kproc_desc page_kp = {
Lines 126-134:
 	vm_pageout,
 	&pageproc
 };
-SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
+SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
     &page_kp);
 
+SDT_PROVIDER_DEFINE(vm);
+SDT_PROBE_DEFINE(vm, , , vm__lowmem_cache);
+SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
+
 #if !defined(NO_SWAPPING)
 /* the kernel process "vm_daemon"*/
 static void vm_daemon(void);
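SDT_PROBE_DEFINE registers a statically-defined tracing probe against the new vm provider; SDT_PROBE0 fires it at the point of interest, making the event visible to DTrace. A sketch of the pattern, using exactly the macro forms the patch uses but with a hypothetical probe name:

/* Probe name vm__example is hypothetical; the vm provider is defined above. */
SDT_PROBE_DEFINE(vm, , , vm__example);

static void
vm_example_event(void)
{

	SDT_PROBE0(vm, , , vm__example);
}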
Lines 663-668:
 		 * may acquire locks and/or sleep, so they can only be invoked
 		 * when "tries" is greater than zero.
 		 */
+		SDT_PROBE0(vm, , , vm__lowmem_cache);
 		EVENTHANDLER_INVOKE(vm_lowmem, 0);
 
 		/*
Lines 904-910:
  *	pass 1 - Move inactive to cache or free
  *	pass 2 - Launder dirty pages
  */
-static void
+static void __used
 vm_pageout_scan(struct vm_domain *vmd, int pass)
 {
 	vm_page_t m, next;
Lines 925-930:
 		/*
 		 * Decrease registered cache sizes.
 		 */
+		SDT_PROBE0(vm, , , vm__lowmem_scan);
 		EVENTHANDLER_INVOKE(vm_lowmem, 0);
 		/*
 		 * We do this explicitly after the caches have been
Lines 1650-1664:
 }
 
 /*
- *	vm_pageout is the high level pageout daemon.
+ *	vm_pageout_init initialises basic pageout daemon settings.
  */
 static void
-vm_pageout(void)
+vm_pageout_init(void)
 {
-#if MAXMEMDOM > 1
-	int error, i;
-#endif
-
 	/*
 	 * Initialize some paging parameters.
 	 */
Lines 1704-1710:
 	/* XXX does not really belong here */
 	if (vm_page_max_wired == 0)
 		vm_page_max_wired = cnt.v_free_count / 3;
+}
 
+/*
+ *     vm_pageout is the high level pageout daemon.
+ */
+static void
+vm_pageout(void)
+{
+#if MAXMEMDOM > 1
+	int error, i;
+#endif
+
 	swap_pager_swap_init();
 #if MAXMEMDOM > 1
 	for (i = 1; i < vm_ndomains; i++) {
