Patch for bug 187594

(-)cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c (-5 / +92 lines)
@@ -237,7 +237,17 @@ int zfs_arc_p_min_shift = 0;
 int zfs_disable_dup_eviction = 0;
 uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
 u_int zfs_arc_free_target = 0;
+u_int zfs_arc_wakeup_pager = 0;
+u_int zfs_arc_wakeup_delay = 500;
+int	zfs_arc_last_slab = 0;
 
+#define	WAKE_PAGER
+#ifdef	WAKE_PAGER
+#define	WAKE_PAGER_CONSTANT	10 / 9	/* Pager wakeup threshold */
+static	int	arc_init_done = 0;	/* After arc_warm is valid */
+extern void pagedaemon_wakeup(void);
+#endif
+
 /* Absolute min for arc min / max is 16MB. */
 static uint64_t arc_abs_min = 16 << 20;
 
@@ -251,7 +261,8 @@ static void
 arc_free_target_init(void *unused __unused)
 {
 
-	zfs_arc_free_target = vm_pageout_wakeup_thresh;
+	zfs_arc_free_target = vm_pageout_wakeup_thresh + (vm_pageout_wakeup_thresh / 20);
+	zfs_arc_wakeup_pager = vm_pageout_wakeup_thresh * WAKE_PAGER_CONSTANT;
 }
 SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
     arc_free_target_init, NULL);
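
Review note: with these initializers, zfs_arc_free_target lands at 105% of
vm_pageout_wakeup_thresh and zfs_arc_wakeup_pager at roughly 111% of it:
WAKE_PAGER_CONSTANT expands textually to 10 / 9, and C groups the expression
left to right, so thresh * 10 / 9 is (thresh * 10) / 9.  Because the macro
body is unparenthesized, it only behaves as a ratio when the multiplicand is
on its left.  A minimal standalone sketch (userland, hypothetical thresh
value, not part of the patch):

	#include <stdio.h>

	#define	WAKE_PAGER_CONSTANT	10 / 9	/* unparenthesized, as in the patch */

	int
	main(void)
	{
		unsigned int thresh = 10000;	/* hypothetical page count */

		/* 105%: the wakeup threshold plus 5% headroom */
		unsigned int free_target = thresh + (thresh / 20);	/* 10500 */

		/* groups as (thresh * 10) / 9: about 111% */
		unsigned int wakeup_pager = thresh * WAKE_PAGER_CONSTANT;	/* 11111 */

		/* reversed order groups as (10 / 9) * thresh == thresh */
		unsigned int no_headroom = WAKE_PAGER_CONSTANT * thresh;	/* 10000 */

		printf("%u %u %u\n", free_target, wakeup_pager, no_headroom);
		return (0);
	}

Parenthesizing the macro body would break the intended integer arithmetic,
so the operand order in arc_free_target_init() is load-bearing.
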
@@ -3475,7 +3486,15 @@ int64_t arc_pages_pp_reserve = 64;
  */
 int64_t arc_swapfs_reserve = 64;
 
+#ifdef	WAKE_PAGER
+/*
+ * File-local static for event processor bypass, and forward declarations.
+ */
+static unsigned int arc_no_wake_event = 0;
+static void arc_kmem_reap_now(int);
+#endif
+
 /*
  * Return the amount of memory that can be consumed before reclaim will be
  * needed.  Positive if there is sufficient free memory, negative indicates
  * the amount of memory that needs to be freed up.
@@ -3488,6 +3507,11 @@ arc_available_memory(void)
 	free_memory_reason_t r = FMR_UNKNOWN;
 
 #ifdef _KERNEL
+#ifdef WAKE_PAGER
+	sbintime_t now;
+	static sbintime_t last_pagedaemon_wake = 0;
+#endif	/* WAKE_PAGER */
+
 	if (needfree > 0) {
 		n = PAGESIZE * (-needfree);
 		if (n < lowest) {
@@ -3495,6 +3519,37 @@ arc_available_memory(void)
 			r = FMR_NEEDFREE;
 		}
 	}
+#ifdef WAKE_PAGER
+/*
+ * Once the ARC is initialized, perform the following:
+ *
+ * 1. If we are in the "memory is low enough to wake the pager" zone,
+ *    reap the kernel UMA caches once per wakeup_delay period (500ms by
+ *    default) AND wake the pager up (so it can demote pages from
+ *    inactive to cache and ultimately to the free list.)
+ *
+ * 2. If we're below VM's free_target in free RAM, reap *one* UMA zone
+ *    per time period (500ms).
+ *
+ */
+	if (arc_init_done) {
+		now = getsbinuptime();
+		if ((now - last_pagedaemon_wake) / SBT_1MS > zfs_arc_wakeup_delay) {
+			last_pagedaemon_wake = now;
+			arc_no_wake_event++;	/* Set bypass flag for ARC */
+			if ((((int64_t)freemem - zfs_arc_wakeup_pager) < 0) && (arc_warm == B_TRUE)) {
+				arc_kmem_reap_now(0);	/* Reap caches if we're close */
+				DTRACE_PROBE(arc__wake_pagedaemon);
+				(void) pagedaemon_wakeup();	/* Wake the pager */
+			} else {
+				if (((int64_t)freemem - vm_cnt.v_free_target) < 0) {
+					arc_kmem_reap_now(1);	/* Reap one cache under mild pressure */
+					DTRACE_PROBE2(arc__reap_one, int, zfs_arc_last_slab, int, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+				}
+			}
+		}
+	}
+#endif /* WAKE_PAGER */
 
 	/*
 	 * Cooperate with pagedaemon when it's time for it to scan
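
Review note: the block above is a two-tier, rate-limited policy.  A
compressed userland model of the control flow (stub functions and
hypothetical variables, not kernel code; the arc_init_done and arc_warm
gates are omitted):

	#include <stdint.h>
	#include <stdio.h>

	static int64_t freemem, wakeup_pager_thresh, free_target;
	static uint64_t now_ms, last_wake_ms, wakeup_delay_ms = 500;

	static void reap_all_and_wake_pager(void) { puts("reap all, wake pager"); }
	static void reap_one_slab(void) { puts("reap one rotating slab"); }

	/* One evaluation of the policy, as run from arc_available_memory(). */
	static void
	wake_pager_tick(void)
	{
		if (now_ms - last_wake_ms <= wakeup_delay_ms)
			return;				/* at most one action per window */
		last_wake_ms = now_ms;
		if (freemem < wakeup_pager_thresh)
			reap_all_and_wake_pager();	/* tight: reap and page */
		else if (freemem < free_target)
			reap_one_slab();		/* mild pressure: trickle-reap */
	}

	int
	main(void)
	{
		freemem = 900; wakeup_pager_thresh = 1000; free_target = 1200;
		now_ms = 501;			/* one window has elapsed */
		wake_pager_tick();		/* prints: reap all, wake pager */
		return (0);
	}

Note that in the patch last_pagedaemon_wake is refreshed on every expired
window even when neither branch fires, so the timestamp tracks the polling
cadence rather than the last actual wakeup.
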
@@ -3633,12 +3688,22 @@ extern kmem_cache_t *zio_buf_cache[];
 extern kmem_cache_t	*zio_data_buf_cache[];
 extern kmem_cache_t	*range_seg_cache;
 
+/*
+ * Pass a flag to this routine; if zero, reap all.  If nonzero, reap
+ * one slab on a rotating basis.  This allows a low-rate call to be made
+ * on a routine, maintenance basis even when not terribly low on RAM, so
+ * we don't have huge amounts of RAM out in unused UMA allocations.
+ */
 static __noinline void
-arc_kmem_reap_now(void)
+arc_kmem_reap_now(flag)
+int	flag;
 {
 	size_t			i;
 	kmem_cache_t		*prev_cache = NULL;
 	kmem_cache_t		*prev_data_cache = NULL;
+	int			arc_cache_reaped = 0;
+	int			arc_data_cache_reaped = 0;
+	int			reset_last_slab = 0;
 
 	DTRACE_PROBE(arc__kmem_reap_start);
 #ifdef _KERNEL
@@ -3660,13 +3725,28 @@ static __noinline void
 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
 		if (zio_buf_cache[i] != prev_cache) {
 			prev_cache = zio_buf_cache[i];
-			kmem_cache_reap_now(zio_buf_cache[i]);
+			if ((!flag) || ((i > zfs_arc_last_slab) && (!arc_cache_reaped))) {
+				kmem_cache_reap_now(zio_buf_cache[i]);
+				arc_cache_reaped++;
+			}
 		}
 		if (zio_data_buf_cache[i] != prev_data_cache) {
 			prev_data_cache = zio_data_buf_cache[i];
-			kmem_cache_reap_now(zio_data_buf_cache[i]);
+			if ((!flag) || ((i > zfs_arc_last_slab) && (!arc_data_cache_reaped))) {
+				kmem_cache_reap_now(zio_data_buf_cache[i]);
+				arc_data_cache_reaped++;
+			}
 		}
+		if (flag && (!reset_last_slab) && (arc_cache_reaped || arc_data_cache_reaped)) {
+			reset_last_slab = i;
+		}
 	}
+	if (reset_last_slab) {
+		zfs_arc_last_slab = reset_last_slab;
+	}
+	if ((!arc_cache_reaped) && (!arc_data_cache_reaped) && flag) {	/* Nothing reaped in one-at-a-time pass */
+		zfs_arc_last_slab = 0;	/* Reset the rotation */
+	}
 	kmem_cache_reap_now(buf_cache);
 	kmem_cache_reap_now(hdr_full_cache);
 	kmem_cache_reap_now(hdr_l2only_cache);
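
Review note: with flag nonzero, the loop above reaps at most one zio buffer
slab and one zio data slab per call, resuming just past zfs_arc_last_slab
and restarting from 0 once a pass finds nothing beyond the cursor.  A
simplified userland sketch of the rotation (single hypothetical cache
array, duplicate-pointer skipping omitted):

	#include <stdio.h>

	#define	NCACHES	32

	static int last_slab;	/* rotation cursor, like zfs_arc_last_slab */

	static void
	reap_caches(int flag)
	{
		int i, reaped = 0;

		for (i = 0; i < NCACHES; i++) {
			if (!flag || (i > last_slab && !reaped)) {
				printf("reap slab %d\n", i);	/* kmem_cache_reap_now() here */
				reaped++;
				if (flag)
					last_slab = i;	/* resume past this one next call */
			}
		}
		if (flag && !reaped)
			last_slab = 0;	/* wrapped: restart the rotation */
	}

	int
	main(void)
	{
		reap_caches(1);		/* reaps slab 1 (cursor was 0) */
		reap_caches(1);		/* reaps slab 2 */
		return (0);
	}

One thing worth confirming in review: the comparison i > zfs_arc_last_slab
is strict, so index 0 is never reaped in one-at-a-time mode, even right
after the cursor resets to 0.
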
@@ -3726,7 +3806,7 @@ arc_reclaim_thread(void *dummy __unused)
 			 */
 			growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
 
-			arc_kmem_reap_now();
+			arc_kmem_reap_now(0);
 
 			/*
 			 * If we are still low on memory, shrink the ARC
5431
static void
5511
static void
5432
arc_lowmem(void *arg __unused, int howto __unused)
5512
arc_lowmem(void *arg __unused, int howto __unused)
5433
{
5513
{
5514
	if (arc_no_wake_event) {        /* Don't do it if we woke the pager */
5515
		arc_no_wake_event = 0;  /* Just clear the flag */
5516
		return;
5517
	}
5434
5518
5435
	mutex_enter(&arc_reclaim_lock);
5519
	mutex_enter(&arc_reclaim_lock);
5436
	/* XXX: Memory deficit should be passed as argument. */
5520
	/* XXX: Memory deficit should be passed as argument. */
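
Review note: arc_no_wake_event is a one-shot handshake between the wakeup
path in arc_available_memory() and this lowmem handler, sketched below
(simplified, userland, not kernel code):

	#include <stdio.h>

	static unsigned int no_wake_event;

	static void
	wake_pager(void)
	{
		no_wake_event++;	/* we are about to cause a lowmem event */
		puts("pagedaemon_wakeup()");
	}

	static void
	on_lowmem_event(void)
	{
		if (no_wake_event) {	/* self-induced: swallow it once */
			no_wake_event = 0;
			return;
		}
		puts("shrink the ARC");	/* normal lowmem handling */
	}

	int
	main(void)
	{
		wake_pager();
		on_lowmem_event();	/* swallowed: no output */
		on_lowmem_event();	/* prints: shrink the ARC */
		return (0);
	}

As written in the patch, however, arc_available_memory() increments the
flag on every expired 500ms window, not only when it actually wakes the
pager, so a genuine low-memory event arriving in such a window is swallowed
too; worth confirming that is intended.
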
@@ -5696,6 +5780,9 @@ arc_init(void)
 		printf("             in /boot/loader.conf.\n");
 	}
 #endif
+#ifdef	WAKE_PAGER
+	arc_init_done++;
+#endif	/* WAKE_PAGER */
 }
 
 void