Lines 190-195
static int arc_dead;
Link Here
|
190 |
extern int zfs_prefetch_disable; |
190 |
extern int zfs_prefetch_disable; |
191 |
|
191 |
|
192 |
/* |
192 |
/* |
|
|
193 |
* KD 2015-02-10 |
194 |
* We have to be able to test for UIO use inside the arc allocator. |
195 |
* NOTE: DO NOT MODIFY HERE! |
196 |
*/ |
197 |
extern int zio_use_uma; |
198 |
extern int zfs_dynamic_write_buffer; |
199 |
|
200 |
|
201 |
/* |
193 |
* The arc has filled available memory and has now warmed up. |
202 |
* The arc has filled available memory and has now warmed up. |
194 |
*/ |
203 |
*/ |
195 |
static boolean_t arc_warm; |
204 |
static boolean_t arc_warm; |
Lines 212-218
/*
 * Seed zfs_arc_free_target once the VM pagedaemon has initialised its
 * thresholds.  The target is placed halfway between the pageout wakeup
 * threshold and the VM free-page target, so the ARC begins releasing
 * memory before the pagedaemon would be woken, rather than racing it.
 * NOTE(review): reconstructed from the patched (new) side of the diff;
 * the pre-patch code used vm_pageout_wakeup_thresh alone — confirm
 * against the applied tree.
 */
static void
arc_free_target_init(void *unused __unused)
{

	zfs_arc_free_target = vm_pageout_wakeup_thresh +
	    ((vm_cnt.v_free_target - vm_pageout_wakeup_thresh) / 2);
}
/* Runs at SI_SUB_KTHREAD_PAGE so the pagedaemon tunables above are valid. */
SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
    arc_free_target_init, NULL);
Lines 233-239
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_block
Link Here
|
233 |
SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_shrink_shift, CTLFLAG_RW, |
242 |
SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_shrink_shift, CTLFLAG_RW, |
234 |
&arc_shrink_shift, 0, |
243 |
&arc_shrink_shift, 0, |
235 |
"log2(fraction of arc to reclaim)"); |
244 |
"log2(fraction of arc to reclaim)"); |
236 |
|
245 |
SYSCTL_INT(_vfs_zfs, OID_AUTO, dynamic_write_buffer, CTLFLAG_RWTUN, |
|
|
246 |
&zfs_dynamic_write_buffer, 0, |
247 |
"Dynamically restrict dirty data when memory is low"); |
237 |
/* |
248 |
/* |
238 |
* We don't have a tunable for arc_free_target due to the dependency on |
249 |
* We don't have a tunable for arc_free_target due to the dependency on |
239 |
* pagedaemon initialisation. |
250 |
* pagedaemon initialisation. |
Lines 2645-2650
extern kmem_cache_t *zio_buf_cache[];
Link Here
|
2645 |
extern kmem_cache_t *zio_data_buf_cache[]; |
2656 |
extern kmem_cache_t *zio_data_buf_cache[]; |
2646 |
extern kmem_cache_t *range_seg_cache; |
2657 |
extern kmem_cache_t *range_seg_cache; |
2647 |
|
2658 |
|
|
|
2659 |
static void __used |
2660 |
reap_arc_caches() |
2661 |
{ |
2662 |
size_t i; |
2663 |
kmem_cache_t *prev_cache = NULL; |
2664 |
kmem_cache_t *prev_data_cache = NULL; |
2665 |
|
2666 |
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { |
2667 |
if (zio_buf_cache[i] != prev_cache) { |
2668 |
prev_cache = zio_buf_cache[i]; |
2669 |
kmem_cache_reap_now(zio_buf_cache[i]); |
2670 |
} |
2671 |
if (zio_data_buf_cache[i] != prev_data_cache) { |
2672 |
prev_data_cache = zio_data_buf_cache[i]; |
2673 |
kmem_cache_reap_now(zio_data_buf_cache[i]); |
2674 |
} |
2675 |
} |
2676 |
kmem_cache_reap_now(buf_cache); |
2677 |
kmem_cache_reap_now(hdr_full_cache); |
2678 |
kmem_cache_reap_now(hdr_l2only_cache); |
2679 |
kmem_cache_reap_now(range_seg_cache); |
2680 |
} |
2681 |
|
2648 |
static __noinline void |
2682 |
static __noinline void |
2649 |
arc_kmem_reap_now(void) |
2683 |
arc_kmem_reap_now(void) |
2650 |
{ |
2684 |
{ |
Lines 2676-2695
arc_kmem_reap_now(void)
Link Here
|
2676 |
#endif |
2709 |
#endif |
2677 |
#endif |
2710 |
#endif |
2678 |
|
2711 |
|
2679 |
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { |
2712 |
reap_arc_caches(); |
2680 |
if (zio_buf_cache[i] != prev_cache) { |
|
|
2681 |
prev_cache = zio_buf_cache[i]; |
2682 |
kmem_cache_reap_now(zio_buf_cache[i]); |
2683 |
} |
2684 |
if (zio_data_buf_cache[i] != prev_data_cache) { |
2685 |
prev_data_cache = zio_data_buf_cache[i]; |
2686 |
kmem_cache_reap_now(zio_data_buf_cache[i]); |
2687 |
} |
2688 |
} |
2689 |
kmem_cache_reap_now(buf_cache); |
2690 |
kmem_cache_reap_now(hdr_full_cache); |
2691 |
kmem_cache_reap_now(hdr_l2only_cache); |
2692 |
kmem_cache_reap_now(range_seg_cache); |
2693 |
|
2713 |
|
2694 |
#ifdef illumos |
2714 |
#ifdef illumos |
2695 |
if (zio_arena != NULL) { |
2715 |
if (zio_arena != NULL) { |
Lines 2707-2718
arc_reclaim_thread(void *dummy __unused)
Link Here
|
2707 |
{ |
2728 |
{ |
2708 |
clock_t growtime = 0; |
2729 |
clock_t growtime = 0; |
2709 |
callb_cpr_t cpr; |
2730 |
callb_cpr_t cpr; |
|
|
2731 |
int autoreap = 0; |
2710 |
|
2732 |
|
2711 |
CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); |
2733 |
CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); |
2712 |
|
2734 |
|
2713 |
mutex_enter(&arc_reclaim_thr_lock); |
2735 |
mutex_enter(&arc_reclaim_thr_lock); |
2714 |
while (arc_thread_exit == 0) { |
2736 |
while (arc_thread_exit == 0) { |
|
|
2737 |
|
2738 |
#ifdef _KERNEL |
2739 |
/* KD 2015-02-10 |
2740 |
* Protect against UMA free memory bloat. We already do this on a low-memory |
2741 |
* basis in the allocator; it has to happen there rather than here due to |
2742 |
* response time considerations. Make the call here once every 10 passes as |
2743 |
* well; this reclaims unused UMA buffers every 10 seconds on an idle system |
2744 |
* and more frequently if the reclaim thread gets woken up by low RAM |
2745 |
* conditions. |
2746 |
*/ |
2747 |
if ((zio_use_uma) && (autoreap++ == 10)) { |
2748 |
autoreap = 0; |
2749 |
DTRACE_PROBE(arc__reclaim_timed_reap); |
2750 |
reap_arc_caches(); |
2751 |
} |
2752 |
#endif /* _KERNEL */ |
2753 |
|
2715 |
int64_t free_memory = arc_available_memory(); |
2754 |
int64_t free_memory = arc_available_memory(); |
2716 |
if (free_memory < 0) { |
2755 |
if (free_memory < 0) { |
2717 |
|
2756 |
|
2718 |
arc_no_grow = B_TRUE; |
2757 |
arc_no_grow = B_TRUE; |
Lines 2899-2904
arc_get_data_buf(arc_buf_t *buf)
Link Here
|
2899 |
arc_space_consume(size, ARC_SPACE_DATA); |
2938 |
arc_space_consume(size, ARC_SPACE_DATA); |
2900 |
} else { |
2939 |
} else { |
2901 |
ASSERT(type == ARC_BUFC_DATA); |
2940 |
ASSERT(type == ARC_BUFC_DATA); |
|
|
2941 |
#ifdef _KERNEL |
2942 |
/* KD 2015-02-10 |
2943 |
* It would be nice if we could leave this to the arc_reclaim thread. |
2944 |
* Unfortunately we cannot; the test has to be done here as well, because |
2945 |
* under heavy I/O demand we can grab enough RAM fast enough to induce |
2946 |
* nasty oscillation problems. Fortunately we only need to call this when |
2947 |
* the system is under reasonably-severe memory stress. |
2948 |
*/ |
2949 |
if (zio_use_uma && (ptob(vm_cnt.v_free_count) + size < ptob(vm_cnt.v_free_target))) { |
2950 |
DTRACE_PROBE3(arc__alloc_lowmem_reap, int, vm_cnt.v_free_count, int, size, int, vm_cnt.v_free_target); |
2951 |
reap_arc_caches(); |
2952 |
} |
2953 |
#endif /* _KERNEL */ |
2902 |
buf->b_data = zio_data_buf_alloc(size); |
2954 |
buf->b_data = zio_data_buf_alloc(size); |
2903 |
ARCSTAT_INCR(arcstat_data_size, size); |
2955 |
ARCSTAT_INCR(arcstat_data_size, size); |
2904 |
atomic_add_64(&arc_size, size); |
2956 |
atomic_add_64(&arc_size, size); |