author:    Tim Kordas <tim.kordas@joyent.com>  2018-02-02 13:55:03 -0500
committer: Dan McDonald <danmcd@joyent.com>    2018-02-05 13:17:49 -0500
commit:    36a64e62848b51ac5a9a5216e894ec723cfef14e (patch)
tree:      c4a675df56e14ada4f9fc07a6841adc309dd37a3 /usr/src
parent:    4c99ecc308d297ccc23eec0665e892052c57bf49 (diff)
download:  illumos-joyent-36a64e62848b51ac5a9a5216e894ec723cfef14e.tar.gz
9018 Replace kmem_cache_reap_now() with kmem_cache_reap_soon()
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Yuri Pankov <yuripv@yuripv.net>
Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/lib/libfakekernel/common/kmem.c         11
-rw-r--r--  usr/src/lib/libfakekernel/common/mapfile-vers    3
-rw-r--r--  usr/src/uts/common/fs/zfs/arc.c                 47
-rw-r--r--  usr/src/uts/common/os/kmem.c                    23
-rw-r--r--  usr/src/uts/common/os/vmem.c                     4
-rw-r--r--  usr/src/uts/common/sys/kmem.h                    4
6 files changed, 72 insertions(+), 20 deletions(-)
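
Before the per-file diffs, a note on the new caller contract. kmem_cache_reap_now() used to dispatch a depot reap to kmem_taskq and then block in taskq_wait(); kmem_cache_reap_soon() only dispatches, and kmem_cache_reap_active() lets a caller back off while earlier reap work may still be pending. The fragment below is a hedged illustration of that pattern, not part of the patch: the wrapper reap_cache_politely() is hypothetical, while the kmem_cache_* symbols are the ones introduced here.

#include <sys/kmem.h>

/*
 * Hypothetical caller-side helper: schedule an asynchronous reap of one
 * cache, but only if the kmem taskq does not already appear busy with
 * reap work.  Mirrors the check arc_kmem_reap_now() performs below.
 */
static void
reap_cache_politely(kmem_cache_t *cp)
{
	/* Conservative check: may be B_TRUE even when no reap is running. */
	if (kmem_cache_reap_active())
		return;

	/* Dispatches the reap task and returns without waiting for it. */
	kmem_cache_reap_soon(cp);
}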
diff --git a/usr/src/lib/libfakekernel/common/kmem.c b/usr/src/lib/libfakekernel/common/kmem.c
index 82d1cfeaef..0c69bf5151 100644
--- a/usr/src/lib/libfakekernel/common/kmem.c
+++ b/usr/src/lib/libfakekernel/common/kmem.c
@@ -11,6 +11,7 @@
/*
* Copyright 2013 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2018, Joyent, Inc.
*/
#include <sys/kmem.h>
@@ -125,13 +126,19 @@ kmem_cache_free(kmem_cache_t *kc, void *p)
/* ARGSUSED */
void
kmem_cache_set_move(kmem_cache_t *kc,
- kmem_cbrc_t (*fun)(void *, void *, size_t, void *))
+ kmem_cbrc_t (*fun)(void *, void *, size_t, void *))
{
}
+boolean_t
+kmem_cache_reap_active(void)
+{
+ return (B_FALSE);
+}
+
/* ARGSUSED */
void
-kmem_cache_reap_now(kmem_cache_t *kc)
+kmem_cache_reap_soon(kmem_cache_t *kc)
{
}
diff --git a/usr/src/lib/libfakekernel/common/mapfile-vers b/usr/src/lib/libfakekernel/common/mapfile-vers
index 578301be86..42af216580 100644
--- a/usr/src/lib/libfakekernel/common/mapfile-vers
+++ b/usr/src/lib/libfakekernel/common/mapfile-vers
@@ -104,7 +104,8 @@ SYMBOL_VERSION SUNWprivate_1.1 {
kmem_cache_create;
kmem_cache_destroy;
kmem_cache_free;
- kmem_cache_reap_now;
+ kmem_cache_reap_active;
+ kmem_cache_reap_soon;
kmem_cache_set_move;
kmem_debugging;
kmem_free;
diff --git a/usr/src/uts/common/fs/zfs/arc.c b/usr/src/uts/common/fs/zfs/arc.c
index df319810b5..d8e7c00298 100644
--- a/usr/src/uts/common/fs/zfs/arc.c
+++ b/usr/src/uts/common/fs/zfs/arc.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc. All rights reserved.
@@ -301,6 +301,9 @@ int zfs_arc_evict_batch_limit = 10;
/* number of seconds before growing cache again */
static int arc_grow_retry = 60;
+/* number of milliseconds before attempting a kmem-cache-reap */
+static int arc_kmem_cache_reap_retry_ms = 1000;
+
/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
int zfs_arc_overflow_shift = 8;
@@ -4047,21 +4050,31 @@ arc_kmem_reap_now(void)
#endif
#endif
+ /*
+ * If a kmem reap is already active, don't schedule more. We must
+ * check for this because kmem_cache_reap_soon() won't actually
+ * block on the cache being reaped (this is to prevent callers from
+ * becoming implicitly blocked by a system-wide kmem reap -- which,
+ * on a system with many, many full magazines, can take minutes).
+ */
+ if (kmem_cache_reap_active())
+ return;
+
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
if (zio_buf_cache[i] != prev_cache) {
prev_cache = zio_buf_cache[i];
- kmem_cache_reap_now(zio_buf_cache[i]);
+ kmem_cache_reap_soon(zio_buf_cache[i]);
}
if (zio_data_buf_cache[i] != prev_data_cache) {
prev_data_cache = zio_data_buf_cache[i];
- kmem_cache_reap_now(zio_data_buf_cache[i]);
+ kmem_cache_reap_soon(zio_data_buf_cache[i]);
}
}
- kmem_cache_reap_now(abd_chunk_cache);
- kmem_cache_reap_now(buf_cache);
- kmem_cache_reap_now(hdr_full_cache);
- kmem_cache_reap_now(hdr_l2only_cache);
- kmem_cache_reap_now(range_seg_cache);
+ kmem_cache_reap_soon(abd_chunk_cache);
+ kmem_cache_reap_soon(buf_cache);
+ kmem_cache_reap_soon(hdr_full_cache);
+ kmem_cache_reap_soon(hdr_l2only_cache);
+ kmem_cache_reap_soon(range_seg_cache);
if (zio_arena != NULL) {
/*
@@ -4093,6 +4106,7 @@ static void
arc_reclaim_thread(void *unused)
{
hrtime_t growtime = 0;
+ hrtime_t kmem_reap_time = 0;
callb_cpr_t cpr;
CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG);
@@ -4126,7 +4140,7 @@ arc_reclaim_thread(void *unused)
int64_t free_memory = arc_available_memory();
if (free_memory < 0) {
-
+ hrtime_t curtime = gethrtime();
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
@@ -4134,9 +4148,20 @@ arc_reclaim_thread(void *unused)
* Wait at least zfs_grow_retry (default 60) seconds
* before considering growing.
*/
- growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
+ growtime = curtime + SEC2NSEC(arc_grow_retry);
- arc_kmem_reap_now();
+ /*
+ * Wait at least arc_kmem_cache_reap_retry_ms
+ * between arc_kmem_reap_now() calls. Without
+ * this check it is possible to end up in a
+ * situation where we spend lots of time
+ * reaping caches, while we're near arc_c_min.
+ */
+ if (curtime >= kmem_reap_time) {
+ arc_kmem_reap_now();
+ kmem_reap_time = gethrtime() +
+ MSEC2NSEC(arc_kmem_cache_reap_retry_ms);
+ }
/*
* If we are still low on memory, shrink the ARC
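
The other half of the arc.c change is the rate limit in arc_reclaim_thread(): arc_kmem_reap_now() is only invoked once per arc_kmem_cache_reap_retry_ms. Below is a hedged sketch of that throttle pulled out into a helper for clarity; the helper name arc_maybe_reap() and its placement are illustrative, while gethrtime(), MSEC2NSEC(), arc_kmem_reap_now() and arc_kmem_cache_reap_retry_ms are the symbols the patch itself uses.

#include <sys/time.h>

static hrtime_t arc_next_kmem_reap;	/* earliest time for the next reap */

static void
arc_maybe_reap(void)
{
	hrtime_t curtime = gethrtime();

	/* Too soon since the last reap pass; skip this one. */
	if (curtime < arc_next_kmem_reap)
		return;

	arc_kmem_reap_now();		/* schedules asynchronous reap tasks */
	arc_next_kmem_reap = gethrtime() +
	    MSEC2NSEC(arc_kmem_cache_reap_retry_ms);
}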
diff --git a/usr/src/uts/common/os/kmem.c b/usr/src/uts/common/os/kmem.c
index a2437497ea..0728305ac4 100644
--- a/usr/src/uts/common/os/kmem.c
+++ b/usr/src/uts/common/os/kmem.c
@@ -22,6 +22,7 @@
* Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2018, Joyent, Inc.
*/
/*
@@ -3249,10 +3250,27 @@ kmem_cache_magazine_enable(kmem_cache_t *cp)
}
/*
- * Reap (almost) everything right now.
+ * Allow our caller to determine if there are running reaps.
+ *
+ * This call is very conservative and may return B_TRUE even when
+ * reaping activity isn't active. If it returns B_FALSE, then reaping
+ * activity is definitely inactive.
+ */
+boolean_t
+kmem_cache_reap_active(void)
+{
+ return (!taskq_empty(kmem_taskq));
+}
+
+/*
+ * Reap (almost) everything soon.
+ *
+ * Note: this does not wait for the reap-tasks to complete. Caller
+ * should use kmem_cache_reap_active() (above) and/or moderation to
+ * avoid scheduling too many reap-tasks.
*/
void
-kmem_cache_reap_now(kmem_cache_t *cp)
+kmem_cache_reap_soon(kmem_cache_t *cp)
{
ASSERT(list_link_active(&cp->cache_link));
@@ -3260,7 +3278,6 @@ kmem_cache_reap_now(kmem_cache_t *cp)
(void) taskq_dispatch(kmem_taskq,
(task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
- taskq_wait(kmem_taskq);
}
/*
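
Because kmem_cache_reap_active() simply tests whether kmem_taskq is empty, any queued kmem work can make it return B_TRUE; only a B_FALSE result is authoritative. A hedged sketch of leaning on that one-sided guarantee, e.g. from a test or debugging hook (the polling helper below is illustrative and not part of the patch; delay() and drv_usectohz() are the usual illumos kernel primitives):

#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/sunddi.h>

/*
 * Hypothetical helper: poll until no reap work appears to be pending.
 * Since only the B_FALSE return is definitive, this loop can over-wait
 * on unrelated taskq work, but it can never return too early.
 */
static void
wait_for_reaps(void)
{
	while (kmem_cache_reap_active())
		delay(drv_usectohz(10000));	/* back off for ~10ms */
}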
diff --git a/usr/src/uts/common/os/vmem.c b/usr/src/uts/common/os/vmem.c
index b79b1b4cf7..4664c52e77 100644
--- a/usr/src/uts/common/os/vmem.c
+++ b/usr/src/uts/common/os/vmem.c
@@ -25,7 +25,7 @@
/*
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
- * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2018, Joyent, Inc.
*/
/*
@@ -1745,7 +1745,7 @@ vmem_qcache_reap(vmem_t *vmp)
*/
for (i = 0; i < VMEM_NQCACHE_MAX; i++)
if (vmp->vm_qcache[i])
- kmem_cache_reap_now(vmp->vm_qcache[i]);
+ kmem_cache_reap_soon(vmp->vm_qcache[i]);
}
/*
diff --git a/usr/src/uts/common/sys/kmem.h b/usr/src/uts/common/sys/kmem.h
index e54d83e499..aac2eafa3c 100644
--- a/usr/src/uts/common/sys/kmem.h
+++ b/usr/src/uts/common/sys/kmem.h
@@ -23,6 +23,7 @@
* Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright 2013 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2018, Joyent, Inc.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
@@ -128,7 +129,8 @@ extern void kmem_cache_destroy(kmem_cache_t *);
extern void *kmem_cache_alloc(kmem_cache_t *, int);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern uint64_t kmem_cache_stat(kmem_cache_t *, char *);
-extern void kmem_cache_reap_now(kmem_cache_t *);
+extern boolean_t kmem_cache_reap_active(void);
+extern void kmem_cache_reap_soon(kmem_cache_t *);
extern void kmem_cache_move_notify(kmem_cache_t *, void *);
#endif /* _KERNEL */