author     Jerry Jelinek <jerry.jelinek@joyent.com>    2018-03-20 11:50:03 +0000
committer  Jerry Jelinek <jerry.jelinek@joyent.com>    2018-03-20 11:50:03 +0000
commit     8286146342075a9947c346f919cae55604bd2646 (patch)
tree       620fa777eaeb38c8759a1e8720b68e5776eccedc
parent     1932f584055941bdb546ada1524aa809fe6c9041 (diff)
parent     015f38bba18ec2acb8888f945aa30883d339367b (diff)
download   illumos-joyent-8286146342075a9947c346f919cae55604bd2646.tar.gz
[illumos-gate merge]
commit 015f38bba18ec2acb8888f945aa30883d339367b
    9193 bootcfg -C doesn't work
commit 5dafeea3ebd2dd77affc802bcb90f63faf01589f
    9235 rename zpool_rewind_policy_t to zpool_load_policy_t
commit ccef24b493bcbd146fcd6d8946666cae081470b6
    9191 dump vdev tree to zfs_dbgmsg when spa load fails due to missing log devices
commit c4de6c3ca45a0d091cbbc164e338e97f0959aea1
    9190 Fix cleanup routine in import_cachefile_device_replaced.ksh
commit d1de72cfa29ab77ff80e2bb0e668a6afa5bccaf0
    9187 racing condition between vdev label and spa_last_synced_txg in vdev_validate
commit 859bfead58cf0e8c589b713386bab81385a346e6
    9076 ZFS Performance test concurrency should be lowered for better latency
-rw-r--r--  usr/src/cmd/zdb/zdb.c | 6
-rw-r--r--  usr/src/cmd/zpool/zpool_main.c | 11
-rw-r--r--  usr/src/common/zfs/zfs_comutil.c | 40
-rw-r--r--  usr/src/common/zfs/zfs_comutil.h | 4
-rw-r--r--  usr/src/lib/libzfs/common/libzfs.h | 2
-rw-r--r--  usr/src/lib/libzfs/common/libzfs_import.c | 4
-rw-r--r--  usr/src/lib/libzfs/common/libzfs_pool.c | 27
-rw-r--r--  usr/src/test/zfs-tests/include/commands.cfg | 1
-rw-r--r--  usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib | 3
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/random_reads.ksh | 13
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/random_readwrite.ksh | 13
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/random_writes.ksh | 13
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh | 13
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/sequential_writes.ksh | 13
-rw-r--r--  usr/src/uts/common/fs/zfs/spa.c | 56
-rw-r--r--  usr/src/uts/common/fs/zfs/vdev.c | 5
-rw-r--r--  usr/src/uts/common/sys/fs/zfs.h | 26
17 files changed, 153 insertions, 97 deletions
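
The heart of this merge is illumos 9235, which renames zpool_rewind_policy_t to
zpool_load_policy_t along with its nvlist keys (ZPOOL_REWIND_POLICY ->
ZPOOL_LOAD_POLICY, ZPOOL_REWIND_REQUEST -> ZPOOL_LOAD_REWIND_POLICY, and so on)
and its struct fields (zrp_* -> zlp_*). As a minimal sketch of how a consumer
assembles the renamed policy nvlist, mirroring the zdb and zpool hunks below
(the helper name make_load_policy is hypothetical; error handling is collapsed
into a single NULL return):

#include <libnvpair.h>
#include <sys/fs/zfs.h>		/* ZPOOL_LOAD_* nvlist keys */

static nvlist_t *
make_load_policy(uint64_t max_txg, uint32_t rewind)
{
	nvlist_t *policy;

	/* Same construction as in zdb.c and zpool_main.c below. */
	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
	    nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
		return (NULL);	/* out of memory */

	/*
	 * The caller then attaches the policy to the pool config, as the
	 * zdb and libzfs_import hunks do:
	 *	nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY, policy);
	 */
	return (policy);
}
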
diff --git a/usr/src/cmd/zdb/zdb.c b/usr/src/cmd/zdb/zdb.c
index 9c271a56b7..aafb0aa29f 100644
--- a/usr/src/cmd/zdb/zdb.c
+++ b/usr/src/cmd/zdb/zdb.c
@@ -5186,8 +5186,8 @@ main(int argc, char **argv)
(dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
- nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, max_txg) != 0 ||
- nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind) != 0)
+ nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
+ nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
fatal("internal error: %s", strerror(ENOMEM));
error = 0;
@@ -5204,7 +5204,7 @@ main(int argc, char **argv)
}
if (nvlist_add_nvlist(cfg,
- ZPOOL_REWIND_POLICY, policy) != 0) {
+ ZPOOL_LOAD_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
diff --git a/usr/src/cmd/zpool/zpool_main.c b/usr/src/cmd/zpool/zpool_main.c
index 698684e8dc..aa0463608b 100644
--- a/usr/src/cmd/zpool/zpool_main.c
+++ b/usr/src/cmd/zpool/zpool_main.c
@@ -2326,8 +2326,9 @@ zpool_do_import(int argc, char **argv)
/* In the future, we can capture further policy and include it here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
- nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, txg) != 0 ||
- nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind_policy) != 0)
+ nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
+ nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
+ rewind_policy) != 0)
goto error;
if (searchdirs == NULL) {
@@ -2452,7 +2453,7 @@ zpool_do_import(int argc, char **argv)
if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
continue;
- verify(nvlist_add_nvlist(config, ZPOOL_REWIND_POLICY,
+ verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
policy) == 0);
if (argc == 0) {
@@ -3940,8 +3941,10 @@ zpool_do_clear(int argc, char **argv)
/* In future, further rewind policy choices can be passed along here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
- nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind_policy) != 0)
+ nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
+ rewind_policy) != 0) {
return (1);
+ }
pool = argv[0];
device = argc == 2 ? argv[1] : NULL;
diff --git a/usr/src/common/zfs/zfs_comutil.c b/usr/src/common/zfs/zfs_comutil.c
index 7688113e36..a5e7cb6d03 100644
--- a/usr/src/common/zfs/zfs_comutil.c
+++ b/usr/src/common/zfs/zfs_comutil.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
*/
/*
@@ -67,17 +67,17 @@ zfs_allocatable_devs(nvlist_t *nv)
}
void
-zpool_get_rewind_policy(nvlist_t *nvl, zpool_rewind_policy_t *zrpp)
+zpool_get_load_policy(nvlist_t *nvl, zpool_load_policy_t *zlpp)
{
nvlist_t *policy;
nvpair_t *elem;
char *nm;
/* Defaults */
- zrpp->zrp_request = ZPOOL_NO_REWIND;
- zrpp->zrp_maxmeta = 0;
- zrpp->zrp_maxdata = UINT64_MAX;
- zrpp->zrp_txg = UINT64_MAX;
+ zlpp->zlp_rewind = ZPOOL_NO_REWIND;
+ zlpp->zlp_maxmeta = 0;
+ zlpp->zlp_maxdata = UINT64_MAX;
+ zlpp->zlp_txg = UINT64_MAX;
if (nvl == NULL)
return;
@@ -85,24 +85,24 @@ zpool_get_rewind_policy(nvlist_t *nvl, zpool_rewind_policy_t *zrpp)
elem = NULL;
while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
nm = nvpair_name(elem);
- if (strcmp(nm, ZPOOL_REWIND_POLICY) == 0) {
+ if (strcmp(nm, ZPOOL_LOAD_POLICY) == 0) {
if (nvpair_value_nvlist(elem, &policy) == 0)
- zpool_get_rewind_policy(policy, zrpp);
+ zpool_get_load_policy(policy, zlpp);
return;
- } else if (strcmp(nm, ZPOOL_REWIND_REQUEST) == 0) {
- if (nvpair_value_uint32(elem, &zrpp->zrp_request) == 0)
- if (zrpp->zrp_request & ~ZPOOL_REWIND_POLICIES)
- zrpp->zrp_request = ZPOOL_NO_REWIND;
- } else if (strcmp(nm, ZPOOL_REWIND_REQUEST_TXG) == 0) {
- (void) nvpair_value_uint64(elem, &zrpp->zrp_txg);
- } else if (strcmp(nm, ZPOOL_REWIND_META_THRESH) == 0) {
- (void) nvpair_value_uint64(elem, &zrpp->zrp_maxmeta);
- } else if (strcmp(nm, ZPOOL_REWIND_DATA_THRESH) == 0) {
- (void) nvpair_value_uint64(elem, &zrpp->zrp_maxdata);
+ } else if (strcmp(nm, ZPOOL_LOAD_REWIND_POLICY) == 0) {
+ if (nvpair_value_uint32(elem, &zlpp->zlp_rewind) == 0)
+ if (zlpp->zlp_rewind & ~ZPOOL_REWIND_POLICIES)
+ zlpp->zlp_rewind = ZPOOL_NO_REWIND;
+ } else if (strcmp(nm, ZPOOL_LOAD_REQUEST_TXG) == 0) {
+ (void) nvpair_value_uint64(elem, &zlpp->zlp_txg);
+ } else if (strcmp(nm, ZPOOL_LOAD_META_THRESH) == 0) {
+ (void) nvpair_value_uint64(elem, &zlpp->zlp_maxmeta);
+ } else if (strcmp(nm, ZPOOL_LOAD_DATA_THRESH) == 0) {
+ (void) nvpair_value_uint64(elem, &zlpp->zlp_maxdata);
}
}
- if (zrpp->zrp_request == 0)
- zrpp->zrp_request = ZPOOL_NO_REWIND;
+ if (zlpp->zlp_rewind == 0)
+ zlpp->zlp_rewind = ZPOOL_NO_REWIND;
}
typedef struct zfs_version_spa_map {
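
Note the defaults established at the top of zpool_get_load_policy() above: a
NULL nvlist, or a config carrying no ZPOOL_LOAD_POLICY sub-nvlist, decodes to
plain no-rewind behavior. A hedged usage sketch of those defaults (assumes
<assert.h> plus the zfs_comutil.h declaration above):

	zpool_load_policy_t policy;

	zpool_get_load_policy(NULL, &policy);
	assert(policy.zlp_rewind == ZPOOL_NO_REWIND);	/* no rewind requested */
	assert(policy.zlp_maxmeta == 0);		/* tolerate no metadata errors */
	assert(policy.zlp_maxdata == UINT64_MAX);	/* data errors unbounded */
	assert(policy.zlp_txg == UINT64_MAX);		/* no specific txg to load */
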
diff --git a/usr/src/common/zfs/zfs_comutil.h b/usr/src/common/zfs/zfs_comutil.h
index f89054388a..8cc098ada2 100644
--- a/usr/src/common/zfs/zfs_comutil.h
+++ b/usr/src/common/zfs/zfs_comutil.h
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
*/
#ifndef _ZFS_COMUTIL_H
@@ -34,7 +34,7 @@ extern "C" {
#endif
extern boolean_t zfs_allocatable_devs(nvlist_t *);
-extern void zpool_get_rewind_policy(nvlist_t *, zpool_rewind_policy_t *);
+extern void zpool_get_load_policy(nvlist_t *, zpool_load_policy_t *);
extern int zfs_zpl_version_map(int spa_version);
extern int zfs_spa_version_map(int zpl_version);
diff --git a/usr/src/lib/libzfs/common/libzfs.h b/usr/src/lib/libzfs/common/libzfs.h
index 78b24bf70a..e8cfd75816 100644
--- a/usr/src/lib/libzfs/common/libzfs.h
+++ b/usr/src/lib/libzfs/common/libzfs.h
@@ -395,7 +395,7 @@ typedef struct importargs {
int can_be_active : 1; /* can the pool be active? */
int unique : 1; /* does 'poolname' already exist? */
int exists : 1; /* set on return if pool already exists */
- nvlist_t *policy; /* rewind policy (rewind txg, etc.) */
+ nvlist_t *policy; /* load policy (max txg, rewind, etc.) */
} importargs_t;
extern nvlist_t *zpool_search_import(libzfs_handle_t *, importargs_t *);
diff --git a/usr/src/lib/libzfs/common/libzfs_import.c b/usr/src/lib/libzfs/common/libzfs_import.c
index 8cbbd58696..dbe37944a7 100644
--- a/usr/src/lib/libzfs/common/libzfs_import.c
+++ b/usr/src/lib/libzfs/common/libzfs_import.c
@@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright 2015 RackTop Systems.
* Copyright 2017 Nexenta Systems, Inc.
*/
@@ -748,7 +748,7 @@ get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
}
if (policy != NULL) {
- if (nvlist_add_nvlist(config, ZPOOL_REWIND_POLICY,
+ if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
policy) != 0)
goto nomem;
}
diff --git a/usr/src/lib/libzfs/common/libzfs_pool.c b/usr/src/lib/libzfs/common/libzfs_pool.c
index f1ba77fd1b..4378151cae 100644
--- a/usr/src/lib/libzfs/common/libzfs_pool.c
+++ b/usr/src/lib/libzfs/common/libzfs_pool.c
@@ -397,6 +397,8 @@ static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
int len = strlen(pool);
+ if (bootfs[0] == '\0')
+ return (B_TRUE);
if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
return (B_FALSE);
@@ -550,8 +552,7 @@ zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
* bootfs property value has to be a dataset name and
* the dataset has to be in the same pool as it sets to.
*/
- if (strval[0] != '\0' && !bootfs_name_valid(poolname,
- strval)) {
+ if (!bootfs_name_valid(poolname, strval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is an invalid name"), strval);
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
@@ -1715,7 +1716,7 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
nvlist_t *props, int flags)
{
zfs_cmd_t zc = { 0 };
- zpool_rewind_policy_t policy;
+ zpool_load_policy_t policy;
nvlist_t *nv = NULL;
nvlist_t *nvinfo = NULL;
nvlist_t *missing = NULL;
@@ -1787,7 +1788,7 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
zcmd_free_nvlists(&zc);
- zpool_get_rewind_policy(config, &policy);
+ zpool_get_load_policy(config, &policy);
if (error) {
char desc[1024];
@@ -1796,7 +1797,7 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
* Dry-run failed, but we print out what success
* looks like if we found a best txg
*/
- if (policy.zrp_request & ZPOOL_TRY_REWIND) {
+ if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
B_TRUE, nv);
nvlist_free(nv);
@@ -1889,10 +1890,10 @@ zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
ret = -1;
else if (zhp != NULL)
zpool_close(zhp);
- if (policy.zrp_request &
+ if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
- ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
+ ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
}
nvlist_free(nv);
return (0);
@@ -3269,7 +3270,7 @@ zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
zfs_cmd_t zc = { 0 };
char msg[1024];
nvlist_t *tgt;
- zpool_rewind_policy_t policy;
+ zpool_load_policy_t policy;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
nvlist_t *nvi = NULL;
@@ -3301,8 +3302,8 @@ zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
&zc.zc_guid) == 0);
}
- zpool_get_rewind_policy(rewindnvl, &policy);
- zc.zc_cookie = policy.zrp_request;
+ zpool_get_load_policy(rewindnvl, &policy);
+ zc.zc_cookie = policy.zlp_rewind;
if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
return (-1);
@@ -3318,13 +3319,13 @@ zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
}
}
- if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
+ if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
errno != EPERM && errno != EACCES)) {
- if (policy.zrp_request &
+ if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
zpool_rewind_exclaim(hdl, zc.zc_name,
- ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
+ ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
nvi);
nvlist_free(nvi);
}
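
A side note on the bootfs hunks earlier in this file: the empty-string check
moves into bootfs_name_valid() itself, since setting bootfs to "" means
clearing the property and is always legal, and the caller in
zpool_valid_proplist() drops its strval[0] != '\0' special case. Roughly (the
pool and dataset names are illustrative only, assuming the usual same-pool
prefix check in the rest of the function):

	bootfs_name_valid("rpool", "");			/* B_TRUE: clearing bootfs */
	bootfs_name_valid("rpool", "rpool/ROOT/os");	/* B_TRUE: dataset in the pool */
	bootfs_name_valid("rpool", "tank/ROOT/os");	/* B_FALSE: wrong pool */
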
diff --git a/usr/src/test/zfs-tests/include/commands.cfg b/usr/src/test/zfs-tests/include/commands.cfg
index 1747f03da1..b85717523a 100644
--- a/usr/src/test/zfs-tests/include/commands.cfg
+++ b/usr/src/test/zfs-tests/include/commands.cfg
@@ -62,6 +62,7 @@ export USR_BIN_FILES='awk
isainfo
kill
ksh
+ kstat
ln
logname
ls
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib
index 04ce5f858d..c32f72b504 100644
--- a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib
@@ -23,6 +23,9 @@
#
function cleanup
{
+ # clear any remaining zinjections
+ log_must zinject -c all > /dev/null
+
destroy_pool $TESTPOOL1
log_must rm -f $CPATH $CPATHBKP $CPATHBKP2 $MD5FILE $MD5FILE2
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/random_reads.ksh b/usr/src/test/zfs-tests/tests/perf/regression/random_reads.ksh
index 655366e00c..72e6746851 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/random_reads.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/random_reads.ksh
@@ -25,6 +25,15 @@
# for all fio runs. The ARC is cleared with `zinject -a` prior to each run
# so reads will go to disk.
#
+# Thread/Concurrency settings:
+# PERF_NTHREADS defines the number of files created in the test filesystem,
+# as well as the number of threads that will simultaneously drive IO to
+# those files. The settings chosen are from measurements in the
+# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
+# are at peak throughput but lowest latency. Higher concurrency introduces
+# queue time latency and would reduce the impact of code-induced performance
+# regressions.
+#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
@@ -48,13 +57,13 @@ export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
fi
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/random_readwrite.ksh b/usr/src/test/zfs-tests/tests/perf/regression/random_readwrite.ksh
index f41a2b526e..2e02929b99 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/random_readwrite.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/random_readwrite.ksh
@@ -25,6 +25,15 @@
# and used for all fio runs. The ARC is cleared with `zinject -a` prior to
# each run so reads will go to disk.
#
+# Thread/Concurrency settings:
+# PERF_NTHREADS defines the number of files created in the test filesystem,
+# as well as the number of threads that will simultaneously drive IO to
+# those files. The settings chosen are from measurements in the
+# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
+# are at peak throughput but lowest latency. Higher concurrency introduces
+# queue time latency and would reduce the impact of code-induced performance
+# regressions.
+#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
@@ -48,13 +57,13 @@ export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'4 8 16 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES='' # bssplit used instead
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'32 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES='' # bssplit used instead
fi
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/random_writes.ksh b/usr/src/test/zfs-tests/tests/perf/regression/random_writes.ksh
index 9e201a827c..d4508ef882 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/random_writes.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/random_writes.ksh
@@ -24,6 +24,15 @@
# Prior to each fio run the dataset is recreated, and fio writes new files
# into an otherwise empty pool.
#
+# Thread/Concurrency settings:
+# PERF_NTHREADS defines the number of files created in the test filesystem,
+# as well as the number of threads that will simultaneously drive IO to
+# those files. The settings chosen are from measurements in the
+# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
+# are at peak throughput but lowest latency. Higher concurrency introduces
+# queue time latency and would reduce the impact of code-induced performance
+# regressions.
+#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
@@ -47,13 +56,13 @@ export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 64'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'32 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
fi
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh b/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh
index 580f2d94e4..232785648f 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh
@@ -25,6 +25,15 @@
# for all fio runs. The ARC is cleared with `zinject -a` prior to each run
# so reads will go to disk.
#
+# Thread/Concurrency settings:
+# PERF_NTHREADS defines the number of files created in the test filesystem,
+# as well as the number of threads that will simultaneously drive IO to
+# those files. The settings chosen are from measurements in the
+# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
+# are at peak throughput but lowest latency. Higher concurrency introduces
+# queue time latency and would reduce the impact of code-induced performance
+# regressions.
+#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
@@ -48,13 +57,13 @@ export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
fi
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/sequential_writes.ksh b/usr/src/test/zfs-tests/tests/perf/regression/sequential_writes.ksh
index 493a3d18b7..2799c43336 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/sequential_writes.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/sequential_writes.ksh
@@ -24,6 +24,15 @@
# Prior to each fio run the dataset is recreated, and fio writes new files
# into an otherwise empty pool.
#
+# Thread/Concurrency settings:
+# PERF_NTHREADS defines the number of files created in the test filesystem,
+# as well as the number of threads that will simultaneously drive IO to
+# those files. The settings chosen are from measurements in the
+# PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
+# are at peak throughput but lowest latency. Higher concurrency introduces
+# queue time latency and would reduce the impact of code-induced performance
+# regressions.
+#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
@@ -47,13 +56,13 @@ export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
- export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
fi
diff --git a/usr/src/uts/common/fs/zfs/spa.c b/usr/src/uts/common/fs/zfs/spa.c
index ccee26fba2..064e4411db 100644
--- a/usr/src/uts/common/fs/zfs/spa.c
+++ b/usr/src/uts/common/fs/zfs/spa.c
@@ -1814,6 +1814,7 @@ spa_check_for_missing_logs(spa_t *spa)
if (idx > 0) {
spa_load_failed(spa, "some log devices are missing");
+ vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
} else {
@@ -1825,6 +1826,7 @@ spa_check_for_missing_logs(spa_t *spa)
spa_set_log_state(spa, SPA_LOG_CLEAR);
spa_load_note(spa, "some log devices are "
"missing, ZIL is dropped.");
+ vdev_dbgmsg_print_tree(rvd, 2);
break;
}
}
@@ -2019,13 +2021,13 @@ spa_load_verify(spa_t *spa)
{
zio_t *rio;
spa_load_error_t sle = { 0 };
- zpool_rewind_policy_t policy;
+ zpool_load_policy_t policy;
boolean_t verify_ok = B_FALSE;
int error = 0;
- zpool_get_rewind_policy(spa->spa_config, &policy);
+ zpool_get_load_policy(spa->spa_config, &policy);
- if (policy.zrp_request & ZPOOL_NEVER_REWIND)
+ if (policy.zlp_rewind & ZPOOL_NEVER_REWIND)
return (0);
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
@@ -2064,8 +2066,8 @@ spa_load_verify(spa_t *spa)
}
if (spa_load_verify_dryrun ||
- (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
- sle.sle_data_count <= policy.zrp_maxdata)) {
+ (!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
+ sle.sle_data_count <= policy.zlp_maxdata)) {
int64_t loss = 0;
verify_ok = B_TRUE;
@@ -2765,17 +2767,17 @@ spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
/*
* We will use spa_config if we decide to reload the spa or if spa_load
* fails and we rewind. We must thus regenerate the config using the
- * MOS information with the updated paths. Rewind policy is an import
- * setting and is not in the MOS. We copy it over to our new, trusted
- * config.
+ * MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
+ * pass settings on how to load the pool and is not stored in the MOS.
+ * We copy it over to our new, trusted config.
*/
mos_config_txg = fnvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_POOL_TXG);
nvlist_free(mos_config);
mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
- if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_REWIND_POLICY,
+ if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
&policy) == 0)
- fnvlist_add_nvlist(mos_config, ZPOOL_REWIND_POLICY, policy);
+ fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
spa_config_set(spa, mos_config);
spa->spa_config_source = SPA_CONFIG_SRC_MOS;
@@ -4034,11 +4036,11 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
- zpool_rewind_policy_t policy;
+ zpool_load_policy_t policy;
- zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
+ zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
&policy);
- if (policy.zrp_request & ZPOOL_DO_REWIND)
+ if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa_activate(spa, spa_mode_global);
@@ -4048,8 +4050,8 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
zfs_dbgmsg("spa_open_common: opening %s", pool);
- error = spa_load_best(spa, state, policy.zrp_txg,
- policy.zrp_request);
+ error = spa_load_best(spa, state, policy.zlp_txg,
+ policy.zlp_rewind);
if (error == EBADF) {
/*
@@ -5016,7 +5018,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
spa_t *spa;
char *altroot = NULL;
spa_load_state_t state = SPA_LOAD_IMPORT;
- zpool_rewind_policy_t policy;
+ zpool_load_policy_t policy;
uint64_t mode = spa_mode_global;
uint64_t readonly = B_FALSE;
int error;
@@ -5067,8 +5069,8 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
*/
spa_async_suspend(spa);
- zpool_get_rewind_policy(config, &policy);
- if (policy.zrp_request & ZPOOL_DO_REWIND)
+ zpool_get_load_policy(config, &policy);
+ if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
@@ -5078,9 +5080,9 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
zfs_dbgmsg("spa_import: importing %s", pool);
} else {
zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
- "(RECOVERY MODE)", pool, (longlong_t)policy.zrp_txg);
+ "(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
}
- error = spa_load_best(spa, state, policy.zrp_txg, policy.zrp_request);
+ error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
/*
* Propagate anything learned while loading the pool and pass it
@@ -5202,7 +5204,7 @@ spa_tryimport(nvlist_t *tryconfig)
spa_t *spa;
uint64_t state;
int error;
- zpool_rewind_policy_t policy;
+ zpool_load_policy_t policy;
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
return (NULL);
@@ -5218,16 +5220,14 @@ spa_tryimport(nvlist_t *tryconfig)
spa_activate(spa, FREAD);
/*
- * Rewind pool if a max txg was provided. Note that even though we
- * retrieve the complete rewind policy, only the rewind txg is relevant
- * for tryimport.
+ * Rewind pool if a max txg was provided.
*/
- zpool_get_rewind_policy(spa->spa_config, &policy);
- if (policy.zrp_txg != UINT64_MAX) {
- spa->spa_load_max_txg = policy.zrp_txg;
+ zpool_get_load_policy(spa->spa_config, &policy);
+ if (policy.zlp_txg != UINT64_MAX) {
+ spa->spa_load_max_txg = policy.zlp_txg;
spa->spa_extreme_rewind = B_TRUE;
zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
- poolname, (longlong_t)policy.zrp_txg);
+ poolname, (longlong_t)policy.zlp_txg);
} else {
zfs_dbgmsg("spa_tryimport: importing %s", poolname);
}
diff --git a/usr/src/uts/common/fs/zfs/vdev.c b/usr/src/uts/common/fs/zfs/vdev.c
index d7434c7ddc..388d13716c 100644
--- a/usr/src/uts/common/fs/zfs/vdev.c
+++ b/usr/src/uts/common/fs/zfs/vdev.c
@@ -1573,8 +1573,11 @@ vdev_validate(vdev_t *vd)
/*
* If we are performing an extreme rewind, we allow for a label that
* was modified at a point after the current txg.
+ * If config lock is not held do not check for the txg. spa_sync could
+ * be updating the vdev's label before updating spa_last_synced_txg.
*/
- if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0)
+ if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
+ spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
txg = UINT64_MAX;
else
txg = spa_last_synced_txg(spa);
diff --git a/usr/src/uts/common/sys/fs/zfs.h b/usr/src/uts/common/sys/fs/zfs.h
index 300c916244..790514ddef 100644
--- a/usr/src/uts/common/sys/fs/zfs.h
+++ b/usr/src/uts/common/sys/fs/zfs.h
@@ -491,7 +491,7 @@ typedef enum {
#define ZPL_VERSION_USERSPACE ZPL_VERSION_4
#define ZPL_VERSION_SA ZPL_VERSION_5
-/* Rewind request information */
+/* Rewind policy information */
#define ZPOOL_NO_REWIND 1 /* No policy - default behavior */
#define ZPOOL_NEVER_REWIND 2 /* Do not search for best txg or rewind */
#define ZPOOL_TRY_REWIND 4 /* Search for best txg, but do not rewind */
@@ -500,12 +500,12 @@ typedef enum {
#define ZPOOL_REWIND_MASK 28 /* All the possible rewind bits */
#define ZPOOL_REWIND_POLICIES 31 /* All the possible policy bits */
-typedef struct zpool_rewind_policy {
- uint32_t zrp_request; /* rewind behavior requested */
- uint64_t zrp_maxmeta; /* max acceptable meta-data errors */
- uint64_t zrp_maxdata; /* max acceptable data errors */
- uint64_t zrp_txg; /* specific txg to load */
-} zpool_rewind_policy_t;
+typedef struct zpool_load_policy {
+ uint32_t zlp_rewind; /* rewind policy requested */
+ uint64_t zlp_maxmeta; /* max acceptable meta-data errors */
+ uint64_t zlp_maxdata; /* max acceptable data errors */
+ uint64_t zlp_txg; /* specific txg to load */
+} zpool_load_policy_t;
/*
* The following are configuration names used in the nvlist describing a pool's
@@ -593,12 +593,12 @@ typedef struct zpool_rewind_policy {
#define ZPOOL_CONFIG_FRU "fru"
#define ZPOOL_CONFIG_AUX_STATE "aux_state"
-/* Rewind policy parameters */
-#define ZPOOL_REWIND_POLICY "rewind-policy"
-#define ZPOOL_REWIND_REQUEST "rewind-request"
-#define ZPOOL_REWIND_REQUEST_TXG "rewind-request-txg"
-#define ZPOOL_REWIND_META_THRESH "rewind-meta-thresh"
-#define ZPOOL_REWIND_DATA_THRESH "rewind-data-thresh"
+/* Pool load policy parameters */
+#define ZPOOL_LOAD_POLICY "load-policy"
+#define ZPOOL_LOAD_REWIND_POLICY "load-rewind-policy"
+#define ZPOOL_LOAD_REQUEST_TXG "load-request-txg"
+#define ZPOOL_LOAD_META_THRESH "load-meta-thresh"
+#define ZPOOL_LOAD_DATA_THRESH "load-data-thresh"
/* Rewind data discovered */
#define ZPOOL_CONFIG_LOAD_TIME "rewind_txg_ts"
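
Finally, the rewind bits above compose as a bitmask: ZPOOL_REWIND_MASK (28) is
ZPOOL_TRY_REWIND | ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND, while
ZPOOL_REWIND_POLICIES (31) adds the no/never bits. The zdb hunk at the top of
this merge composes a request along these lines (the extreme variable is a
stand-in for zdb's dump_opt['X']):

	boolean_t extreme = B_TRUE;	/* stand-in for zdb's -X option */
	uint32_t rewind = ZPOOL_DO_REWIND |
	    (extreme ? ZPOOL_EXTREME_REWIND : 0);

	/*
	 * zpool_get_load_policy() validates the request: any bit outside
	 * ZPOOL_REWIND_POLICIES resets zlp_rewind to ZPOOL_NO_REWIND.
	 */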