author     Jerry Jelinek <jerry.jelinek@joyent.com>  2018-06-19 11:57:34 +0000
committer  Jerry Jelinek <jerry.jelinek@joyent.com>  2018-06-19 11:57:34 +0000
commit     967bcd4bf4ed12e06a616379429f6f1a8765bbe4 (patch)
tree       adea2ce2e9c554903afb66bca052415a6f52ccd5
parent     e95ae2f5523c90888c59197a541b270a64a362c6 (diff)
parent     b6031810da58df96413bf76e068638fcab1f228a (diff)
download   illumos-joyent-967bcd4bf4ed12e06a616379429f6f1a8765bbe4.tar.gz
[illumos-gate merge]
commit b6031810da58df96413bf76e068638fcab1f228a
    9456 ztest failure in zil_commit_waiter_timeout
commit c373aa8be7dba4bca17e8db696f27412617604b9
    9082 Add ZFS performance test targeting ZIL latency
-rw-r--r--  usr/src/pkg/manifests/system-test-zfstest.mf | 3
-rw-r--r--  usr/src/test/zfs-tests/include/libtest.shlib | 52
-rw-r--r--  usr/src/test/zfs-tests/runfiles/perf-regression.run | 2
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/fio/mkfiles.fio | 2
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/fio/random_reads.fio | 4
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/fio/random_readwrite.fio | 2
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/fio/random_writes.fio | 2
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/fio/sequential_reads.fio | 4
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/fio/sequential_writes.fio | 2
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/perf.shlib | 271
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/random_reads.ksh | 24
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/random_readwrite.ksh | 24
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/random_writes.ksh | 23
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/random_writes_zil.ksh | 73
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh | 26
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh | 24
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh | 38
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh | 24
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/regression/sequential_writes.ksh | 23
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/scripts/io.d | 81
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/scripts/offcpu-profile.d | 41
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/scripts/prefetch_io.d | 2
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/scripts/profile.d | 2
-rw-r--r--  usr/src/test/zfs-tests/tests/perf/scripts/zil.d | 92
-rw-r--r--  usr/src/uts/common/fs/zfs/zil.c | 2
25 files changed, 680 insertions(+), 163 deletions(-)
diff --git a/usr/src/pkg/manifests/system-test-zfstest.mf b/usr/src/pkg/manifests/system-test-zfstest.mf
index 5916d9079a..140fc1221e 100644
--- a/usr/src/pkg/manifests/system-test-zfstest.mf
+++ b/usr/src/pkg/manifests/system-test-zfstest.mf
@@ -2726,6 +2726,7 @@ file path=opt/zfs-tests/tests/perf/perf.shlib mode=0444
file path=opt/zfs-tests/tests/perf/regression/random_reads mode=0555
file path=opt/zfs-tests/tests/perf/regression/random_readwrite mode=0555
file path=opt/zfs-tests/tests/perf/regression/random_writes mode=0555
+file path=opt/zfs-tests/tests/perf/regression/random_writes_zil mode=0555
file path=opt/zfs-tests/tests/perf/regression/sequential_reads mode=0555
file path=opt/zfs-tests/tests/perf/regression/sequential_reads_arc_cached \
mode=0555
@@ -2737,8 +2738,10 @@ file path=opt/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached \
file path=opt/zfs-tests/tests/perf/regression/sequential_writes mode=0555
file path=opt/zfs-tests/tests/perf/regression/setup mode=0555
file path=opt/zfs-tests/tests/perf/scripts/io.d mode=0444
+file path=opt/zfs-tests/tests/perf/scripts/offcpu-profile.d mode=0444
file path=opt/zfs-tests/tests/perf/scripts/prefetch_io.d mode=0444
file path=opt/zfs-tests/tests/perf/scripts/profile.d mode=0444
+file path=opt/zfs-tests/tests/perf/scripts/zil.d mode=0444
file path=opt/zfs-tests/tests/stress/races/remove_file_while_remap mode=0555
license cr_Sun license=cr_Sun
license lic_CDDL license=lic_CDDL
diff --git a/usr/src/test/zfs-tests/include/libtest.shlib b/usr/src/test/zfs-tests/include/libtest.shlib
index 512bb069f9..61c4f97d70 100644
--- a/usr/src/test/zfs-tests/include/libtest.shlib
+++ b/usr/src/test/zfs-tests/include/libtest.shlib
@@ -1226,6 +1226,58 @@ function destroy_pool #pool
return 0
}
+# Return 0 if created successfully; $? otherwise
+#
+# $1 - dataset name
+# $2-n - dataset options
+
+function create_dataset #dataset dataset_options
+{
+ typeset dataset=$1
+
+ shift
+
+ if [[ -z $dataset ]]; then
+ log_note "Missing dataset name."
+ return 1
+ fi
+
+ if datasetexists $dataset ; then
+ destroy_dataset $dataset
+ fi
+
+ log_must zfs create $@ $dataset
+
+ return 0
+}
+
+# Return 0 if the dataset is destroyed successfully; $? otherwise
+# Note: In local zones, this function should return 0 silently.
+#
+# $1 - dataset name
+
+function destroy_dataset #dataset
+{
+ typeset dataset=$1
+ typeset mtpt
+
+ if [[ -z $dataset ]]; then
+ log_note "No dataset name given."
+ return 1
+ fi
+
+ if datasetexists "$dataset" ; then
+ mtpt=$(get_prop mountpoint "$dataset")
+ log_must zfs destroy -r $dataset
+ [[ -d $mtpt ]] && log_must rm -rf $mtpt
+ else
+ log_note "Dataset does not exist. ($dataset)"
+ return 1
+ fi
+
+ return 0
+}
+
#
# Firstly, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
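
A minimal usage sketch for the new helpers (hypothetical pool and dataset names, not part of this change): create_dataset passes any extra arguments through to "zfs create", and destroy_dataset removes the dataset recursively along with its old mountpoint directory.

	# Create a dataset with custom properties, use it, then tear it down.
	create_dataset testpool/ds1 -o compression=lz4 -o recordsize=8k
	log_must dd if=/dev/zero of=/testpool/ds1/file bs=1024k count=10
	destroy_dataset testpool/ds1
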
diff --git a/usr/src/test/zfs-tests/runfiles/perf-regression.run b/usr/src/test/zfs-tests/runfiles/perf-regression.run
index dbb30f0327..b28f0e7f78 100644
--- a/usr/src/test/zfs-tests/runfiles/perf-regression.run
+++ b/usr/src/test/zfs-tests/runfiles/perf-regression.run
@@ -26,5 +26,5 @@ outputdir = /var/tmp/test_results
[/opt/zfs-tests/tests/perf/regression]
tests = ['sequential_writes', 'sequential_reads', 'sequential_reads_arc_cached',
'sequential_reads_arc_cached_clone', 'sequential_reads_dbuf_cached',
- 'random_reads', 'random_writes', 'random_readwrite']
+ 'random_reads', 'random_writes', 'random_readwrite', 'random_writes_zil']
post =
diff --git a/usr/src/test/zfs-tests/tests/perf/fio/mkfiles.fio b/usr/src/test/zfs-tests/tests/perf/fio/mkfiles.fio
index 8289d546de..c7efda86d3 100644
--- a/usr/src/test/zfs-tests/tests/perf/fio/mkfiles.fio
+++ b/usr/src/test/zfs-tests/tests/perf/fio/mkfiles.fio
@@ -21,7 +21,7 @@ ioengine=psync
bs=1024k
rw=write
thread=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
numjobs=${NUMJOBS}
filesize=${FILE_SIZE}
buffer_compress_percentage=66
diff --git a/usr/src/test/zfs-tests/tests/perf/fio/random_reads.fio b/usr/src/test/zfs-tests/tests/perf/fio/random_reads.fio
index 25dd2ff838..c7279a85bb 100644
--- a/usr/src/test/zfs-tests/tests/perf/fio/random_reads.fio
+++ b/usr/src/test/zfs-tests/tests/perf/fio/random_reads.fio
@@ -10,7 +10,7 @@
#
#
-# Copyright (c) 2015 by Delphix. All rights reserved.
+# Copyright (c) 2016 by Delphix. All rights reserved.
#
[global]
@@ -21,7 +21,7 @@ overwrite=0
thread=1
rw=randread
time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
runtime=${RUNTIME}
bs=${BLOCKSIZE}
ioengine=psync
diff --git a/usr/src/test/zfs-tests/tests/perf/fio/random_readwrite.fio b/usr/src/test/zfs-tests/tests/perf/fio/random_readwrite.fio
index 07090d4dcd..7d01c38ada 100644
--- a/usr/src/test/zfs-tests/tests/perf/fio/random_readwrite.fio
+++ b/usr/src/test/zfs-tests/tests/perf/fio/random_readwrite.fio
@@ -23,7 +23,7 @@ thread=1
rw=randrw
rwmixread=80
time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
runtime=${RUNTIME}
bssplit=4k/50:8k/30:128k/10:1m/10
ioengine=psync
diff --git a/usr/src/test/zfs-tests/tests/perf/fio/random_writes.fio b/usr/src/test/zfs-tests/tests/perf/fio/random_writes.fio
index 9233a84260..5e2cb30026 100644
--- a/usr/src/test/zfs-tests/tests/perf/fio/random_writes.fio
+++ b/usr/src/test/zfs-tests/tests/perf/fio/random_writes.fio
@@ -20,7 +20,7 @@ fallocate=0
thread=1
rw=randwrite
time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
runtime=${RUNTIME}
bs=${BLOCKSIZE}
ioengine=psync
diff --git a/usr/src/test/zfs-tests/tests/perf/fio/sequential_reads.fio b/usr/src/test/zfs-tests/tests/perf/fio/sequential_reads.fio
index b7d9fea5f3..be4a7f078e 100644
--- a/usr/src/test/zfs-tests/tests/perf/fio/sequential_reads.fio
+++ b/usr/src/test/zfs-tests/tests/perf/fio/sequential_reads.fio
@@ -10,7 +10,7 @@
#
#
-# Copyright (c) 2015 by Delphix. All rights reserved.
+# Copyright (c) 2016 by Delphix. All rights reserved.
#
[global]
@@ -21,7 +21,7 @@ overwrite=0
thread=1
rw=read
time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
runtime=${RUNTIME}
bs=${BLOCKSIZE}
ioengine=psync
diff --git a/usr/src/test/zfs-tests/tests/perf/fio/sequential_writes.fio b/usr/src/test/zfs-tests/tests/perf/fio/sequential_writes.fio
index 0ee6d091db..65a65910fd 100644
--- a/usr/src/test/zfs-tests/tests/perf/fio/sequential_writes.fio
+++ b/usr/src/test/zfs-tests/tests/perf/fio/sequential_writes.fio
@@ -20,7 +20,7 @@ fallocate=0
thread=1
rw=write
time_based=1
-directory=/${TESTFS}
+directory=${DIRECTORY}
runtime=${RUNTIME}
bs=${BLOCKSIZE}
ioengine=psync
diff --git a/usr/src/test/zfs-tests/tests/perf/perf.shlib b/usr/src/test/zfs-tests/tests/perf/perf.shlib
index 24a85ba700..09428c67bf 100644
--- a/usr/src/test/zfs-tests/tests/perf/perf.shlib
+++ b/usr/src/test/zfs-tests/tests/perf/perf.shlib
@@ -36,6 +36,96 @@ function get_sync_str
echo $sync_str
}
+function get_suffix
+{
+ typeset threads=$1
+ typeset sync=$2
+ typeset iosize=$3
+
+ typeset sync_str=$(get_sync_str $sync)
+ typeset filesystems=$(get_nfilesystems)
+
+ typeset suffix="$sync_str.$iosize-ios"
+ suffix="$suffix.$threads-threads.$filesystems-filesystems"
+ echo $suffix
+}
+
+function do_fio_run_impl
+{
+ typeset script=$1
+ typeset do_recreate=$2
+ typeset clear_cache=$3
+
+ typeset threads=$4
+ typeset threads_per_fs=$5
+ typeset sync=$6
+ typeset iosize=$7
+
+ typeset sync_str=$(get_sync_str $sync)
+ log_note "Running with $threads $sync_str threads, $iosize ios"
+
+ if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then
+ log_must test $do_recreate
+ verify_threads_per_fs $threads $threads_per_fs
+ fi
+
+ if $do_recreate; then
+ recreate_perf_pool
+
+ #
+ # A value of zero for "threads_per_fs" is "special", and
+ # means a single filesystem should be used, regardless
+ # of the number of threads.
+ #
+ if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then
+ populate_perf_filesystems $((threads / threads_per_fs))
+ else
+ populate_perf_filesystems 1
+ fi
+ fi
+
+ if $clear_cache; then
+ # Clear the ARC
+ zpool export $PERFPOOL
+ zpool import $PERFPOOL
+ fi
+
+ if [[ -n $ZINJECT_DELAYS ]]; then
+ apply_zinject_delays
+ else
+ log_note "No per-device commands to execute."
+ fi
+
+ #
+ # Allow this to be overridden by the individual test case. This
+ # can be used to run the FIO job against something other than
+ # the default filesystem (e.g. against a clone).
+ #
+ export DIRECTORY=$(get_directory)
+ log_note "DIRECTORY: " $DIRECTORY
+
+ export RUNTIME=$PERF_RUNTIME
+ export FILESIZE=$((TOTAL_SIZE / threads))
+ export NUMJOBS=$threads
+ export SYNC_TYPE=$sync
+ export BLOCKSIZE=$iosize
+ sync
+
+ # This will be part of the output filename.
+ typeset suffix=$(get_suffix $threads $sync $iosize)
+
+ # Start the data collection
+ do_collect_scripts $suffix
+
+ # Define output file
+ typeset logbase="$(get_perf_output_dir)/$(basename \
+ $SUDO_COMMAND)"
+ typeset outfile="$logbase.fio.$suffix"
+
+ # Start the load
+ log_must fio --output $outfile $FIO_SCRIPTS/$script
+}
+
#
# This function will run fio in a loop, according to the .fio file passed
# in and a number of environment variables. The following variables can be
@@ -55,47 +145,21 @@ function do_fio_run
typeset script=$1
typeset do_recreate=$2
typeset clear_cache=$3
- typeset threads sync iosize
+ typeset threads threads_per_fs sync iosize
for threads in $PERF_NTHREADS; do
- for sync in $PERF_SYNC_TYPES; do
- for iosize in $PERF_IOSIZES; do
- typeset sync_str=$(get_sync_str $sync)
- log_note "Running with $threads" \
- "$sync_str threads, $iosize ios"
-
- if $do_recreate; then
- recreate_perfpool
- log_must zfs create $PERF_FS_OPTS \
- $TESTFS
- fi
-
- if $clear_cache; then
- # Clear the ARC
- zpool export $PERFPOOL
- zpool import $PERFPOOL
- fi
-
- export RUNTIME=$PERF_RUNTIME
- export FILESIZE=$((TOTAL_SIZE / threads))
- export NUMJOBS=$threads
- export SYNC_TYPE=$sync
- export BLOCKSIZE=$iosize
- sync
-
- # Start the data collection
- do_collect_scripts $threads $sync $iosize
-
- # This will be part of the output filename.
- typeset suffix="$sync_str.$iosize-ios.$threads-threads"
-
- # Define output file
- typeset logbase="$(get_perf_output_dir)/$(basename \
- $SUDO_COMMAND)"
- typeset outfile="$logbase.fio.$suffix"
-
- # Start the load
- log_must fio --output $outfile $FIO_SCRIPTS/$script
+ for threads_per_fs in $PERF_NTHREADS_PER_FS; do
+ for sync in $PERF_SYNC_TYPES; do
+ for iosize in $PERF_IOSIZES; do
+ do_fio_run_impl \
+ $script \
+ $do_recreate \
+ $clear_cache \
+ $threads \
+ $threads_per_fs \
+ $sync \
+ $iosize
+ done
done
done
done
@@ -108,17 +172,11 @@ function do_fio_run
#
function do_collect_scripts
{
- typeset threads=$1
- typeset sync=$2
- typeset iosize=$3
+ typeset suffix=$1
[[ -n $collect_scripts ]] || log_fail "No data collection scripts."
[[ -n $PERF_RUNTIME ]] || log_fail "No runtime specified."
- # This will be part of the output filename.
- typeset sync_str=$(get_sync_str $sync)
- typeset suffix="$sync_str.$iosize-ios.$threads-threads"
-
# Add in user supplied scripts and logfiles, if any.
typeset oIFS=$IFS
IFS=','
@@ -151,23 +209,122 @@ function get_perf_output_dir
echo $dir
}
+function apply_zinject_delays
+{
+ typeset idx=0
+ while [[ $idx -lt "${#ZINJECT_DELAYS[@]}" ]]; do
+ [[ -n ${ZINJECT_DELAYS[$idx]} ]] || \
+ log_must "No zinject delay found at index: $idx"
+
+ for disk in $DISKS; do
+ log_must zinject \
+ -d $disk -D ${ZINJECT_DELAYS[$idx]} $PERFPOOL
+ done
+
+ ((idx += 1))
+ done
+}
+
+function clear_zinject_delays
+{
+ log_must zinject -c all
+}
+
#
-# Destroy and create the pool used for performance tests. The
-# PERFPOOL_CREATE_CMD variable allows users to test with a custom pool
-# configuration by specifying the pool creation command in their environment.
-# If PERFPOOL_CREATE_CMD is empty, a pool using all available disks is created.
+# Destroy and create the pool used for performance tests.
#
-function recreate_perfpool
+function recreate_perf_pool
{
[[ -n $PERFPOOL ]] || log_fail "The \$PERFPOOL variable isn't set."
- poolexists $PERFPOOL && destroy_pool $PERFPOOL
+ #
+ # In case there's been some "leaked" zinject delays, or if the
+ # performance test injected some delays itself, we clear all
+ # delays before attempting to destroy the pool. Each delay
+ # places a hold on the pool, so the destroy will fail if there
+ # are any outstanding delays.
+ #
+ clear_zinject_delays
+
+ #
+ # The create_pool function handles the case where the pool already
+ # exists; it destroys the previous pool and creates a new one.
+ #
+ create_pool $PERFPOOL $DISKS
+}
- if [[ -n $PERFPOOL_CREATE_CMD ]]; then
- log_must $PERFPOOL_CREATE_CMD
- else
- log_must eval "zpool create -f $PERFPOOL $DISKS"
- fi
+function verify_threads_per_fs
+{
+ typeset threads=$1
+ typeset threads_per_fs=$2
+
+ log_must test -n $threads
+ log_must test -n $threads_per_fs
+
+ #
+ # A value of "0" is treated as a "special value", and it is
+ # interpreted to mean all threads will run using a single
+ # filesystem.
+ #
+ [[ $threads_per_fs -eq 0 ]] && return
+
+ #
+ # The number of threads per filesystem must be a value greater
+ # than or equal to zero; since we just verified the value isn't
+ # 0 above, then it must be greater than zero here.
+ #
+ log_must test $threads_per_fs -ge 0
+
+ #
+ # This restriction can be lifted later if needed, but for now,
+ # we restrict the number of threads per filesystem to a value
+ # that evenly divides the thread count. This way, the threads
+ # will be evenly distributed over all the filesystems.
+ #
+ log_must test $((threads % threads_per_fs)) -eq 0
+}
+
+function populate_perf_filesystems
+{
+ typeset nfilesystems=${1:-1}
+
+ export TESTFS=""
+ for i in $(seq 1 $nfilesystems); do
+ typeset dataset="$PERFPOOL/fs$i"
+ create_dataset $dataset $PERF_FS_OPTS
+ if [[ -z "$TESTFS" ]]; then
+ TESTFS="$dataset"
+ else
+ TESTFS="$TESTFS $dataset"
+ fi
+ done
+}
+
+function get_nfilesystems
+{
+ typeset filesystems=( $TESTFS )
+ echo ${#filesystems[@]}
+}
+
+function get_directory
+{
+ typeset filesystems=( $TESTFS )
+ typeset directory=
+
+ typeset idx=0
+ while [[ $idx -lt "${#filesystems[@]}" ]]; do
+ mountpoint=$(get_prop mountpoint "${filesystems[$idx]}")
+
+ if [[ -n $directory ]]; then
+ directory=$directory:$mountpoint
+ else
+ directory=$mountpoint
+ fi
+
+ ((idx += 1))
+ done
+
+ echo $directory
}
function get_max_arc_size
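
As a sketch of the new multi-filesystem plumbing (hypothetical values; the pool name, default mountpoints, and the "sync" string returned by get_sync_str are assumptions, not part of this change):

	# With 16 threads and 4 threads per filesystem, do_fio_run_impl creates
	# 16 / 4 = 4 filesystems under $PERFPOOL, e.g. perfpool/fs1 .. perfpool/fs4.
	populate_perf_filesystems $((16 / 4))
	get_directory		# -> /perfpool/fs1:/perfpool/fs2:/perfpool/fs3:/perfpool/fs4
	get_suffix 16 1 8k	# -> sync.8k-ios.16-threads.4-filesystems
	# A threads_per_fs value of 0 is special: all threads share one filesystem.
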
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/random_reads.ksh b/usr/src/test/zfs-tests/tests/perf/regression/random_reads.ksh
index 72e6746851..5679a8d7eb 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/random_reads.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/random_reads.ksh
@@ -40,30 +40,30 @@
function cleanup
{
- log_must zfs destroy $TESTFS
+ recreate_perf_pool
}
-log_assert "Measure IO stats during random read load"
log_onexit cleanup
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
# Variables for use by fio.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
fi
@@ -73,14 +73,20 @@ fi
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
-export collect_scripts=("dtrace -s $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1"
- "io" "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat"
- "dtrace -s $PERF_SCRIPTS/profile.d" "profile" "kstat zfs:0 1" "kstat")
+export collect_scripts=(
+ "kstat zfs:0 1" "kstat"
+ "vmstat 1" "vmstat"
+ "mpstat 1" "mpstat"
+ "iostat -xcnz 1" "iostat"
+ "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+ "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+)
log_note "Random reads with $PERF_RUNTYPE settings"
do_fio_run random_reads.fio false true
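
The TOTAL_SIZE arithmetic above is easier to follow with concrete numbers (hypothetical pool size, not part of this change):

	# Target ~50% pool utilization assuming a 3x compressratio.
	avail=100				# GiB available in the pool
	total_size=$((avail * 3 / 2))		# 150 GiB of logical file data written
	physical=$((total_size / 3))		# ~50 GiB after 3x compression, i.e. ~50% full
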
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/random_readwrite.ksh b/usr/src/test/zfs-tests/tests/perf/regression/random_readwrite.ksh
index 2e02929b99..63a14b28a3 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/random_readwrite.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/random_readwrite.ksh
@@ -40,30 +40,30 @@
function cleanup
{
- log_must zfs destroy $TESTFS
+ recreate_perf_pool
}
-log_assert "Measure IO stats during random read-write load"
log_onexit cleanup
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
# Variables for use by fio.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'4 8 16 64'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES='' # bssplit used instead
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'32 64'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES='' # bssplit used instead
fi
@@ -73,14 +73,20 @@ fi
# a subset of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
-export collect_scripts=("dtrace -s $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1"
- "io" "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat"
- "dtrace -s $PERF_SCRIPTS/profile.d" "profile" "kstat zfs:0 1" "kstat")
+export collect_scripts=(
+ "kstat zfs:0 1" "kstat"
+ "vmstat 1" "vmstat"
+ "mpstat 1" "mpstat"
+ "iostat -xcnz 1" "iostat"
+ "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+ "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+)
log_note "Random reads and writes with $PERF_RUNTYPE settings"
do_fio_run random_readwrite.fio false true
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/random_writes.ksh b/usr/src/test/zfs-tests/tests/perf/regression/random_writes.ksh
index d4508ef882..fdb6421887 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/random_writes.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/random_writes.ksh
@@ -39,30 +39,30 @@
function cleanup
{
- log_must zfs destroy $TESTFS
+ recreate_perf_pool
}
-log_assert "Measure IO stats during random write load"
log_onexit cleanup
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
# Variables for use by fio.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'32 128'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
fi
@@ -70,9 +70,14 @@ fi
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
-export collect_scripts=("dtrace -s $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1"
- "io" "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat"
- "dtrace -s $PERF_SCRIPTS/profile.d" "profile" "kstat zfs:0 1" "kstat")
+export collect_scripts=(
+ "kstat zfs:0 1" "kstat"
+ "vmstat 1" "vmstat"
+ "mpstat 1" "mpstat"
+ "iostat -xcnz 1" "iostat"
+ "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+ "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+)
log_note "Random writes with $PERF_RUNTYPE settings"
do_fio_run random_writes.fio true false
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/random_writes_zil.ksh b/usr/src/test/zfs-tests/tests/perf/regression/random_writes_zil.ksh
new file mode 100644
index 0000000000..15f99105f6
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/perf/regression/random_writes_zil.ksh
@@ -0,0 +1,73 @@
+#!/usr/bin/ksh
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/perf/perf.shlib
+
+function cleanup
+{
+ #
+ # We're using many filesystems depending on the number of
+ # threads for each test, and there's no good way to get a list
+ # of all the filesystems that should be destroyed on cleanup
+ # (i.e. the list of filesystems used by the last test that ran).
+ # Thus, we simply recreate the pool as a way to destroy all
+ # filesystems and leave a fresh pool behind.
+ #
+ recreate_perf_pool
+}
+
+log_onexit cleanup
+
+recreate_perf_pool
+
+# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
+
+if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
+ export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
+ export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'1 2 4 8 16 32 64 128'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
+ export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
+ export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
+
+elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
+ export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
+ export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
+ export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 16 64'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
+ export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
+ export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
+fi
+
+lun_list=$(pool_to_lun_list $PERFPOOL)
+log_note "Collecting backend IO stats with lun list $lun_list"
+export collect_scripts=(
+ "kstat zfs:0 1" "kstat"
+ "vmstat -T d 1" "vmstat"
+ "mpstat -T d 1" "mpstat"
+ "iostat -T d -xcnz 1" "iostat"
+ "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+ "dtrace -s $PERF_SCRIPTS/zil.d $PERFPOOL 1" "zil"
+ "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+ "dtrace -s $PERF_SCRIPTS/offcpu-profile.d" "offcpu-profile"
+)
+
+log_note "ZIL specific random write workload with $PERF_RUNTYPE settings"
+do_fio_run random_writes.fio true false
+log_pass "Measure IO stats during ZIL specific random write workload"
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh b/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh
index 232785648f..6034a03151 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads.ksh
@@ -40,30 +40,30 @@
function cleanup
{
- log_must zfs destroy $TESTFS
+ recreate_perf_pool
}
-log_assert "Measure IO stats during sequential read load"
log_onexit cleanup
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
# Variables for use by fio.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
fi
@@ -73,15 +73,21 @@ fi
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
-export collect_scripts=("dtrace -s $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1"
- "io" "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
- "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat"
- "dtrace -s $PERF_SCRIPTS/profile.d" "profile" "kstat zfs:0 1" "kstat")
+export collect_scripts=(
+ "kstat zfs:0 1" "kstat"
+ "vmstat 1" "vmstat"
+ "mpstat 1" "mpstat"
+ "iostat -xcnz 1" "iostat"
+ "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+ "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+ "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+)
log_note "Sequential reads with $PERF_RUNTYPE settings"
do_fio_run sequential_reads.fio false true
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh b/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
index 97bb8bdc31..4a0cc7ace7 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
@@ -30,15 +30,13 @@
function cleanup
{
- log_must zfs destroy $TESTFS
+ recreate_perf_pool
}
-log_assert "Measure IO stats during sequential read load"
log_onexit cleanup
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
# Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
@@ -48,12 +46,14 @@ if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
fi
@@ -63,15 +63,21 @@ fi
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
-export collect_scripts=("dtrace -s $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1"
- "io" "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
- "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat"
- "dtrace -s $PERF_SCRIPTS/profile.d" "profile" "kstat zfs:0 1" "kstat")
+export collect_scripts=(
+ "kstat zfs:0 1" "kstat"
+ "vmstat 1" "vmstat"
+ "mpstat 1" "mpstat"
+ "iostat -xcnz 1" "iostat"
+ "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+ "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+ "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+)
log_note "Sequential cached reads with $PERF_RUNTYPE settings"
do_fio_run sequential_reads.fio false false
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh b/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
index cfc748c843..6f71811012 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
@@ -36,15 +36,13 @@
function cleanup
{
- log_must zfs destroy $TESTFS
+ recreate_perf_pool
}
-log_assert "Measure IO stats during sequential read load"
log_onexit cleanup
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
# Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
@@ -54,12 +52,14 @@ if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'64k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
fi
@@ -69,26 +69,42 @@ fi
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
+#
+# Only a single filesystem is used by this test. To be defensive, we
+# double check that TESTFS only contains a single filesystem. We
+# wouldn't want to assume this was the case, and have it actually
+# contain multiple filesystems (causing cascading failures later).
+#
+log_must test $(get_nfilesystems) -eq 1
+
log_note "Creating snapshot, $TESTSNAP, of $TESTFS"
create_snapshot $TESTFS $TESTSNAP
log_note "Creating clone, $PERFPOOL/$TESTCLONE, from $TESTFS@$TESTSNAP"
create_clone $TESTFS@$TESTSNAP $PERFPOOL/$TESTCLONE
#
-# Reset the TESTFS to point to the clone
+# We want to run FIO against the clone we created above, and not the
+# clone's originating filesystem. Thus, we override the default behavior
+# and explicitly set TESTFS to the clone.
#
export TESTFS=$PERFPOOL/$TESTCLONE
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
-export collect_scripts=("dtrace -s $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1"
- "io" "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
- "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat"
- "dtrace -s $PERF_SCRIPTS/profile.d" "profile" "kstat zfs:0 1" "kstat")
+export collect_scripts=(
+ "kstat zfs:0 1" "kstat"
+ "vmstat 1" "vmstat"
+ "mpstat 1" "mpstat"
+ "iostat -xcnz 1" "iostat"
+ "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+ "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+ "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+)
-log_note "Sequential cached reads from $TESTFS with $PERF_RUNTYPE settings"
+log_note "Sequential cached reads from $DIRECTORY with $PERF_RUNTYPE settings"
do_fio_run sequential_reads.fio false false
log_pass "Measure IO stats during sequential cached read load"
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh b/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
index f7ea4b75c6..ff17d93e67 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
@@ -34,15 +34,13 @@
function cleanup
{
- log_must zfs destroy $TESTFS
+ recreate_perf_pool
}
-log_assert "Measure IO stats during sequential read load"
log_onexit cleanup
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
# Ensure the working set can be cached in the dbuf cache.
export TOTAL_SIZE=$(($(get_max_dbuf_cache_size) * 3 / 4))
@@ -52,12 +50,14 @@ if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'16 64'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'64'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
fi
@@ -67,15 +67,21 @@ fi
# of the available files.
export NUMJOBS=$(get_max $PERF_NTHREADS)
export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
+export DIRECTORY=$(get_directory)
log_must fio $FIO_SCRIPTS/mkfiles.fio
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
-export collect_scripts=("dtrace -s $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1"
- "io" "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
- "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat"
- "dtrace -s $PERF_SCRIPTS/profile.d" "profile" "kstat zfs:0 1" "kstat")
+export collect_scripts=(
+ "kstat zfs:0 1" "kstat"
+ "vmstat 1" "vmstat"
+ "mpstat 1" "mpstat"
+ "iostat -xcnz 1" "iostat"
+ "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+ "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
+ "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+)
log_note "Sequential cached reads with $PERF_RUNTYPE settings"
do_fio_run sequential_reads.fio false false
diff --git a/usr/src/test/zfs-tests/tests/perf/regression/sequential_writes.ksh b/usr/src/test/zfs-tests/tests/perf/regression/sequential_writes.ksh
index 2799c43336..1007ea1c9c 100644
--- a/usr/src/test/zfs-tests/tests/perf/regression/sequential_writes.ksh
+++ b/usr/src/test/zfs-tests/tests/perf/regression/sequential_writes.ksh
@@ -37,32 +37,32 @@
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib
-log_assert "Measure IO stats during sequential write load"
log_onexit cleanup
function cleanup
{
- log_must zfs destroy $TESTFS
+ recreate_perf_pool
}
-export TESTFS=$PERFPOOL/testfs
-recreate_perfpool
-log_must zfs create $PERF_FS_OPTS $TESTFS
+recreate_perf_pool
+populate_perf_filesystems
# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
-export TOTAL_SIZE=$(($(get_prop avail $TESTFS) * 3 / 2))
+export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
# Variables for use by fio.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
+ export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
fi
@@ -70,9 +70,14 @@ fi
# Set up the scripts and output files that will log performance data.
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
-export collect_scripts=("dtrace -s $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1"
- "io" "vmstat 1" "vmstat" "mpstat 1" "mpstat" "iostat -xcnz 1" "iostat"
- "dtrace -s $PERF_SCRIPTS/profile.d" "profile" "kstat zfs:0 1" "kstat")
+export collect_scripts=(
+ "kstat zfs:0 1" "kstat"
+ "vmstat 1" "vmstat"
+ "mpstat 1" "mpstat"
+ "iostat -xcnz 1" "iostat"
+ "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
+ "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
+)
log_note "Sequential writes with $PERF_RUNTYPE settings"
do_fio_run sequential_writes.fio true false
diff --git a/usr/src/test/zfs-tests/tests/perf/scripts/io.d b/usr/src/test/zfs-tests/tests/perf/scripts/io.d
index bbcbf8dc54..3da9a961e9 100644
--- a/usr/src/test/zfs-tests/tests/perf/scripts/io.d
+++ b/usr/src/test/zfs-tests/tests/perf/scripts/io.d
@@ -1,5 +1,3 @@
-#!/usr/sbin/dtrace -s
-
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
@@ -12,18 +10,24 @@
*/
/*
- * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2013, 2016 by Delphix. All rights reserved.
*/
/*
- * time: Seconds since the epoch
- * @ops: The number of reads and writes per interval
- * @bytes: Bytes read and written per interval
- * @latencies: Mean read and write latency per interval in ns
- * These aggregations are indexed with read/write for back end
- * statistics and zfs_read/zfs_write for ZPL level statistics.
+ * This measures the IO operations as seen by the ZPL layer (e.g.
+ * zfs_read and zfs_write), as well as the underlying block layer (e.g.
+ * the "io" dtrace provider).
+ *
+ * time: The number of seconds elapsed since the epoch
+ * @ops: collects the count of each metric (e.g. count of zfs_read calls)
+ * @latencies: collects the latency information of each metric
+ * @histograms: collects histograms of the latency for each metric
+ * @bytes: collects the throughput information for each metric
*/
+#include <sys/file.h>
+#include <sys/fs/zfs.h>
+
#pragma D option aggsortkey
#pragma D option quiet
@@ -33,16 +37,29 @@ BEGIN
@ops["write"] = count();
@ops["zfs_read"] = count();
@ops["zfs_write"] = count();
+ @ops["zfs_write_sync"] = count();
+ @ops["zfs_write_async"] = count();
@latencies["read"] = avg(0);
@latencies["write"] = avg(0);
@latencies["zfs_read"] = avg(0);
@latencies["zfs_write"] = avg(0);
+ @latencies["zfs_write_sync"] = avg(0);
+ @latencies["zfs_write_async"] = avg(0);
+ @histograms["read"] = quantize(0);
+ @histograms["write"] = quantize(0);
+ @histograms["zfs_read"] = quantize(0);
+ @histograms["zfs_write"] = quantize(0);
+ @histograms["zfs_write_sync"] = quantize(0);
+ @histograms["zfs_write_async"] = quantize(0);
@bytes["read"] = sum(0);
@bytes["write"] = sum(0);
@bytes["zfs_read"] = sum(0);
@bytes["zfs_write"] = sum(0);
+ @bytes["zfs_write_sync"] = sum(0);
+ @bytes["zfs_write_async"] = sum(0);
clear(@ops);
clear(@latencies);
+ clear(@histograms);
clear(@bytes);
}
@@ -50,24 +67,51 @@ fbt:zfs:zfs_read:entry,
fbt:zfs:zfs_write:entry
{
this->zp = (znode_t *)args[0]->v_data;
- this->poolname = stringof(this->zp->z_zfsvfs->z_os->os_spa->spa_name);
+ self->os = this->zp->z_zfsvfs->z_os;
+ self->poolname = stringof(self->os->os_spa->spa_name);
}
fbt:zfs:zfs_read:entry,
fbt:zfs:zfs_write:entry
-/ this->poolname == $$1 /
+/ self->poolname == $$1 /
{
- self->ts = timestamp;
- @ops[probefunc] = count();
- @bytes[probefunc] = sum(args[1]->uio_resid);
+ self->zfs_rw = timestamp;
+ self->bytes = args[1]->uio_resid;
+}
+
+fbt:zfs:zfs_write:entry
+/ self->zfs_rw != 0 /
+{
+ self->flag = self->os->os_sync == ZFS_SYNC_ALWAYS ? "sync" :
+ (args[2] & (FSYNC | FDSYNC)) ? "sync" : "async";
+}
+
+fbt:zfs:zfs_write:return
+/ self->zfs_rw != 0 /
+{
+ if (self->flag == "sync") {
+ this->name = "zfs_write_sync"
+ } else {
+ this->name = "zfs_write_async"
+ }
+
+ @ops[this->name] = count();
+ @bytes[this->name] = sum(self->bytes);
+ this->elapsed = timestamp - self->zfs_rw;
+ @latencies[this->name] = avg(this->elapsed);
+ @histograms[this->name] = quantize(this->elapsed);
}
fbt:zfs:zfs_read:return,
fbt:zfs:zfs_write:return
-/ self->ts != 0 /
+/ self->zfs_rw != 0 /
{
- @latencies[probefunc] = avg(timestamp - self->ts);
- self->ts = 0;
+ @ops[probefunc] = count();
+ @bytes[probefunc] = sum(self->bytes);
+ this->elapsed = timestamp - self->zfs_rw;
+ @latencies[probefunc] = avg(this->elapsed);
+ @histograms[probefunc] = quantize(this->elapsed);
+ self->zfs_rw = 0;
}
io:::start
@@ -84,6 +128,7 @@ io:::done
@ops[this->name] = count();
@bytes[this->name] = sum(args[0]->b_bcount);
@latencies[this->name] = avg(this->elapsed);
+ @histograms[this->name] = quantize(this->elapsed);
start[args[0]->b_edev, args[0]->b_blkno] = 0;
}
@@ -93,10 +138,12 @@ tick-$3s
printa("ops_%-21s%@u\n", @ops);
printa("bytes_%-21s%@u\n", @bytes);
printa("latencies_%-21s%@u\n", @latencies);
+ printa("histograms_%-21s%@u\n", @histograms);
clear(@ops);
clear(@bytes);
clear(@latencies);
+ clear(@histograms);
}
ERROR
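
The #include directives added to io.d are why the collect_scripts entries now invoke it with "dtrace -Cs" (run the C preprocessor) instead of "dtrace -s". A quick way to sanity-check that the fbt probes used above exist on a given system before a run (assumption: run as root with the zfs module loaded):

	dtrace -ln fbt:zfs:zfs_write:entry
	dtrace -ln fbt:zfs:zfs_read:entry
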
diff --git a/usr/src/test/zfs-tests/tests/perf/scripts/offcpu-profile.d b/usr/src/test/zfs-tests/tests/perf/scripts/offcpu-profile.d
new file mode 100644
index 0000000000..fcb6e9ce25
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/perf/scripts/offcpu-profile.d
@@ -0,0 +1,41 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright (c) 2016 by Delphix. All rights reserved.
+ */
+
+#pragma D option stackframes=100
+
+/*
+ * @stacks: The total off-cpu time (in nanoseconds) accumulated per stack
+ */
+
+sched:::off-cpu
+{
+ self->ts = timestamp;
+}
+
+sched:::on-cpu
+/ self->ts != 0 /
+{
+ @stacks[stack()] = sum(timestamp - self->ts);
+ self->ts = 0;
+}
+
+ERROR
+{
+ trace(arg1);
+ trace(arg2);
+ trace(arg3);
+ trace(arg4);
+ trace(arg5);
+}
diff --git a/usr/src/test/zfs-tests/tests/perf/scripts/prefetch_io.d b/usr/src/test/zfs-tests/tests/perf/scripts/prefetch_io.d
index fb0706ceb7..03e6c4111e 100644
--- a/usr/src/test/zfs-tests/tests/perf/scripts/prefetch_io.d
+++ b/usr/src/test/zfs-tests/tests/perf/scripts/prefetch_io.d
@@ -1,5 +1,3 @@
-#!/usr/sbin/dtrace -Cs
-
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
diff --git a/usr/src/test/zfs-tests/tests/perf/scripts/profile.d b/usr/src/test/zfs-tests/tests/perf/scripts/profile.d
index e7fbd1fca5..734765ac25 100644
--- a/usr/src/test/zfs-tests/tests/perf/scripts/profile.d
+++ b/usr/src/test/zfs-tests/tests/perf/scripts/profile.d
@@ -1,5 +1,3 @@
-#!/usr/sbin/dtrace -s
-
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
diff --git a/usr/src/test/zfs-tests/tests/perf/scripts/zil.d b/usr/src/test/zfs-tests/tests/perf/scripts/zil.d
new file mode 100644
index 0000000000..1868e1d493
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/perf/scripts/zil.d
@@ -0,0 +1,92 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright (c) 2016, 2018 by Delphix. All rights reserved.
+ */
+
+/*
+ * This measures metrics that relate to the performance of the ZIL.
+ *
+ * The "zil_commit" and "zil_commit_writer" fuctions are instrumented.
+ * For each function, the number of times each function is called is
+ * tracked, as well as the average latency for function, and a histogram
+ * of the latencies for each function.
+ */
+
+#pragma D option aggsortkey
+#pragma D option quiet
+
+BEGIN
+{
+ @c["zil_commit"] = count();
+ @a["zil_commit"] = avg(0);
+ @h["zil_commit"] = quantize(0);
+
+ @c["zil_commit_writer"] = count();
+ @a["zil_commit_writer"] = avg(0);
+ @h["zil_commit_writer"] = quantize(0);
+
+ clear(@c);
+ clear(@a);
+ clear(@h);
+}
+
+fbt:zfs:zil_commit:entry
+/ args[0]->zl_spa->spa_name == $$1 /
+{
+ self->zc_elapsed = timestamp;
+}
+
+fbt:zfs:zil_commit:return
+/ self->zc_elapsed /
+{
+ @c[probefunc] = count();
+ @a[probefunc] = avg(timestamp - self->zc_elapsed);
+ @h[probefunc] = quantize(timestamp - self->zc_elapsed);
+ self->zc_elapsed = 0;
+}
+
+fbt:zfs:zil_commit_writer:entry
+/ self->zc_elapsed && args[0]->zl_spa->spa_name == $$1 /
+{
+ self->zcw_elapsed = timestamp;
+}
+
+fbt:zfs:zil_commit_writer:return
+/ self->zcw_elapsed /
+{
+ @c[probefunc] = count();
+ @a[probefunc] = avg(timestamp - self->zcw_elapsed);
+ @h[probefunc] = quantize(timestamp - self->zcw_elapsed);
+ self->zcw_elapsed = 0;
+}
+
+tick-$2s
+{
+ printf("%u\n", `time);
+ printa("counts_%-21s %@u\n", @c);
+ printa("avgs_%-21s %@u\n", @a);
+ printa("histograms_%-21s %@u\n", @h);
+
+ clear(@c);
+ clear(@a);
+ clear(@h);
+}
+
+ERROR
+{
+ trace(arg1);
+ trace(arg2);
+ trace(arg3);
+ trace(arg4);
+ trace(arg5);
+}
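
Each interval, the printa() format strings above emit one line per aggregation, prefixed with counts_, avgs_, or histograms_ plus the function name. A hypothetical post-processing one-liner (the log file name is an assumption, not part of the test suite) to extract the per-interval zil_commit counts from a captured log:

	awk '/^counts_zil_commit / { print $2 }' perftest.zil.sync.8k-ios.16-threads.1-filesystems
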
diff --git a/usr/src/uts/common/fs/zfs/zil.c b/usr/src/uts/common/fs/zfs/zil.c
index 0e02377de5..66dbf3c386 100644
--- a/usr/src/uts/common/fs/zfs/zil.c
+++ b/usr/src/uts/common/fs/zfs/zil.c
@@ -2291,7 +2291,7 @@ zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
*/
lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb);
- ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
+ IMPLY(nlwb != NULL, lwb->lwb_state != LWB_STATE_OPENED);
/*
* Since the lwb's zio hadn't been issued by the time this thread