summaryrefslogtreecommitdiff
path: root/usr/src/test/zfs-tests/include
diff options
context:
space:
mode:
Diffstat (limited to 'usr/src/test/zfs-tests/include')
-rw-r--r--usr/src/test/zfs-tests/include/Makefile44
-rw-r--r--usr/src/test/zfs-tests/include/commands.cfg161
-rw-r--r--usr/src/test/zfs-tests/include/default.cfg149
-rw-r--r--usr/src/test/zfs-tests/include/libtest.shlib2571
-rw-r--r--usr/src/test/zfs-tests/include/math.shlib43
-rw-r--r--usr/src/test/zfs-tests/include/properties.shlib63
6 files changed, 3031 insertions, 0 deletions
diff --git a/usr/src/test/zfs-tests/include/Makefile b/usr/src/test/zfs-tests/include/Makefile
new file mode 100644
index 0000000000..82c00da07e
--- /dev/null
+++ b/usr/src/test/zfs-tests/include/Makefile
@@ -0,0 +1,44 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2012 by Delphix. All rights reserved.
+#
+
+include $(SRC)/test/Makefile.com
+
+include $(SRC)/Makefile.master
+
+SRCS = commands.cfg \
+ default.cfg \
+ libtest.shlib \
+ math.shlib \
+ properties.shlib
+
+ROOTOPTPKG = $(ROOT)/opt/zfs-tests
+INCLUDEDIR = $(ROOTOPTPKG)/include
+
+CMDS = $(SRCS:%=$(INCLUDEDIR)/%)
+$(CMDS) := FILEMODE = 0555
+
+all: $(SRCS)
+
+install: $(CMDS)
+
+clean lint clobber:
+
+$(CMDS): $(INCLUDEDIR) $(SRCS)
+
+$(INCLUDEDIR):
+ $(INS.dir)
+
+$(INCLUDEDIR)/%: %
+ $(INS.file)
diff --git a/usr/src/test/zfs-tests/include/commands.cfg b/usr/src/test/zfs-tests/include/commands.cfg
new file mode 100644
index 0000000000..cd8353d260
--- /dev/null
+++ b/usr/src/test/zfs-tests/include/commands.cfg
@@ -0,0 +1,161 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2012 by Delphix. All rights reserved.
+#
+
+export ARP="/usr/sbin/arp"
+export AWK="/usr/bin/awk"
+export BASENAME="/usr/bin/basename"
+export BC="/usr/bin/bc"
+export BUNZIP2="/usr/bin/bunzip2"
+export BZCAT="/usr/bin/bzcat"
+# bzip2, bunzip2 and bzcat prepend the contents of $BZIP2 or $BZIP to any
+# arguments. Setting BZIP2 here will lead to hard-to-diagnose unhappiness.
+export CAT="/usr/bin/cat"
+export CD="/usr/bin/cd"
+export CHGRP="/usr/bin/chgrp"
+export CHMOD="/usr/bin/chmod"
+export CHOWN="/usr/bin/chown"
+export CKSUM="/usr/bin/cksum"
+export CLRI="/usr/sbin/clri"
+export CMP="/usr/bin/cmp"
+export COMPRESS="/usr/bin/compress"
+export COREADM="/usr/bin/coreadm"
+export CP="/usr/bin/cp"
+export CPIO="/usr/bin/cpio"
+export CUT="/usr/bin/cut"
+export DATE="/usr/bin/date"
+export DD="/usr/bin/dd"
+export DEVFSADM="/usr/sbin/devfsadm"
+export DF="/usr/bin/df"
+export DIFF="/usr/bin/diff"
+export DIRCMP="/usr/bin/dircmp"
+export DIRNAME="/usr/bin/dirname"
+export DU="/usr/bin/du"
+export DUMPADM="/usr/sbin/dumpadm"
+export ECHO="/usr/bin/echo"
+export EGREP="/usr/bin/egrep"
+# Don't use $ENV here, because in ksh scripts it evaluates to
+# $HOME/.kshrc - likely not what you wanted.
+export FDISK="/usr/sbin/fdisk"
+export FF="/usr/sbin/ff"
+export FGREP="/usr/bin/fgrep"
+export FILE="/usr/bin/file"
+export FIND="/usr/bin/find"
+export FMADM="/usr/sbin/fmadm"
+export FMDUMP="/usr/sbin/fmdump"
+export FORMAT="/usr/sbin/format"
+export FSCK="/usr/sbin/fsck"
+export FSDB="/usr/sbin/fsdb"
+export FSIRAND="/usr/sbin/fsirand"
+export FSTYP="/usr/sbin/fstyp"
+export GETENT="/usr/bin/getent"
+export GETMAJOR="/usr/sbin/getmajor"
+export GNUDD="/usr/gnu/bin/dd"
+export GREP="/usr/bin/grep"
+export GROUPADD="/usr/sbin/groupadd"
+export GROUPDEL="/usr/sbin/groupdel"
+export GROUPMOD="/usr/sbin/groupmod"
+export GROUPS="/usr/bin/groups"
+export HEAD="/usr/bin/head"
+export HOSTNAME="/usr/bin/hostname"
+export ID="/usr/bin/id"
+export ISAINFO="/usr/bin/isainfo"
+export ISCSIADM="/usr/sbin/iscsiadm"
+export KILL="/usr/bin/kill"
+export KSH="/usr/bin/ksh"
+export KSTAT="/usr/bin/kstat"
+export LABELIT="/usr/sbin/labelit"
+export LOCKFS="/usr/sbin/lockfs"
+export LOFIADM="/usr/sbin/lofiadm"
+export LOGNAME="/usr/bin/logname"
+export LS="/usr/bin/ls"
+export MD5SUM="/usr/bin/md5sum"
+export MDB="/usr/bin/mdb"
+export METACLEAR="/usr/sbin/metaclear"
+export METADB="/usr/sbin/metadb"
+export METAINIT="/usr/sbin/metainit"
+export METASTAT="/usr/sbin/metastat"
+export MKDIR="/usr/bin/mkdir"
+export MKFILE="/usr/sbin/mkfile"
+export MKNOD="/usr/sbin/mknod"
+export MODINFO="/usr/sbin/modinfo"
+export MODUNLOAD="/usr/sbin/modunload"
+export MOUNT="/usr/sbin/mount"
+export MV="/usr/bin/mv"
+export NAWK="/usr/bin/nawk"
+export NCHECK="/usr/sbin/ncheck"
+export NEWFS="/usr/sbin/newfs"
+export PACK="/usr/bin/pack"
+export PAGESIZE="/usr/bin/pagesize"
+export PAX="/usr/bin/pax"
+export PFEXEC="/usr/bin/pfexec"
+export PGREP="/usr/bin/pgrep"
+export PING="/usr/sbin/ping"
+export PKGINFO="/usr/bin/pkginfo"
+export PKILL="/usr/bin/pkill"
+export PPRIV="/usr/bin/ppriv"
+export PRINTF="/usr/bin/printf"
+export PRTVTOC="/usr/sbin/prtvtoc"
+export PS="/usr/bin/ps"
+export PSRINFO="/usr/sbin/psrinfo"
+export PWD="/usr/bin/pwd"
+export PYTHON="/usr/bin/python"
+export QUOTAON="/usr/sbin/quotaon"
+export RCP="/usr/bin/rcp"
+export REBOOT="/usr/sbin/reboot"
+export RM="/usr/bin/rm"
+export RMDIR="/usr/bin/rmdir"
+export RSH="/usr/bin/rsh"
+export RUNAT="/usr/bin/runat"
+export SED="/usr/bin/sed"
+export SHARE="/usr/sbin/share"
+export SHUF="/usr/bin/shuf"
+export SLEEP="/usr/bin/sleep"
+export SORT="/usr/bin/sort"
+export STRINGS="/usr/bin/strings"
+export SU="/usr/bin/su"
+export SUM="/usr/bin/sum"
+export SVCADM="/usr/sbin/svcadm"
+export SVCS="/usr/bin/svcs"
+export SWAP="/usr/sbin/swap"
+export SWAPADD="/sbin/swapadd"
+export SYNC="/usr/bin/sync"
+export TAIL="/usr/bin/tail"
+export TAR="/usr/sbin/tar"
+export TOUCH="/usr/bin/touch"
+export TR="/usr/bin/tr"
+export TRUE="/usr/bin/true"
+export TUNEFS="/usr/sbin/tunefs"
+export UFSDUMP="/usr/sbin/ufsdump"
+export UFSRESTORE="/usr/sbin/ufsrestore"
+export UMASK="/usr/bin/umask"
+export UMOUNT="/usr/sbin/umount"
+export UNAME="/usr/bin/uname"
+export UNCOMPRESS="/usr/bin/uncompress"
+export UNIQ="/usr/bin/uniq"
+export UNPACK="/usr/bin/unpack"
+export UNSHARE="/usr/sbin/unshare"
+export USERADD="/usr/sbin/useradd"
+export USERDEL="/usr/sbin/userdel"
+export USERMOD="/usr/sbin/usermod"
+export WAIT="/usr/bin/wait"
+export WC="/usr/bin/wc"
+export ZDB="/usr/sbin/zdb"
+export ZFS="/usr/sbin/zfs"
+export ZHACK="/usr/sbin/zhack"
+export ZLOGIN="/usr/sbin/zlogin"
+export ZONEADM="/usr/sbin/zoneadm"
+export ZONECFG="/usr/sbin/zonecfg"
+export ZONENAME="/usr/bin/zonename"
+export ZPOOL="/usr/sbin/zpool"
diff --git a/usr/src/test/zfs-tests/include/default.cfg b/usr/src/test/zfs-tests/include/default.cfg
new file mode 100644
index 0000000000..72079889cc
--- /dev/null
+++ b/usr/src/test/zfs-tests/include/default.cfg
@@ -0,0 +1,149 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# Copyright (c) 2012 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/commands.cfg
+. $STF_SUITE/include/libtest.shlib
+
+# Define run length constants
+export RT_LONG="3"
+export RT_MEDIUM="2"
+export RT_SHORT="1"
+
+# Define macro for zone test
+export ZONE_POOL="zonepool"
+export ZONE_CTR="zonectr"
+
+# Test Suite Specific Commands
+export CHG_USR_EXEC="/opt/zfs-tests/bin/chg_usr_exec"
+export DEVNAME2DEVID="/opt/zfs-tests/bin/devname2devid"
+export DIR_RD_UPDATE="/opt/zfs-tests/bin/dir_rd_update"
+export FILE_CHECK="/opt/zfs-tests/bin/file_check"
+export FILE_TRUNC="/opt/zfs-tests/bin/file_trunc"
+export FILE_WRITE="/opt/zfs-tests/bin/file_write"
+export LARGEST_FILE="/opt/zfs-tests/bin/largest_file"
+export MKBUSY="/opt/zfs-tests/bin/mkbusy"
+export MKTREE="/opt/zfs-tests/bin/mktree"
+export MMAPWRITE="/opt/zfs-tests/bin/mmapwrite"
+export RANDFREE_FILE="/opt/zfs-tests/bin/randfree_file"
+export READMMAP="/opt/zfs-tests/bin/readmmap"
+export RENAME_DIR="/opt/zfs-tests/bin/rename_dir"
+export RM_LNKCNT_ZERO_FILE="/opt/zfs-tests/bin/rm_lnkcnt_zero_file"
+
+# ensure we're running in the C locale, since
+# localised messages may result in test failures
+export LC_ALL="C"
+export LANG="C"
+
+#
+# pattern to ignore from 'zpool list'.
+#
+export NO_POOLS="no pools available"
+
+# pattern to ignore from 'zfs list'.
+export NO_DATASETS="no datasets available"
+
+export TEST_BASE_DIR="/"
+
+# Default to compression ON
+export COMPRESSION_PROP=on
+
+# Default to using the checksum
+export CHECKSUM_PROP=on
+
+# some common variables used by test scripts :
+
+# some test pool names
+export TESTPOOL=testpool.$$
+export TESTPOOL1=testpool1.$$
+export TESTPOOL2=testpool2.$$
+export TESTPOOL3=testpool3.$$
+
+# some test file system names
+export TESTFS=testfs.$$
+export TESTFS1=testfs1.$$
+export TESTFS2=testfs2.$$
+export TESTFS3=testfs3.$$
+
+# some test directory names
+export TESTDIR=${TEST_BASE_DIR%%/}/testdir$$
+export TESTDIR0=${TEST_BASE_DIR%%/}/testdir0$$
+export TESTDIR1=${TEST_BASE_DIR%%/}/testdir1$$
+export TESTDIR2=${TEST_BASE_DIR%%/}/testdir2$$
+
+export ZFSROOT=
+
+export TESTSNAP=testsnap$$
+export TESTSNAP1=testsnap1$$
+export TESTSNAP2=testsnap2$$
+export TESTCLONE=testclone$$
+export TESTCLONE1=testclone1$$
+export TESTCLONE2=testclone2$$
+export TESTCLCT=testclct$$
+export TESTCTR=testctr$$
+export TESTCTR1=testctr1$$
+export TESTCTR2=testctr2$$
+export TESTVOL=testvol$$
+export TESTVOL1=testvol1$$
+export TESTVOL2=testvol2$$
+export TESTFILE0=testfile0.$$
+export TESTFILE1=testfile1.$$
+export TESTFILE2=testfile2.$$
+
+export LONGPNAME="poolname50charslong_012345678901234567890123456789"
+export LONGFSNAME="fsysname50charslong_012345678901234567890123456789"
+export SNAPFS="$TESTPOOL/$TESTFS@$TESTSNAP"
+export SNAPFS1="$TESTPOOL/$TESTVOL@$TESTSNAP"
+
+export VOLSIZE=150m
+export BIGVOLSIZE=1eb
+
+# Default to limit disks to be checked
+export MAX_FINDDISKSNUM=6
+
+# For iscsi target support
+export ISCSITGTFILE=/tmp/iscsitgt_file
+export ISCSITGT_FMRI=svc:/system/iscsitgt:default
+
+export AUTO_SNAP=$($SVCS -a | $GREP auto-snapshot | $GREP online | $AWK \
+ '{print $3}')
+
+#
+# finally, if we're running in a local zone
+# we take some additional actions
+if ! is_global_zone; then
+ reexport_pool
+fi
+
+export ZFS_VERSION=5
+export ZFS_ALL_VERSIONS="1 2 3 4 5"
+
+for i in $ZFS_ALL_VERSIONS; do
+ eval 'export ZFS_VERSION_$i="v${i}-fs"'
+done
diff --git a/usr/src/test/zfs-tests/include/libtest.shlib b/usr/src/test/zfs-tests/include/libtest.shlib
new file mode 100644
index 0000000000..3cd2927827
--- /dev/null
+++ b/usr/src/test/zfs-tests/include/libtest.shlib
@@ -0,0 +1,2571 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#
+# Copyright (c) 2012 by Delphix. All rights reserved.
+#
+
+. ${STF_TOOLS}/contrib/include/logapi.shlib
+
+ZFS=${ZFS:-/usr/sbin/zfs}
+ZPOOL=${ZPOOL:-/usr/sbin/zpool}
+
+# Determine whether a dataset is mounted
+#
+# $1 dataset name
+# $2 filesystem type; optional - defaulted to zfs
+#
+# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
+
+function ismounted
+{
+ typeset fstype=$2
+ [[ -z $fstype ]] && fstype=zfs
+ typeset out dir name ret
+
+ case $fstype in
+ zfs)
+ if [[ "$1" == "/"* ]] ; then
+ for out in $($ZFS mount | $AWK '{print $2}'); do
+ [[ $1 == $out ]] && return 0
+ done
+ else
+ for out in $($ZFS mount | $AWK '{print $1}'); do
+ [[ $1 == $out ]] && return 0
+ done
+ fi
+ ;;
+ ufs|nfs)
+ out=$($DF -F $fstype $1 2>/dev/null)
+ ret=$?
+ (($ret != 0)) && return $ret
+
+ dir=${out%%\(*}
+ dir=${dir%% *}
+ name=${out##*\(}
+ name=${name%%\)*}
+ name=${name%% *}
+
+ [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
+ ;;
+ esac
+
+ return 1
+}
+
+# Return 0 if a dataset is mounted; 1 otherwise
+#
+# $1 dataset name
+# $2 filesystem type; optional - defaulted to zfs
+
+function mounted
+{
+ ismounted $1 $2
+ (($? == 0)) && return 0
+ return 1
+}
+
+# Return 0 if a dataset is unmounted; 1 otherwise
+#
+# $1 dataset name
+# $2 filesystem type; optional - defaulted to zfs
+
+function unmounted
+{
+ ismounted $1 $2
+ (($? == 1)) && return 0
+ return 1
+}
+
+# split line on ","
+#
+# $1 - line to split
+
+function splitline
+{
+ $ECHO $1 | $SED "s/,/ /g"
+}
+
+function default_setup
+{
+ default_setup_noexit "$@"
+
+ log_pass
+}
+
+#
+# Given a list of disks, setup storage pools and datasets.
+#
+function default_setup_noexit
+{
+ typeset disklist=$1
+ typeset container=$2
+ typeset volume=$3
+
+ if is_global_zone; then
+ if poolexists $TESTPOOL ; then
+ destroy_pool $TESTPOOL
+ fi
+ [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
+ log_must $ZPOOL create -f $TESTPOOL $disklist
+ else
+ reexport_pool
+ fi
+
+ $RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
+ $MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR
+
+ log_must $ZFS create $TESTPOOL/$TESTFS
+ log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
+
+ if [[ -n $container ]]; then
+ $RM -rf $TESTDIR1 || \
+ log_unresolved Could not remove $TESTDIR1
+ $MKDIR -p $TESTDIR1 || \
+ log_unresolved Could not create $TESTDIR1
+
+ log_must $ZFS create $TESTPOOL/$TESTCTR
+ log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
+ log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
+ log_must $ZFS set mountpoint=$TESTDIR1 \
+ $TESTPOOL/$TESTCTR/$TESTFS1
+ fi
+
+ if [[ -n $volume ]]; then
+ if is_global_zone ; then
+ log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
+ else
+ log_must $ZFS create $TESTPOOL/$TESTVOL
+ fi
+ fi
+}
+
+#
+# Given a list of disks, setup a storage pool, file system and
+# a container.
+#
+function default_container_setup
+{
+ typeset disklist=$1
+
+ default_setup "$disklist" "true"
+}
+
+#
+# Given a list of disks, setup a storage pool,file system
+# and a volume.
+#
+function default_volume_setup
+{
+ typeset disklist=$1
+
+ default_setup "$disklist" "" "true"
+}
+
+#
+# Given a list of disks, setup a storage pool,file system,
+# a container and a volume.
+#
+function default_container_volume_setup
+{
+ typeset disklist=$1
+
+ default_setup "$disklist" "true" "true"
+}
+
+#
+# Create a snapshot on a filesystem or volume. By default, the snapshot is
+# taken on a filesystem.
+#
+# $1 Existing filesystem or volume name. Default, $TESTFS
+# $2 snapshot name. Default, $TESTSNAP
+#
+function create_snapshot
+{
+ typeset fs_vol=${1:-$TESTFS}
+ typeset snap=${2:-$TESTSNAP}
+
+ [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
+ [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
+
+ if snapexists $fs_vol@$snap; then
+ log_fail "$fs_vol@$snap already exists."
+ fi
+ datasetexists $fs_vol || \
+ log_fail "$fs_vol must exist."
+
+ log_must $ZFS snapshot $fs_vol@$snap
+}
+
+#
+# Create a clone from a snapshot, default clone name is $TESTCLONE.
+#
+# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
+# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
+#
+function create_clone # snapshot clone
+{
+ typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
+ typeset clone=${2:-$TESTPOOL/$TESTCLONE}
+
+ [[ -z $snap ]] && \
+ log_fail "Snapshot name is undefined."
+ [[ -z $clone ]] && \
+ log_fail "Clone name is undefined."
+
+ log_must $ZFS clone $snap $clone
+}
+
+function default_mirror_setup
+{
+ default_mirror_setup_noexit $1 $2 $3
+
+ log_pass
+}
+
+#
+# Given a pair of disks, set up a storage pool and dataset for the mirror
+# @parameters: $1 the primary side of the mirror
+# $2 the secondary side of the mirror
+# @uses: ZPOOL ZFS TESTPOOL TESTFS
+function default_mirror_setup_noexit
+{
+ readonly func="default_mirror_setup_noexit"
+ typeset primary=$1
+ typeset secondary=$2
+
+ [[ -z $primary ]] && \
+ log_fail "$func: No parameters passed"
+ [[ -z $secondary ]] && \
+ log_fail "$func: No secondary partition passed"
+ [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
+ log_must $ZPOOL create -f $TESTPOOL mirror $@
+ log_must $ZFS create $TESTPOOL/$TESTFS
+ log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
+}
+
+#
+# create a number of mirrors.
+# We create a number($1) of 2 way mirrors using the pairs of disks named
+# on the command line. These mirrors are *not* mounted
+# @parameters: $1 the number of mirrors to create
+# $... the devices to use to create the mirrors on
+# @uses: ZPOOL ZFS TESTPOOL
+function setup_mirrors
+{
+ typeset -i nmirrors=$1
+
+ shift
+ while ((nmirrors > 0)); do
+ log_must test -n "$1" -a -n "$2"
+ [[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
+ log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
+ shift 2
+ ((nmirrors = nmirrors - 1))
+ done
+}
+
+#
+# create a number of raidz pools.
+# We create a number($1) of raidz pools using the pairs of disks named
+# on the command line. These pools are *not* mounted
+# @parameters: $1 the number of pools to create
+# $... the devices to use to create the pools on
+# @uses: ZPOOL ZFS TESTPOOL
+function setup_raidzs
+{
+ typeset -i nraidzs=$1
+
+ shift
+ while ((nraidzs > 0)); do
+ log_must test -n "$1" -a -n "$2"
+ [[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
+ log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
+ shift 2
+ ((nraidzs = nraidzs - 1))
+ done
+}
+
+#
+# Destroy the configured testpool mirrors.
+# the mirrors are of the form ${TESTPOOL}{number}
+# @uses: ZPOOL ZFS TESTPOOL
+function destroy_mirrors
+{
+ default_cleanup_noexit
+
+ log_pass
+}
+
+#
+# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
+# $1 the list of disks
+#
+function default_raidz_setup
+{
+ typeset disklist="$*"
+ disks=(${disklist[*]})
+
+ if [[ ${#disks[*]} -lt 2 ]]; then
+ log_fail "A raid-z requires a minimum of two disks."
+ fi
+
+ [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
+ log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
+ log_must $ZFS create $TESTPOOL/$TESTFS
+ log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
+
+ log_pass
+}
+
+#
+# Common function used to cleanup storage pools and datasets.
+#
+# Invoked at the start of the test suite to ensure the system
+# is in a known state, and also at the end of each set of
+# sub-tests to ensure errors from one set of tests doesn't
+# impact the execution of the next set.
+
+function default_cleanup
+{
+ default_cleanup_noexit
+
+ log_pass
+}
+
+function default_cleanup_noexit
+{
+ typeset exclude=""
+ typeset pool=""
+ #
+ # Destroying the pool will also destroy any
+ # filesystems it contains.
+ #
+ if is_global_zone; then
+ $ZFS unmount -a > /dev/null 2>&1
+ exclude=`eval $ECHO \"'(${KEEP})'\"`
+ ALL_POOLS=$($ZPOOL list -H -o name \
+ | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude")
+ # Here, we loop through the pools we're allowed to
+ # destroy, only destroying them if it's safe to do
+ # so.
+ while [ ! -z ${ALL_POOLS} ]
+ do
+ for pool in ${ALL_POOLS}
+ do
+ if safe_to_destroy_pool $pool ;
+ then
+ destroy_pool $pool
+ fi
+ ALL_POOLS=$($ZPOOL list -H -o name \
+ | $GREP -v "$NO_POOLS" \
+ | $EGREP -v "$exclude")
+ done
+ done
+
+ $ZFS mount -a
+ else
+ typeset fs=""
+ for fs in $($ZFS list -H -o name \
+ | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
+ datasetexists $fs && \
+ log_must $ZFS destroy -Rf $fs
+ done
+
+ # Need cleanup here to avoid garbage dir left.
+ for fs in $($ZFS list -H -o name); do
+ [[ $fs == /$ZONE_POOL ]] && continue
+ [[ -d $fs ]] && log_must $RM -rf $fs/*
+ done
+
+ #
+ # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
+ # the default value
+ #
+ for fs in $($ZFS list -H -o name); do
+ if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
+ log_must $ZFS set reservation=none $fs
+ log_must $ZFS set recordsize=128K $fs
+ log_must $ZFS set mountpoint=/$fs $fs
+ typeset enc=""
+ enc=$(get_prop encryption $fs)
+ if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
+ [[ "$enc" == "off" ]]; then
+ log_must $ZFS set checksum=on $fs
+ fi
+ log_must $ZFS set compression=off $fs
+ log_must $ZFS set atime=on $fs
+ log_must $ZFS set devices=off $fs
+ log_must $ZFS set exec=on $fs
+ log_must $ZFS set setuid=on $fs
+ log_must $ZFS set readonly=off $fs
+ log_must $ZFS set snapdir=hidden $fs
+ log_must $ZFS set aclmode=groupmask $fs
+ log_must $ZFS set aclinherit=secure $fs
+ fi
+ done
+ fi
+
+ [[ -d $TESTDIR ]] && \
+ log_must $RM -rf $TESTDIR
+}
+
+
+#
+# Common function used to cleanup storage pools, file systems
+# and containers.
+#
+function default_container_cleanup
+{
+ if ! is_global_zone; then
+ reexport_pool
+ fi
+
+ ismounted $TESTPOOL/$TESTCTR/$TESTFS1
+ [[ $? -eq 0 ]] && \
+ log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1
+
+ datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
+ log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
+
+ datasetexists $TESTPOOL/$TESTCTR && \
+ log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR
+
+ [[ -e $TESTDIR1 ]] && \
+ log_must $RM -rf $TESTDIR1 > /dev/null 2>&1
+
+ default_cleanup
+}
+
+#
+# Common function used to clean up a snapshot of a file system or volume.
+# Defaults to deleting the file system's snapshot.
+#
+# $1 snapshot name
+#
+function destroy_snapshot
+{
+ typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
+
+ if ! snapexists $snap; then
+ log_fail "'$snap' does not existed."
+ fi
+
+ #
+	# Because the value returned by 'get_prop' may not match the real
+	# mountpoint while the snapshot is unmounted, first check and make
+	# sure this snapshot is currently mounted on the system.
+ #
+ typeset mtpt=""
+ if ismounted $snap; then
+ mtpt=$(get_prop mountpoint $snap)
+ (($? != 0)) && \
+ log_fail "get_prop mountpoint $snap failed."
+ fi
+
+ log_must $ZFS destroy $snap
+ [[ $mtpt != "" && -d $mtpt ]] && \
+ log_must $RM -rf $mtpt
+}
+
+#
+# Common function used to cleanup clone.
+#
+# $1 clone name
+#
+function destroy_clone
+{
+ typeset clone=${1:-$TESTPOOL/$TESTCLONE}
+
+ if ! datasetexists $clone; then
+ log_fail "'$clone' does not existed."
+ fi
+
+ # With the same reason in destroy_snapshot
+ typeset mtpt=""
+ if ismounted $clone; then
+ mtpt=$(get_prop mountpoint $clone)
+ (($? != 0)) && \
+ log_fail "get_prop mountpoint $clone failed."
+ fi
+
+ log_must $ZFS destroy $clone
+ [[ $mtpt != "" && -d $mtpt ]] && \
+ log_must $RM -rf $mtpt
+}
+
+# Return 0 if a snapshot exists; $? otherwise
+#
+# $1 - snapshot name
+
+function snapexists
+{
+ $ZFS list -H -t snapshot "$1" > /dev/null 2>&1
+ return $?
+}
+
+#
+# Set a property to a certain value on a dataset.
+# Sets a property of the dataset to the value as passed in.
+# @param:
+# $1 dataset who's property is being set
+# $2 property to set
+# $3 value to set property to
+# @return:
+# 0 if the property could be set.
+# non-zero otherwise.
+# @use: ZFS
+#
+function dataset_setprop
+{
+ typeset fn=dataset_setprop
+
+ if (($# < 3)); then
+ log_note "$fn: Insufficient parameters (need 3, had $#)"
+ return 1
+ fi
+ typeset output=
+ output=$($ZFS set $2=$3 $1 2>&1)
+ typeset rv=$?
+ if ((rv != 0)); then
+ log_note "Setting property on $1 failed."
+ log_note "property $2=$3"
+ log_note "Return Code: $rv"
+ log_note "Output: $output"
+ return $rv
+ fi
+ return 0
+}
+
+#
+# Assign suite defined dataset properties.
+# This function is used to apply the suite's defined default set of
+# properties to a dataset.
+# @parameters: $1 dataset to use
+# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
+# @returns:
+# 0 if the dataset has been altered.
+# 1 if no pool name was passed in.
+# 2 if the dataset could not be found.
+#		3 if the dataset could not have its properties set.
+#
+function dataset_set_defaultproperties
+{
+ typeset dataset="$1"
+
+ [[ -z $dataset ]] && return 1
+
+ typeset confset=
+ typeset -i found=0
+ for confset in $($ZFS list); do
+ if [[ $dataset = $confset ]]; then
+ found=1
+ break
+ fi
+ done
+ [[ $found -eq 0 ]] && return 2
+ if [[ -n $COMPRESSION_PROP ]]; then
+ dataset_setprop $dataset compression $COMPRESSION_PROP || \
+ return 3
+ log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
+ fi
+ if [[ -n $CHECKSUM_PROP ]]; then
+ dataset_setprop $dataset checksum $CHECKSUM_PROP || \
+ return 3
+ log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
+ fi
+ return 0
+}
+
+#
+# Check a numeric assertion
+# @parameter: $@ the assertion to check
+# @output: big loud notice if assertion failed
+# @use: log_fail
+#
+function assert
+{
+ (($@)) || log_fail "$@"
+}
+
+#
+# Function to format partition size of a disk
+# Given a disk cxtxdx reduces all partitions
+# to 0 size
+#
+function zero_partitions #<whole_disk_name>
+{
+ typeset diskname=$1
+ typeset i
+
+ for i in 0 1 3 4 5 6 7
+ do
+ set_partition $i "" 0mb $diskname
+ done
+}
+
+#
+# Given a slice, size and disk, this function
+# formats the slice to the specified size.
+# Size should be specified with units as per
+# the `format` command requirements eg. 100mb 3gb
+#
+function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
+{
+ typeset -i slicenum=$1
+ typeset start=$2
+ typeset size=$3
+ typeset disk=$4
+ [[ -z $slicenum || -z $size || -z $disk ]] && \
+ log_fail "The slice, size or disk name is unspecified."
+ typeset format_file=/var/tmp/format_in.$$
+
+ $ECHO "partition" >$format_file
+ $ECHO "$slicenum" >> $format_file
+ $ECHO "" >> $format_file
+ $ECHO "" >> $format_file
+ $ECHO "$start" >> $format_file
+ $ECHO "$size" >> $format_file
+ $ECHO "label" >> $format_file
+ $ECHO "" >> $format_file
+ $ECHO "q" >> $format_file
+ $ECHO "q" >> $format_file
+
+ $FORMAT -e -s -d $disk -f $format_file
+ typeset ret_val=$?
+ $RM -f $format_file
+ [[ $ret_val -ne 0 ]] && \
+ log_fail "Unable to format $disk slice $slicenum to $size"
+ return 0
+}
+
+#
+# Get the end cyl of the given slice
+#
+function get_endslice #<disk> <slice>
+{
+ typeset disk=$1
+ typeset slice=$2
+ if [[ -z $disk || -z $slice ]] ; then
+ log_fail "The disk name or slice number is unspecified."
+ fi
+
+ disk=${disk#/dev/dsk/}
+ disk=${disk#/dev/rdsk/}
+ disk=${disk%s*}
+
+ typeset -i ratio=0
+ ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
+ $GREP "sectors\/cylinder" | \
+ $AWK '{print $2}')
+
+ if ((ratio == 0)); then
+ return
+ fi
+
+ typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
+ $NAWK -v token="$slice" '{if ($1==token) print $6}')
+
+ ((endcyl = (endcyl + 1) / ratio))
+ echo $endcyl
+}
+
+
+#
+# Given a size,disk and total slice number, this function formats the
+# disk slices from 0 to the total slice number with the same specified
+# size.
+#
+function partition_disk #<slice_size> <whole_disk_name> <total_slices>
+{
+ typeset -i i=0
+ typeset slice_size=$1
+ typeset disk_name=$2
+ typeset total_slices=$3
+ typeset cyl
+
+ zero_partitions $disk_name
+ while ((i < $total_slices)); do
+ if ((i == 2)); then
+ ((i = i + 1))
+ continue
+ fi
+ set_partition $i "$cyl" $slice_size $disk_name
+ cyl=$(get_endslice $disk_name $i)
+ ((i = i+1))
+ done
+}
+
+#
+# This function continues to write filenum files into each of dirnum
+# directories until either $FILE_WRITE returns an error or the
+# maximum number of files per directory has been written.
+#
+# Usage:
+# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
+#
+# Return value: 0 on success
+# non 0 on error
+#
+# Where :
+# destdir: is the directory where everything is to be created under
+# dirnum: the maximum number of subdirectories to use, -1 no limit
+# filenum: the maximum number of files per subdirectory
+# bytes: number of bytes to write
+#	num_writes: number of times to write out bytes
+#	data:	  the data that will be written
+#
+# E.g.
+#	fill_fs /testdir 20 25 1024 256 0
+#
+# Note: bytes * num_writes equals the size of the testfile
+#
+function fill_fs # destdir dirnum filenum bytes num_writes data
+{
+ typeset destdir=${1:-$TESTDIR}
+ typeset -i dirnum=${2:-50}
+ typeset -i filenum=${3:-50}
+ typeset -i bytes=${4:-8192}
+ typeset -i num_writes=${5:-10240}
+ typeset -i data=${6:-0}
+
+ typeset -i odirnum=1
+ typeset -i idirnum=0
+ typeset -i fn=0
+ typeset -i retval=0
+
+ log_must $MKDIR -p $destdir/$idirnum
+ while (($odirnum > 0)); do
+ if ((dirnum >= 0 && idirnum >= dirnum)); then
+ odirnum=0
+ break
+ fi
+ $FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
+ -b $bytes -c $num_writes -d $data
+ retval=$?
+ if (($retval != 0)); then
+ odirnum=0
+ break
+ fi
+ if (($fn >= $filenum)); then
+ fn=0
+ ((idirnum = idirnum + 1))
+ log_must $MKDIR -p $destdir/$idirnum
+ else
+ ((fn = fn + 1))
+ fi
+ done
+ return $retval
+}
+
+#
+# Simple function to get the specified property. If unable to
+# get the property then exits.
+#
+# Note property is in 'parsable' format (-p)
+#
+function get_prop # property dataset
+{
+ typeset prop_val
+ typeset prop=$1
+ typeset dataset=$2
+
+ prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
+ if [[ $? -ne 0 ]]; then
+ log_note "Unable to get $prop property for dataset " \
+ "$dataset"
+ return 1
+ fi
+
+ $ECHO $prop_val
+ return 0
+}
+
+#
+# Simple function to get the specified property of pool. If unable to
+# get the property then exits.
+#
+function get_pool_prop # property pool
+{
+ typeset prop_val
+ typeset prop=$1
+ typeset pool=$2
+
+ if poolexists $pool ; then
+ prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
+ $AWK '{print $3}')
+ if [[ $? -ne 0 ]]; then
+ log_note "Unable to get $prop property for pool " \
+ "$pool"
+ return 1
+ fi
+ else
+ log_note "Pool $pool not exists."
+ return 1
+ fi
+
+ $ECHO $prop_val
+ return 0
+}
+
+# Return 0 if a pool exists; $? otherwise
+#
+# $1 - pool name
+
+function poolexists
+{
+ typeset pool=$1
+
+ if [[ -z $pool ]]; then
+ log_note "No pool name given."
+ return 1
+ fi
+
+ $ZPOOL get name "$pool" > /dev/null 2>&1
+ return $?
+}
+
+# Return 0 if all the specified datasets exist; $? otherwise
+#
+# $1-n dataset name
+function datasetexists
+{
+ if (($# == 0)); then
+ log_note "No dataset name given."
+ return 1
+ fi
+
+ while (($# > 0)); do
+ $ZFS get name $1 > /dev/null 2>&1 || \
+ return $?
+ shift
+ done
+
+ return 0
+}
+
+# return 0 if none of the specified datasets exists, otherwise return 1.
+#
+# $1-n dataset name
+function datasetnonexists
+{
+	if (($# == 0)); then
+		log_note "No dataset name given."
+		return 1
+	fi
+
+	# Any dataset that does list successfully makes the check fail.
+	typeset ds
+	for ds in "$@"; do
+		$ZFS list -H -t filesystem,snapshot,volume $ds \
+		    > /dev/null 2>&1 && return 1
+	done
+
+	return 0
+}
+
+#
+# Given a mountpoint, or a dataset name, determine if it is shared.
+#
+# Returns 0 if shared, 1 otherwise.
+#
+function is_shared
+{
+	typeset fs=$1
+	typeset mtpt
+
+	# A dataset name (no leading '/') is translated to its mountpoint;
+	# datasets with mountpoint none/legacy/- can never be shared.
+	if [[ $fs != "/"* ]] ; then
+		if datasetnonexists "$fs" ; then
+			return 1
+		else
+			mtpt=$(get_prop mountpoint "$fs")
+			case $mtpt in
+				none|legacy|-) return 1
+					;;
+				*) fs=$mtpt
+					;;
+			esac
+		fi
+	fi
+
+	# Scan the active share list for an exact mountpoint match.
+	for mtpt in `$SHARE | $AWK '{print $2}'` ; do
+		if [[ $mtpt == $fs ]] ; then
+			return 0
+		fi
+	done
+
+	# Not shared; log the NFS server state, a common cause.
+	typeset stat=$($SVCS -H -o STA nfs/server:default)
+	if [[ $stat != "ON" ]]; then
+		log_note "Current nfs/server status: $stat"
+	fi
+
+	return 1
+}
+
+#
+# Given a mountpoint, determine if it is not shared.
+#
+# Returns 0 if not shared, 1 otherwise.
+#
+function not_shared
+{
+ typeset fs=$1
+
+ is_shared $fs
+ if (($? == 0)); then
+ return 1
+ fi
+
+ return 0
+}
+
+#
+# Helper function to unshare a mountpoint.
+#
+function unshare_fs #fs
+{
+ typeset fs=$1
+
+ is_shared $fs
+ if (($? == 0)); then
+ log_must $ZFS unshare $fs
+ fi
+
+ return 0
+}
+
+#
+# Check NFS server status and trigger it online.
+#
+function setup_nfs_server
+{
+	# Cannot share directory in non-global zone.
+	#
+	if ! is_global_zone; then
+		log_note "Cannot trigger NFS server by sharing in LZ."
+		return
+	fi
+
+	typeset nfs_fmri="svc:/network/nfs/server:default"
+	if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
+		#
+		# Only really sharing operation can enable NFS server
+		# to online permanently.
+		#
+		typeset dummy=/tmp/dummy
+
+		if [[ -d $dummy ]]; then
+			log_must $RM -rf $dummy
+		fi
+
+		log_must $MKDIR $dummy
+		log_must $SHARE $dummy
+
+		#
+		# Wait for the fmri to reach a final state: while in
+		# transition an asterisk (*) is appended to the state and
+		# unsharing at that point would flip it back to 'DIS'.
+		#
+		# Wait for at least 1 second.
+		#
+		log_must $SLEEP 1
+		# Use an explicit arithmetic test on an expanded, local
+		# counter; the original relied on '[[ timeout -ne 0 ]]'
+		# arithmetically expanding the bare name and leaked
+		# 'timeout' as a global.
+		typeset -i timeout=10
+		while ((timeout != 0)) && \
+		    [[ $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
+		do
+			log_must $SLEEP 1
+
+			((timeout -= 1))
+		done
+
+		log_must $UNSHARE $dummy
+		log_must $RM -rf $dummy
+	fi
+
+	log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
+}
+
+#
+# To verify whether calling process is in global zone
+#
+# Return 0 if in global zone, 1 in non-global zone
+#
+function is_global_zone
+{
+	# zonename(1) prints "global" when run in the global zone; the
+	# [[ ]] status (0 match / 1 mismatch) is the function's result.
+	[[ $($ZONENAME 2>/dev/null) == "global" ]]
+}
+
+#
+# Verify whether test is permitted to run from
+# global zone, local zone, or both
+#
+# $1 zone limit, could be "global", "local", or "both"(no limit)
+#
+# Return 0 if permitted, otherwise exit with log_unsupported
+#
+function verify_runnable # zone limit
+{
+	typeset limit=$1
+
+	# No restriction given: runnable everywhere.
+	[[ -z $limit ]] && return 0
+
+	if is_global_zone ; then
+		case $limit in
+			global|both)
+				;;
+			local) log_unsupported "Test is unable to run from "\
+					"global zone."
+				;;
+			*) log_note "Warning: unknown limit $limit - " \
+					"use both."
+				;;
+		esac
+	else
+		case $limit in
+			local|both)
+				;;
+			global) log_unsupported "Test is unable to run from "\
+					"local zone."
+				;;
+			*) log_note "Warning: unknown limit $limit - " \
+					"use both."
+				;;
+		esac
+
+		# In a local zone, repoint the TESTPOOL* variables at the
+		# zone-visible datasets before running.
+		reexport_pool
+	fi
+
+	return 0
+}
+
+# Return 0 if created successfully or the pool already exists; $? otherwise
+# Note: In local zones, this function should return 0 silently.
+#
+# $1 - pool name
+# $2-n - [keyword] devs_list
+
+function create_pool #pool devs_list
+{
+	# Strip any dataset component; only the pool name is used.
+	typeset pool=${1%%/*}
+
+	shift
+
+	if [[ -z $pool ]]; then
+		log_note "Missing pool name."
+		return 1
+	fi
+
+	# Recreate from scratch if a pool of that name already exists.
+	if poolexists $pool ; then
+		destroy_pool $pool
+	fi
+
+	# Pools can only be created in the global zone; in a local zone
+	# this returns 0 silently (see header comment).
+	if is_global_zone ; then
+		[[ -d /$pool ]] && $RM -rf /$pool
+		log_must $ZPOOL create -f $pool $@
+	fi
+
+	return 0
+}
+
+# Return 0 if destroyed successfully or the pool does not exist; $? otherwise
+# Note: In local zones, this function should return 0 silently.
+#
+# $1 - pool name
+# Destroy pool with the given parameters.
+
+function destroy_pool #pool
+{
+	# Strip any dataset component; only the pool name is used.
+	typeset pool=${1%%/*}
+	typeset mtpt
+
+	if [[ -z $pool ]]; then
+		log_note "No pool name given."
+		return 1
+	fi
+
+	if is_global_zone ; then
+		if poolexists "$pool" ; then
+			# Capture the mountpoint first so the directory can
+			# be removed after the pool is gone.
+			mtpt=$(get_prop mountpoint "$pool")
+			log_must $ZPOOL destroy -f $pool
+
+			[[ -d $mtpt ]] && \
+				log_must $RM -rf $mtpt
+		else
+			log_note "Pool not exist. ($pool)"
+			return 1
+		fi
+	fi
+
+	return 0
+}
+
+#
+# Firstly, create a pool with 5 datasets. Then, create a single zone and
+# export the 5 datasets to it. In addition, we also add a ZFS filesystem
+# and a zvol device to the zone.
+#
+# $1 zone name
+# $2 zone root directory prefix
+# $3 zone ip
+#
+function zfs_zones_setup #zone_name zone_root zone_ip
+{
+	typeset zone_name=${1:-$(hostname)-z}
+	typeset zone_root=${2:-"/zone_root"}
+	# NOTE(review): zone_ip is accepted but never used below -- the
+	# sysidcfg uses "network_interface=primary"; confirm intent.
+	typeset zone_ip=${3:-"10.1.1.10"}
+	typeset prefix_ctr=$ZONE_CTR
+	typeset pool_name=$ZONE_POOL
+	typeset -i cntctr=5
+	typeset -i i=0
+
+	# Create pool and 5 container within it
+	#
+	[[ -d /$pool_name ]] && $RM -rf /$pool_name
+	log_must $ZPOOL create -f $pool_name $DISKS
+	while ((i < cntctr)); do
+		log_must $ZFS create $pool_name/$prefix_ctr$i
+		((i += 1))
+	done
+
+	# create a zvol
+	log_must $ZFS create -V 1g $pool_name/zone_zvol
+
+	#
+	# If current system support slog, add slog device for pool
+	#
+	if verify_slog_support ; then
+		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
+		log_must $MKFILE 100M $sdevs
+		log_must $ZPOOL add $pool_name log mirror $sdevs
+	fi
+
+	# this isn't supported just yet.
+	# Create a filesystem. In order to add this to
+	# the zone, it must have it's mountpoint set to 'legacy'
+	# log_must $ZFS create $pool_name/zfs_filesystem
+	# log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem
+
+	# Start from a clean zone root directory.
+	[[ -d $zone_root ]] && \
+		log_must $RM -rf $zone_root/$zone_name
+	[[ ! -d $zone_root ]] && \
+		log_must $MKDIR -p -m 0700 $zone_root/$zone_name
+
+	# Create zone configure file and configure the zone
+	#
+	typeset zone_conf=/tmp/zone_conf.$$
+	$ECHO "create" > $zone_conf
+	$ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
+	$ECHO "set autoboot=true" >> $zone_conf
+	i=0
+	# Delegate each of the 5 container datasets to the zone.
+	while ((i < cntctr)); do
+		$ECHO "add dataset" >> $zone_conf
+		$ECHO "set name=$pool_name/$prefix_ctr$i" >> \
+		    $zone_conf
+		$ECHO "end" >> $zone_conf
+		((i += 1))
+	done
+
+	# add our zvol to the zone
+	$ECHO "add device" >> $zone_conf
+	$ECHO "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
+	$ECHO "end" >> $zone_conf
+
+	# add a corresponding zvol rdsk to the zone
+	$ECHO "add device" >> $zone_conf
+	$ECHO "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
+	$ECHO "end" >> $zone_conf
+
+	# once it's supported, we'll add our filesystem to the zone
+	# $ECHO "add fs" >> $zone_conf
+	# $ECHO "set type=zfs" >> $zone_conf
+	# $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
+	# $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
+	# $ECHO "end" >> $zone_conf
+
+	$ECHO "verify" >> $zone_conf
+	$ECHO "commit" >> $zone_conf
+	log_must $ZONECFG -z $zone_name -f $zone_conf
+	log_must $RM -f $zone_conf
+
+	# Install the zone
+	$ZONEADM -z $zone_name install
+	if (($? == 0)); then
+		log_note "SUCCESS: $ZONEADM -z $zone_name install"
+	else
+		log_fail "FAIL: $ZONEADM -z $zone_name install"
+	fi
+
+	# Install sysidcfg file, so the zone boots unattended.
+	#
+	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
+	$ECHO "system_locale=C" > $sysidcfg
+	$ECHO  "terminal=dtterm" >> $sysidcfg
+	$ECHO  "network_interface=primary {" >> $sysidcfg
+	$ECHO  "hostname=$zone_name" >> $sysidcfg
+	$ECHO  "}" >> $sysidcfg
+	$ECHO  "name_service=NONE" >> $sysidcfg
+	# presumably a pre-hashed password for the zone's root user --
+	# TODO confirm against the test suite documentation.
+	$ECHO  "root_password=mo791xfZ/SFiw" >> $sysidcfg
+	$ECHO  "security_policy=NONE" >> $sysidcfg
+	$ECHO  "timezone=US/Eastern" >> $sysidcfg
+
+	# Boot this zone
+	log_must $ZONEADM -z $zone_name boot
+}
+
+#
+# Reexport TESTPOOL & TESTPOOL(1-4)
+#
+function reexport_pool
+{
+	typeset -i cntctr=5
+	typeset -i i=0
+
+	# Repoint TESTPOOL and TESTPOOL1..TESTPOOL4 at the zone-visible
+	# datasets $ZONE_POOL/$ZONE_CTR<i>, mounting each if necessary.
+	while ((i < cntctr)); do
+		if ((i == 0)); then
+			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
+			if ! ismounted $TESTPOOL; then
+				log_must $ZFS mount $TESTPOOL
+			fi
+		else
+			# eval is required to assign and later expand the
+			# computed variable name TESTPOOL<i>.
+			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
+			if eval ! ismounted \$TESTPOOL$i; then
+				log_must eval $ZFS mount \$TESTPOOL$i
+			fi
+		fi
+		((i += 1))
+	done
+}
+
+#
+# Verify a given disk is online or offline
+#
+# Return 0 is pool/disk matches expected state, 1 otherwise
+#
+function check_state # pool disk state{online,offline}
+{
+	typeset pool=$1
+	# Accept either a bare device name or a full /dev/dsk path.
+	typeset disk=${2#/dev/dsk/}
+	typeset state=$3
+
+	# Use $GREP per this library's command-variable convention; the
+	# original used a bare 'grep', bypassing commands.cfg.
+	$ZPOOL status -v $pool | $GREP "$disk" \
+	    | $GREP -i "$state" > /dev/null 2>&1
+
+	return $?
+}
+
+#
+# Get the mountpoint of snapshot
+# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
+# as its mountpoint
+#
+function snapshot_mountpoint
+{
+	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
+
+	typeset fs=${dataset%@*}
+	typeset snap=${dataset#*@}
+
+	# The name must contain '@' and have non-empty filesystem and
+	# snapshot components; both bad cases fail identically.
+	if [[ $dataset != *@* || -z $fs || -z $snap ]]; then
+		log_fail "Error name of snapshot '$dataset'."
+	fi
+
+	$ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
+}
+
+#
+# Given a pool and file system, this function will verify the file system
+# using the zdb internal tool. Note that the pool is exported and imported
+# to ensure it has consistent state.
+#
+function verify_filesys # pool filesystem dir
+{
+	typeset pool="$1"
+	typeset filesys="$2"
+	typeset zdbout="/tmp/zdbout.$$"
+
+	shift
+	shift
+	# Remaining arguments are extra directories to search for device
+	# nodes on import (passed to 'zpool import -d').
+	typeset dirs=$@
+	typeset search_path=""
+
+	log_note "Calling $ZDB to verify filesystem '$filesys'"
+	# Export and re-import so zdb sees consistent on-disk state.
+	$ZFS unmount -a > /dev/null 2>&1
+	log_must $ZPOOL export $pool
+
+	if [[ -n $dirs ]] ; then
+		for dir in $dirs ; do
+			search_path="$search_path -d $dir"
+		done
+	fi
+
+	log_must $ZPOOL import $search_path $pool
+
+	$ZDB -cudi $filesys > $zdbout 2>&1
+	if [[ $? != 0 ]]; then
+		log_note "Output: $ZDB -cudi $filesys"
+		$CAT $zdbout
+		log_fail "$ZDB detected errors with: '$filesys'"
+	fi
+
+	log_must $ZFS mount -a
+	log_must $RM -rf $zdbout
+}
+
+#
+# Given a pool, and this function list all disks in the pool
+#
+function get_disklist # pool
+{
+ typeset disklist=""
+
+ disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
+ $GREP -v "\-\-\-\-\-" | \
+ $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
+
+ $ECHO $disklist
+}
+
+#
+# Destroy all existing metadevices and state database
+#
+function destroy_metas
+{
+	typeset md
+
+	# Clear every configured metadevice...
+	for md in $($METASTAT -p | $AWK '{print $1}'); do
+		log_must $METACLEAR -rf $md
+	done
+
+	# ...then delete every state database replica.
+	for md in $($METADB | $CUT -f6 | $GREP dev | $UNIQ); do
+		log_must $METADB -fd $md
+	done
+}
+
+# /**
+# This function kills a given list of processes after a time period. We use
+# this in the stress tests instead of STF_TIMEOUT so that we can have processes
+# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
+# would be listed as FAIL, which we don't want : we're happy with stress tests
+# running for a certain amount of time, then finishing.
+#
+# @param $1 the time in seconds after which we should terminate these processes
+# @param $2..$n the processes we wish to terminate.
+# */
+function stress_timeout
+{
+	typeset -i TIMEOUT=$1
+	shift
+	typeset cpids="$@"
+
+	log_note "Waiting for child processes($cpids). " \
+	    "It could last dozens of minutes, please be patient ..."
+	log_must $SLEEP $TIMEOUT
+
+	log_note "Killing child processes after ${TIMEOUT} stress timeout."
+	typeset pid
+	for pid in $cpids; do
+		# Signal only processes that are still alive.
+		if $PS -p $pid > /dev/null 2>&1; then
+			log_must $KILL -USR1 $pid
+		fi
+	done
+}
+
+#
+# Verify a given hotspare disk is inuse or avail
+#
+# Return 0 is pool/disk matches expected state, 1 otherwise
+#
+function check_hotspare_state # pool disk state{inuse,avail}
+{
+	typeset pool=$1
+	typeset disk=${2#/dev/dsk/}
+	typeset state=$3
+
+	# Compare against the state reported in the "spares" section.
+	cur_state=$(get_device_state $pool $disk "spares")
+
+	[[ $state == $cur_state ]]
+}
+
+#
+# Verify a given slog disk is in the expected state
+#
+# Return 0 if pool/disk matches expected state, 1 otherwise
+#
+function check_slog_state # pool disk state{online,offline,unavail}
+{
+	typeset pool=$1
+	typeset disk=${2#/dev/dsk/}
+	typeset state=$3
+
+	# Compare against the state reported in the "logs" section.
+	cur_state=$(get_device_state $pool $disk "logs")
+
+	[[ $state == $cur_state ]]
+}
+
+#
+# Verify a given vdev disk is in the expected state
+#
+# Return 0 if pool/disk matches expected state, 1 otherwise
+#
+function check_vdev_state # pool disk state{online,offline,unavail}
+{
+	typeset pool=$1
+	typeset disk=${2#/dev/dsk/}
+	typeset state=$3
+
+	# No section argument: search the main vdev tree.
+	cur_state=$(get_device_state $pool $disk)
+
+	[[ $state == $cur_state ]]
+}
+
+#
+# Check the output of 'zpool status -v <pool>',
+# and to see if the content of <token> contain the <keyword> specified.
+#
+# Return 0 is contain, 1 otherwise
+#
+function check_pool_status # pool token keyword
+{
+ typeset pool=$1
+ typeset token=$2
+ typeset keyword=$3
+
+ $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
+ ($1==token) {print $0}' \
+ | $GREP -i "$keyword" > /dev/null 2>&1
+
+ return $?
+}
+
+#
+# These 5 following functions are instance of check_pool_status()
+# is_pool_resilvering - to check if the pool is resilver in progress
+# is_pool_resilvered - to check if the pool is resilver completed
+# is_pool_scrubbing - to check if the pool is scrub in progress
+# is_pool_scrubbed - to check if the pool is scrub completed
+# is_pool_scrub_stopped - to check if the pool is scrub stopped
+#
+function is_pool_resilvering #pool
+{
+	check_pool_status "$1" "scan" "resilver in progress since "
+}
+
+function is_pool_resilvered #pool
+{
+	check_pool_status "$1" "scan" "resilvered "
+}
+
+function is_pool_scrubbing #pool
+{
+	check_pool_status "$1" "scan" "scrub in progress since "
+}
+
+function is_pool_scrubbed #pool
+{
+	check_pool_status "$1" "scan" "scrub repaired"
+}
+
+function is_pool_scrub_stopped #pool
+{
+	check_pool_status "$1" "scan" "scrub canceled"
+}
+
+#
+# Use create_pool()/destroy_pool() to clean up the information in
+# the given disk to avoid slice overlapping.
+#
+function cleanup_devices #vdevs
+{
+	typeset pool="foopool$$"
+
+	# A throwaway create/destroy cycle rewrites the labels on the
+	# given vdevs, clearing stale slice information.
+	poolexists $pool && destroy_pool $pool
+
+	create_pool $pool $@
+	destroy_pool $pool
+
+	return 0
+}
+
+#
+# Verify the rsh connectivity to each remote host in RHOSTS.
+#
+# Return 0 if remote host is accessible; otherwise 1.
+# $1 remote host name
+# $2 username
+#
+function verify_rsh_connect #rhost, username
+{
+	typeset rhost=$1
+	typeset username=$2
+	typeset rsh_cmd="$RSH -n"
+	typeset cur_user=
+
+	# The host must at least resolve...
+	$GETENT hosts $rhost >/dev/null 2>&1
+	if (($? != 0)); then
+		log_note "$rhost cannot be found from" \
+		    "administrative database."
+		return 1
+	fi
+
+	# ...and answer pings...
+	$PING $rhost 3 >/dev/null 2>&1
+	if (($? != 0)); then
+		log_note "$rhost is not reachable."
+		return 1
+	fi
+
+	if ((${#username} != 0)); then
+		rsh_cmd="$rsh_cmd -l $username"
+		cur_user="given user \"$username\""
+	else
+		# NOTE(review): $LOGNAME is invoked as a command here;
+		# confirm it is defined as a command path in commands.cfg
+		# rather than relying on the environment variable.
+		cur_user="current user \"`$LOGNAME`\""
+	fi
+
+	# ...and accept a trivial rsh command.
+	if ! $rsh_cmd $rhost $TRUE; then
+		log_note "$RSH to $rhost is not accessible" \
+		    "with $cur_user."
+		return 1
+	fi
+
+	return 0
+}
+
+#
+# Verify the remote host connection via rsh after rebooting
+# $1 remote host
+#
+function verify_remote
+{
+	# Make rhost local (it previously leaked as a global).
+	typeset rhost=$1
+
+	#
+	# The following loop waits for the remote system rebooting.
+	# Each iteration will wait for 150 seconds. there are
+	# total 5 iterations, so the total timeout value will
+	# be 12.5 minutes for the system rebooting. This number
+	# is an approximate number.
+	#
+	typeset -i count=0
+	while ! verify_rsh_connect $rhost; do
+		# Use $SLEEP per the library's command-variable convention
+		# (was a bare 'sleep').
+		$SLEEP 150
+		((count = count + 1))
+		if ((count > 5)); then
+			return 1
+		fi
+	done
+	return 0
+}
+
+#
+# Replacement function for /usr/bin/rsh. This function will include
+# the /usr/bin/rsh and meanwhile return the execution status of the
+# last command.
+#
+# $1 usrname passing down to -l option of /usr/bin/rsh
+# $2 remote machine hostname
+# $3... command string
+#
+
+function rsh_status
+{
+	typeset ruser=$1
+	typeset rhost=$2
+	typeset -i ret=0
+	typeset cmd_str=""
+	typeset rsh_str=""
+
+	shift; shift
+	cmd_str="$@"
+
+	err_file=/tmp/${rhost}.$$.err
+	if ((${#ruser} == 0)); then
+		rsh_str="$RSH -n"
+	else
+		rsh_str="$RSH -n -l $ruser"
+	fi
+
+	# Run the command remotely and have the remote ksh report the
+	# command's exit status on stderr, since rsh itself only returns
+	# the status of the transport.
+	$rsh_str $rhost /usr/bin/ksh -c "'$cmd_str; \
+	    print -u 2 \"status=\$?\"'" \
+	    >/dev/null 2>$err_file
+	ret=$?
+	if (($ret != 0)); then
+		$CAT $err_file
+		# (dropped undefined $std_file from this rm; it was never
+		# set in this function and expanded to nothing)
+		$RM -f $err_file
+		log_fail "$RSH itself failed with exit code $ret..."
+	fi
+
+	# Extract the remote command's status from the captured stderr.
+	ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
+	    $CUT -d= -f2)
+	(($ret != 0)) && $CAT $err_file >&2
+
+	$RM -f $err_file >/dev/null 2>&1
+	return $ret
+}
+
+#
+# Get the SUNWstc-fs-zfs package installation path in a remote host
+# $1 remote host name
+#
+function get_remote_pkgpath
+{
+	typeset rhost=$1
+
+	# Run pkginfo remotely and print the BASEDIR value directly.
+	$ECHO $($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
+	    $CUT -d: -f2")
+}
+
+#/**
+# A function to find and locate free disks on a system or from given
+# disks as the parameter. It works by locating disks that are in use
+# as swap devices, SVM devices, and dump devices, and also disks
+# listed in /etc/vfstab
+#
+# $@ given disks to find which are free, default is all disks in
+# the test system
+#
+# @return a string containing the list of available disks
+#*/
+function find_disks
+{
+ sfi=/tmp/swaplist.$$
+ msi=/tmp/metastat.$$
+ dmpi=/tmp/dumpdev.$$
+ max_finddisksnum=${MAX_FINDDISKSNUM:-6}
+
+ $SWAP -l > $sfi
+ $METASTAT -c > $msi 2>/dev/null
+ $DUMPADM > $dmpi 2>/dev/null
+
+# write an awk script that can process the output of format
+# to produce a list of disks we know about. Note that we have
+# to escape "$2" so that the shell doesn't interpret it while
+# we're creating the awk script.
+# -------------------
+ $CAT > /tmp/find_disks.awk <<EOF
+#!/bin/nawk -f
+ BEGIN { FS="."; }
+
+ /^Specify disk/{
+ searchdisks=0;
+ }
+
+ {
+ if (searchdisks && \$2 !~ "^$"){
+ split(\$2,arr," ");
+ print arr[1];
+ }
+ }
+
+ /^AVAILABLE DISK SELECTIONS:/{
+ searchdisks=1;
+ }
+EOF
+#---------------------
+
+ $CHMOD 755 /tmp/find_disks.awk
+ disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
+ $RM /tmp/find_disks.awk
+
+ unused=""
+ for disk in $disks; do
+ # Check for mounted
+ $GREP "${disk}[sp]" /etc/mnttab >/dev/null
+ (($? == 0)) && continue
+ # Check for swap
+ $GREP "${disk}[sp]" $sfi >/dev/null
+ (($? == 0)) && continue
+ # Check for SVM
+ $GREP "${disk}" $msi >/dev/null
+ (($? == 0)) && continue
+ # check for dump device
+ $GREP "${disk}[sp]" $dmpi >/dev/null
+ (($? == 0)) && continue
+ # check to see if this disk hasn't been explicitly excluded
+ # by a user-set environment variable
+ $ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
+ (($? == 0)) && continue
+ unused_candidates="$unused_candidates $disk"
+ done
+ $RM $sfi
+ $RM $msi
+ $RM $dmpi
+
+# now just check to see if those disks do actually exist
+# by looking for a device pointing to the first slice in
+# each case. limit the number to max_finddisksnum
+ count=0
+ for disk in $unused_candidates; do
+ if [ -b /dev/dsk/${disk}s0 ]; then
+ if [ $count -lt $max_finddisksnum ]; then
+ unused="$unused $disk"
+ # do not impose limit if $@ is provided
+ [[ -z $@ ]] && ((count = count + 1))
+ fi
+ fi
+ done
+
+# finally, return our disk list
+ $ECHO $unused
+}
+
+#
+# Add specified user to specified group
+#
+# $1 group name
+# $2 user name
+# $3 base of the homedir (optional)
+#
+function add_user #<group_name> <user_name> <basedir>
+{
+ typeset gname=$1
+ typeset uname=$2
+ typeset basedir=${3:-"/var/tmp"}
+
+ if ((${#gname} == 0 || ${#uname} == 0)); then
+ log_fail "group name or user name are not defined."
+ fi
+
+ log_must $USERADD -g $gname -d $basedir/$uname -m $uname
+
+ return 0
+}
+
+#
+# Delete the specified user.
+#
+# $1 login name
+# $2 base of the homedir (optional)
+#
+function del_user #<logname> <basedir>
+{
+ typeset user=$1
+ typeset basedir=${2:-"/var/tmp"}
+
+ if ((${#user} == 0)); then
+ log_fail "login name is necessary."
+ fi
+
+ if $ID $user > /dev/null 2>&1; then
+ log_must $USERDEL $user
+ fi
+
+ [[ -d $basedir/$user ]] && $RM -fr $basedir/$user
+
+ return 0
+}
+
+#
+# Select valid gid and create specified group.
+#
+# $1 group name
+#
+function add_group #<group_name>
+{
+ typeset group=$1
+
+ if ((${#group} == 0)); then
+ log_fail "group name is necessary."
+ fi
+
+ # Assign 100 as the base gid
+ typeset -i gid=100
+ while true; do
+ $GROUPADD -g $gid $group > /dev/null 2>&1
+ typeset -i ret=$?
+ case $ret in
+ 0) return 0 ;;
+ # The gid is not unique
+ 4) ((gid += 1)) ;;
+ *) return 1 ;;
+ esac
+ done
+}
+
+#
+# Delete the specified group.
+#
+# $1 group name
+#
+function del_group #<group_name>
+{
+	typeset grp=$1
+	if ((${#grp} == 0)); then
+		log_fail "group name is necessary."
+	fi
+
+	# Use groupmod as an existence probe: "renaming" the group to its
+	# own name yields a distinctive exit code per case.
+	$GROUPMOD -n $grp $grp > /dev/null 2>&1
+	typeset -i ret=$?
+	case $ret in
+		# Group does not exist.
+		6) return 0 ;;
+		# Name already exists as a group name
+		9) log_must $GROUPDEL $grp ;;
+		*) return 1 ;;
+	esac
+
+	return 0
+}
+
+#
+# This function will return true if it's safe to destroy the pool passed
+# as argument 1. It checks for pools based on zvols and files, and also
+# files contained in a pool that may have a different mountpoint.
+#
+function safe_to_destroy_pool { # $1 the pool name
+
+ typeset pool=""
+ typeset DONT_DESTROY=""
+
+ # We check that by deleting the $1 pool, we're not
+ # going to pull the rug out from other pools. Do this
+ # by looking at all other pools, ensuring that they
+ # aren't built from files or zvols contained in this pool.
+
+ for pool in $($ZPOOL list -H -o name)
+ do
+ ALTMOUNTPOOL=""
+
+ # this is a list of the top-level directories in each of the
+ # files that make up the path to the files the pool is based on
+ FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
+ $AWK '{print $1}')
+
+ # this is a list of the zvols that make up the pool
+ ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "/dev/zvol/dsk/$1$" \
+ | $AWK '{print $1}')
+
+ # also want to determine if it's a file-based pool using an
+ # alternate mountpoint...
+ POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
+ $GREP / | $AWK '{print $1}' | \
+ $AWK -F/ '{print $2}' | $GREP -v "dev")
+
+ for pooldir in $POOL_FILE_DIRS
+ do
+ OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
+ $GREP "${pooldir}$" | $AWK '{print $1}')
+
+ ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
+ done
+
+
+ if [ ! -z "$ZVOLPOOL" ]
+ then
+ DONT_DESTROY="true"
+ log_note "Pool $pool is built from $ZVOLPOOL on $1"
+ fi
+
+ if [ ! -z "$FILEPOOL" ]
+ then
+ DONT_DESTROY="true"
+ log_note "Pool $pool is built from $FILEPOOL on $1"
+ fi
+
+ if [ ! -z "$ALTMOUNTPOOL" ]
+ then
+ DONT_DESTROY="true"
+ log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
+ fi
+ done
+
+ if [ -z "${DONT_DESTROY}" ]
+ then
+ return 0
+ else
+ log_note "Warning: it is not safe to destroy $1!"
+ return 1
+ fi
+}
+
+#
+# Get IP address of hostname
+# $1 hostname
+#
+function getipbyhost
+{
+ typeset ip
+ ip=`$ARP $1 2>/dev/null | $AWK -F\) '{print $1}' \
+ | $AWK -F\('{print $2}'`
+ $ECHO $ip
+}
+
+#
+# Setup iSCSI initiator to target
+# $1 target hostname
+#
+function iscsi_isetup
+{
+	# check svc:/network/iscsi_initiator:default state, try to enable it
+	# if the state is not ON
+	typeset ISCSII_FMRI="svc:/network/iscsi_initiator:default"
+	if [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) ]]; then
+		log_must $SVCADM enable $ISCSII_FMRI
+
+		# Poll for up to 20 seconds for the service to come online.
+		typeset -i retry=20
+		while [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) && \
+			($retry -ne 0) ]]
+		do
+			((retry = retry - 1))
+			$SLEEP 1
+		done
+
+		if [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) ]]; then
+			log_fail "$ISCSII_FMRI service can not be enabled!"
+		fi
+	fi
+
+	# Point the initiator at the target host and rescan for devices.
+	log_must $ISCSIADM add discovery-address $(getipbyhost $1)
+	log_must $ISCSIADM modify discovery --sendtargets enable
+	log_must $DEVFSADM -i iscsi
+}
+
+#
+# Check whether iscsi parameter is set as remote
+#
+# return 0 if iscsi is set as remote, otherwise 1
+#
+function check_iscsi_remote
+{
+	# $iscsi is a test-suite configuration variable; the [[ ]]
+	# status is the function's result.
+	[[ $iscsi == "remote" ]]
+}
+
+#
+# Check if a volume is a valid iscsi target
+# $1 volume name
+# return 0 if it succeeds, otherwise, return 1
+#
+function is_iscsi_target
+{
+	typeset dataset=$1
+	typeset tgt
+
+	[[ -z $dataset ]] && return 1
+
+	# Succeed on the first configured target matching the name; an
+	# empty target list simply falls through to failure.
+	for tgt in $($ISCSITADM list target | $GREP "Target:" | \
+	    $AWK '{print $2}'); do
+		[[ $dataset == $tgt ]] && return 0
+	done
+
+	return 1
+}
+
+#
+# Get the iSCSI name of a target
+# $1 target name
+#
+function iscsi_name
+{
+	typeset target=$1
+	typeset name
+
+	[[ -z $target ]] && log_fail "No parameter."
+
+	if ! is_iscsi_target $target ; then
+		log_fail "Not a target."
+	fi
+
+	name=$($ISCSITADM list target $target | $GREP "iSCSI Name:" \
+	    | $AWK '{print $2}')
+
+	# Emit the name on stdout for command substitution by callers;
+	# 'return' only accepts a numeric exit status, so the original
+	# 'return $name' would error out on an iqn string.
+	$ECHO $name
+}
+
+#
+# check svc:/system/iscsitgt:default state, try to enable it if the state
+# is not ON
+#
+function iscsitgt_setup
+{
+	log_must $RM -f $ISCSITGTFILE
+	if [[ "ON" == $($SVCS -H -o sta $ISCSITGT_FMRI) ]]; then
+		log_note "iscsitgt is already enabled"
+		return
+	fi
+
+	# Enable temporarily (-t) so the change does not persist reboot.
+	log_must $SVCADM enable -t $ISCSITGT_FMRI
+
+	# Poll for up to 20 seconds for the service to come online.
+	typeset -i retry=20
+	while [[ "ON" != $($SVCS -H -o sta $ISCSITGT_FMRI) && \
+		($retry -ne 0) ]]
+	do
+		$SLEEP 1
+		((retry = retry - 1))
+	done
+
+	if [[ "ON" != $($SVCS -H -o sta $ISCSITGT_FMRI) ]]; then
+		log_fail "$ISCSITGT_FMRI service can not be enabled!"
+	fi
+
+	# Record that we enabled the service so iscsitgt_cleanup knows
+	# it must disable it again.
+	log_must $TOUCH $ISCSITGTFILE
+}
+
+#
+# set DISABLED state of svc:/system/iscsitgt:default
+# which is the most suitable state if $ISCSITGTFILE exists
+#
+function iscsitgt_cleanup
+{
+	# Only disable the service if iscsitgt_setup enabled it (tracked
+	# via $ISCSITGTFILE); otherwise leave it alone.
+	[[ -e $ISCSITGTFILE ]] || return 0
+
+	log_must $SVCADM disable $ISCSITGT_FMRI
+	log_must $RM -f $ISCSITGTFILE
+}
+
+#
+# Close iSCSI initiator to target
+# $1 target hostname
+#
+function iscsi_iclose
+{
+	# Resolve the target address up front.
+	typeset target_ip=$(getipbyhost $1)
+
+	log_must $ISCSIADM modify discovery --sendtargets disable
+	log_must $ISCSIADM remove discovery-address $target_ip
+	$DEVFSADM -Cv
+}
+
+#
+# Get the available ZFS compression options
+# $1 option type zfs_set|zfs_compress
+#
+function get_compress_opts
+{
+ typeset COMPRESS_OPTS
+ typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
+ gzip-6 gzip-7 gzip-8 gzip-9"
+
+ if [[ $1 == "zfs_compress" ]] ; then
+ COMPRESS_OPTS="on lzjb"
+ elif [[ $1 == "zfs_set" ]] ; then
+ COMPRESS_OPTS="on off lzjb"
+ fi
+ typeset valid_opts="$COMPRESS_OPTS"
+ $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
+ if [[ $? -eq 0 ]]; then
+ valid_opts="$valid_opts $GZIP_OPTS"
+ fi
+ $ECHO "$valid_opts"
+}
+
+#
+# Verify zfs operation with -p option work as expected
+# $1 operation, value could be create, clone or rename
+# $2 dataset type, value could be fs or vol
+# $3 dataset name
+# $4 new dataset name
+#
+function verify_opt_p_ops
+{
+	typeset ops=$1
+	typeset datatype=$2
+	typeset dataset=$3
+	typeset newdataset=$4
+
+	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
+		log_fail "$datatype is not supported."
+	fi
+
+	# check parameters accordingly
+	case $ops in
+		create)
+			# 'create' takes only the new name.
+			newdataset=$dataset
+			dataset=""
+			if [[ $datatype == "vol" ]]; then
+				ops="create -V $VOLSIZE"
+			fi
+			;;
+		clone)
+			# 'clone' requires an existing snapshot source.
+			if [[ -z $newdataset ]]; then
+				log_fail "newdataset should not be empty" \
+					"when ops is $ops."
+			fi
+			log_must datasetexists $dataset
+			log_must snapexists $dataset
+			;;
+		rename)
+			# 'rename' requires an existing non-snapshot source.
+			if [[ -z $newdataset ]]; then
+				log_fail "newdataset should not be empty" \
+					"when ops is $ops."
+			fi
+			log_must datasetexists $dataset
+			log_mustnot snapexists $dataset
+			;;
+		*)
+			log_fail "$ops is not supported."
+			;;
+	esac
+
+	# make sure the upper level filesystem does not exist
+	if datasetexists ${newdataset%/*} ; then
+		log_must $ZFS destroy -rRf ${newdataset%/*}
+	fi
+
+	# without -p option, operation will fail
+	log_mustnot $ZFS $ops $dataset $newdataset
+	log_mustnot datasetexists $newdataset ${newdataset%/*}
+
+	# with -p option, operation should succeed
+	log_must $ZFS $ops -p $dataset $newdataset
+	if ! datasetexists $newdataset ; then
+		log_fail "-p option does not work for $ops"
+	fi
+
+	# when $ops is create or clone, redo the operation still return zero
+	if [[ $ops != "rename" ]]; then
+		log_must $ZFS $ops -p $dataset $newdataset
+	fi
+
+	return 0
+}
+
+#
+# Get configuration of pool
+# $1 pool name
+# $2 config name
+#
+function get_config
+{
+	typeset pool=$1
+	typeset config=$2
+	typeset alt_root
+
+	if ! poolexists "$pool" ; then
+		return 1
+	fi
+	# The last column of 'zpool list -H' is the altroot; '-' means
+	# none, in which case zdb can read the cachefile config (-C).
+	alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
+	if [[ $alt_root == "-" ]]; then
+		value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
+		    '{print $2}')
+	else
+		value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
+		    '{print $2}')
+	fi
+	# Strip the surrounding quote characters that zdb prints around
+	# string values.  NOTE(review): the unescaped ' in these patterns
+	# is ksh93-specific -- confirm before porting to another shell.
+	if [[ -n $value ]] ; then
+		value=${value#'}
+		value=${value%'}
+	fi
+	# Use $ECHO per the library's command-variable convention (the
+	# original used a bare 'echo').
+	$ECHO $value
+
+	return 0
+}
+
+#
+# Private function. Randomly select one of the items from the arguments.
+#
+# $1 count
+# $2-n string
+#
+function _random_get
+{
+	typeset cnt=$1
+	shift
+
+	typeset str="$@"
+	typeset -i ind
+	((ind = RANDOM % cnt + 1))
+
+	typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
+	$ECHO $ret
+}
+
+#
+# Randomly select one item from the arguments, where an empty ("NONE")
+# result is also a possible outcome.
+#
+function random_get_with_non
+{
+	typeset -i cnt=$#
+	# Add one extra slot beyond the argument count so _random_get can
+	# pick a non-existent cut field, yielding the empty result.
+	# (Was '((cnt =+ 1))', which assigned cnt=+1 instead of adding 1,
+	# making the extra slot unreachable.)
+	((cnt += 1))
+
+	_random_get "$cnt" "$@"
+}
+
+#
+# Randomly select one item from the arguments (never empty).
+#
+function random_get
+{
+	_random_get "$#" "$@"
+}
+
+#
+# Detect if the current system support slog
+#
+function verify_slog_support
+{
+	typeset dir=/tmp/disk.$$
+	typeset pool=foo.$$
+	typeset vdev=$dir/a
+	typeset sdev=$dir/b
+
+	# Build two small file vdevs and dry-run (-n) a pool creation
+	# with a log device; failure means no slog support.
+	$MKDIR -p $dir
+	$MKFILE 64M $vdev $sdev
+
+	typeset -i ret=0
+	$ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1 || ret=1
+	$RM -r $dir
+
+	return $ret
+}
+
+#
+# The function will generate a dataset name with specific length
+# $1, the length of the name
+# $2, the base string to construct the name
+#
+function gen_dataset_name
+{
+	typeset -i len=$1
+	typeset basestr="$2"
+	typeset -i baselen=${#basestr}
+	typeset -i reps
+	typeset name=""
+
+	# Number of repetitions of basestr needed to reach at least
+	# len characters (round up when it does not divide evenly).
+	((reps = len / baselen))
+	((len % baselen != 0)) && ((reps += 1))
+
+	while ((reps > 0)); do
+		name="${name}$basestr"
+		((reps -= 1))
+	done
+
+	$ECHO $name
+}
+
+#
+# Get cksum tuple of dataset
+# $1 dataset name
+#
+# sample zdb output:
+# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
+# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
+# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
+# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
+function datasetcksum
+{
+ typeset cksum
+ $SYNC
+ cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
+ | $AWK -F= '{print $7}')
+ $ECHO $cksum
+}
+
+#
+# Get cksum of file
+# #1 file path
+#
+function checksum
+{
+	# First field of cksum(1) output is the CRC value.
+	typeset sum
+	sum=$($CKSUM $1 | $AWK '{print $1}')
+	$ECHO $sum
+}
+
+#
+# Get the given disk/slice state from the specific field of the pool
+#
+function get_device_state #pool disk field("", "spares","logs")
+{
+ typeset pool=$1
+ typeset disk=${2#/dev/dsk/}
+ typeset field=${3:-$pool}
+
+ state=$($ZPOOL status -v "$pool" 2>/dev/null | \
+ $NAWK -v device=$disk -v pool=$pool -v field=$field \
+ 'BEGIN {startconfig=0; startfield=0; }
+ /config:/ {startconfig=1}
+ (startconfig==1) && ($1==field) {startfield=1; next;}
+ (startfield==1) && ($1==device) {print $2; exit;}
+ (startfield==1) &&
+ ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
+ echo $state
+}
+
+
+#
+# print the given directory filesystem type
+#
+# $1 directory name
+#
+function get_fstype
+{
+	typeset dir=$1
+
+	[[ -n $dir ]] || log_fail "Usage: get_fstype <directory>"
+
+	#
+	# $ df -n /
+	# / : ufs
+	#
+	# The filesystem type is the third field.
+	$DF -n $dir | $AWK '{print $3}'
+}
+
+#
+# Given a disk, label it to VTOC regardless of what label was on the disk
+# $1 disk
+#
+function labelvtoc
+{
+ typeset disk=$1
+ if [[ -z $disk ]]; then
+ log_fail "The disk name is unspecified."
+ fi
+ # Script file fed to format(1M) -f below.
+ typeset label_file=/var/tmp/labelvtoc.$$
+ typeset arch=$($UNAME -p)
+
+ if [[ $arch == "i386" ]]; then
+ # x86: answer the "label" dialog (choose SMI label, accept the
+ # default), then quit twice to leave the menus.
+ $ECHO "label" > $label_file
+ $ECHO "0" >> $label_file
+ $ECHO "" >> $label_file
+ $ECHO "q" >> $label_file
+ $ECHO "q" >> $label_file
+
+ # x86 needs a default fdisk partition before it can be labeled.
+ $FDISK -B $disk >/dev/null 2>&1
+ # wait a while for fdisk to finish
+ $SLEEP 60
+ elif [[ $arch == "sparc" ]]; then
+ # sparc: same dialog, but with more prompts to accept defaults
+ # for and no fdisk step.
+ $ECHO "label" > $label_file
+ $ECHO "0" >> $label_file
+ $ECHO "" >> $label_file
+ $ECHO "" >> $label_file
+ $ECHO "" >> $label_file
+ $ECHO "q" >> $label_file
+ else
+ log_fail "unknown arch type"
+ fi
+
+ $FORMAT -e -s -d $disk -f $label_file
+ typeset -i ret_val=$?
+ $RM -f $label_file
+ #
+ # wait the format to finish
+ #
+ $SLEEP 60
+ if ((ret_val != 0)); then
+ log_fail "unable to label $disk as VTOC."
+ fi
+
+ return 0
+}
+
+#
+# check if the system was installed as zfsroot or not
+# return: 0 true, otherwise false
+#
+function is_zfsroot
+{
+ # On a zfsroot system 'df -n /' reports type "zfs"; the function's
+ # exit status is grep's.
+ $DF -n / | $GREP zfs > /dev/null 2>&1
+}
+
+#
+# get the root filesystem name if it's zfsroot system.
+#
+# return: root filesystem name
+function get_rootfs
+{
+ typeset rootfs=""
+
+ # Find the zfs dataset mounted at "/" in the mount table.
+ rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
+ /etc/mnttab)
+ [[ -n "$rootfs" ]] || log_fail "Can not get rootfs"
+
+ # Verify the dataset really exists before reporting it.
+ if $ZFS list $rootfs > /dev/null 2>&1; then
+ $ECHO $rootfs
+ else
+ log_fail "This is not a zfsroot system."
+ fi
+}
+
+#
+# get the rootfs's pool name
+# return:
+# rootpool name
+#
+function get_rootpool
+{
+ typeset rootfs=""
+ typeset rootpool=""
+
+ # Find the zfs dataset mounted at "/" in the mount table.
+ rootfs=$($AWK '{if ($2 == "/" && $3 =="zfs") print $1}' \
+ /etc/mnttab)
+ if [[ -z "$rootfs" ]]; then
+ log_fail "Can not get rootpool"
+ fi
+ $ZFS list $rootfs > /dev/null 2>&1
+ if (($? == 0)); then
+ # The pool name is the first '/'-separated component of the
+ # dataset name.  Fixed: use $AWK and $(...) instead of a bare
+ # awk inside backquotes, consistent with the rest of this
+ # library.
+ rootpool=$($ECHO $rootfs | $AWK -F/ '{print $1}')
+ $ECHO $rootpool
+ else
+ log_fail "This is not a zfsroot system."
+ fi
+}
+
+#
+# Get the sub string from specified source string
+#
+# $1 source string
+# $2 start position. Count from 1
+# $3 offset
+#
+function get_substr
+{
+ typeset src=$1 start=$2 len=$3
+
+ # nawk substr() counts positions starting at 1.
+ $ECHO $src | \
+ $NAWK -v pos=$start -v offset=$len '{print substr($0, pos, offset)}'
+}
+
+#
+# Check if the given device is physical device
+#
+function is_physical_device #device
+{
+ typeset device=${1#/dev/dsk/}
+ device=${device#/dev/rdsk/}
+
+ $ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
+ return $?
+}
+
+#
+# Get the directory path of given device
+#
+function get_device_dir #device
+{
+ typeset device=$1
+
+ # Physical (ctd-named) devices live in /dev/dsk; for anything else
+ # (e.g. a file-backed vdev) return its parent directory.
+ # Fixed: the helper was invoked via command substitution
+ # ("! $(is_physical_device ...)"), which only inverted the right
+ # status by accident; call it directly instead.
+ if ! is_physical_device $device ; then
+ if [[ $device != "/" ]]; then
+ device=${device%/*}
+ fi
+ $ECHO $device
+ else
+ $ECHO "/dev/dsk"
+ fi
+}
+
+#
+# Get the package name
+#
+function get_package_name
+{
+ typeset dirpath=${1:-$STC_NAME}
+
+ echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
+}
+
+#
+# Get the word numbers from a string separated by white space
+#
+function get_word_count
+{
+ $ECHO $1 | $WC -w
+}
+
+#
+# To verify if the required number of disks is given
+#
+function verify_disk_count
+{
+ typeset -i min=${2:-1}
+ typeset -i count=$(get_word_count "$1")
+
+ # Nothing to do when enough disks were supplied.
+ ((count >= min)) && return
+
+ log_untested "A minimum of $min disks is required to run." \
+ " You specified $count disk(s)"
+}
+
+function ds_is_volume
+{
+ # True (0) when the dataset's type property is "volume".
+ [[ $(get_prop type $1) = "volume" ]]
+}
+
+function ds_is_filesystem
+{
+ # True (0) when the dataset's type property is "filesystem".
+ [[ $(get_prop type $1) = "filesystem" ]]
+}
+
+function ds_is_snapshot
+{
+ # True (0) when the dataset's type property is "snapshot".
+ [[ $(get_prop type $1) = "snapshot" ]]
+}
+
+#
+# Check if Trusted Extensions are installed and enabled
+#
+function is_te_enabled
+{
+ # labeld is the Trusted Extensions labeling service.  Fixed: discard
+ # grep's stdout so this predicate does not pollute the output of
+ # callers that capture it; only the exit status matters.
+ $SVCS -H -o state labeld 2>/dev/null | $GREP "enabled" > /dev/null 2>&1
+ if (($? != 0)); then
+ return 1
+ else
+ return 0
+ fi
+}
diff --git a/usr/src/test/zfs-tests/include/math.shlib b/usr/src/test/zfs-tests/include/math.shlib
new file mode 100644
index 0000000000..38479d3521
--- /dev/null
+++ b/usr/src/test/zfs-tests/include/math.shlib
@@ -0,0 +1,43 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2012 by Delphix. All rights reserved.
+#
+
+#
+# Return 0 if the percentage difference between $a and $b is $percent or
+# greater. Return 1 if the percentage is lower or if we would divide by
+# zero. For use like this:
+#
+# Do $action if the calculated percentage is greater or equal to that passed in:
+# within_percent A B P && $action
+# Do $action if the calculated percentage is less than that passed in:
+# within_percent A B P || $action
+#
+function within_percent
+{
+ typeset a=$1
+ typeset b=$1
+ typeset percent=$3
+
+ # Set $a or $b to $2 such that a >= b
+ [[ '1' = $($ECHO "if ($2 > $a) 1" | $BC) ]] && a=$2 || b=$2
+
+ # Prevent division by 0
+ [[ $a =~ [1-9] ]] || return 1
+
+ typeset p=$($ECHO "scale=2; $b * 100 / $a" | $BC)
+ log_note "Comparing $a and $b given $percent% (calculated: $p%)"
+ [[ '1' = $($ECHO "scale=2; if ($p >= $percent) 1" | $BC) ]] && return 0
+
+ return 1
+}
diff --git a/usr/src/test/zfs-tests/include/properties.shlib b/usr/src/test/zfs-tests/include/properties.shlib
new file mode 100644
index 0000000000..bb0b4ff586
--- /dev/null
+++ b/usr/src/test/zfs-tests/include/properties.shlib
@@ -0,0 +1,63 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2012 by Delphix. All rights reserved.
+#
+
+# Valid 'compression' property values; 'on' and 'off' come first so callers
+# can exclude them by starting at index 2 (see get_rand_prop below).
+typeset -a compress_props=('on' 'off' 'lzjb' 'gzip' 'gzip-1' 'gzip-2' 'gzip-3'
+ 'gzip-4' 'gzip-5' 'gzip-6' 'gzip-7' 'gzip-8' 'gzip-9' 'zle')
+
+# Valid 'checksum' property values, 'on' and 'off' first as above.
+typeset -a checksum_props=('on' 'off' 'fletcher2' 'fletcher4' 'sha256')
+
+#
+# Given the property array passed in, return 'num_props' elements to the
+# user, excluding any elements below 'start.' This allows us to exclude
+# 'off' and 'on' which can be either unwanted, or a duplicate of another
+# property respectively.
+#
+function get_rand_prop
+{
+ # Indirectly expand the array named by $1 into a local copy.
+ typeset prop_array=($(eval echo \${$1[@]}))
+ typeset -i num_props=$2
+ typeset -i start=$3
+ typeset retstr=""
+
+ [[ -z $prop_array || -z $num_props || -z $start ]] && \
+ log_fail "get_rand_prop: bad arguments"
+
+ typeset prop_max=$((${#prop_array[@]} - 1))
+ typeset -i i
+ # shuf picks num_props distinct random indices in [start, prop_max];
+ # collect the corresponding property values.
+ for i in $($SHUF -i $start-$prop_max -n $num_props); do
+ retstr="${prop_array[$i]} $retstr"
+ done
+ echo $retstr
+}
+
+function get_rand_compress
+{
+ get_rand_prop compress_props $1 2
+}
+
+function get_rand_compress_any
+{
+ get_rand_prop compress_props $1 0
+}
+
+function get_rand_checksum
+{
+ get_rand_prop checksum_props $1 2
+}
+
+function get_rand_checksum_any
+{
+ get_rand_prop checksum_props $1 0
+}