path: root/usr/src/test/zfs-tests/include/libtest.shlib
author		Jerry Jelinek <jerry.jelinek@joyent.com>	2016-11-14 14:03:06 +0000
committer	Jerry Jelinek <jerry.jelinek@joyent.com>	2016-11-14 14:03:06 +0000
commit		eb8f6f014c29ae7a6650458f49da8960ed82bfcf (patch)
tree		d8cc134dc1a4d3119493593b836822d612768dc5 /usr/src/test/zfs-tests/include/libtest.shlib
parent		91b5490a419451a7a9fad5fff2b5f1f83ad87955 (diff)
parent		1d32ba663e202c24a5a1f2e5aef83fffb447cb7f (diff)
download	illumos-joyent-eb8f6f014c29ae7a6650458f49da8960ed82bfcf.tar.gz
[illumos-gate merge]
commit 1d32ba663e202c24a5a1f2e5aef83fffb447cb7f
	7290 ZFS test suite needs to control what utilities it can run
commit f4fb84c03b3f4c46dab36872e953dd3c27317c3a
	7576 Print the correct size of loader.efi when failing to load it into memory.
commit 40510e8eba18690b9a9843b26393725eeb0f1dac
	6676 Race between unique_insert() and unique_remove() causes ZFS fsid change
Diffstat (limited to 'usr/src/test/zfs-tests/include/libtest.shlib')
-rw-r--r--	usr/src/test/zfs-tests/include/libtest.shlib	| 596
1 files changed, 298 insertions, 298 deletions
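
The bulk of this merge is the 7290 change: throughout libtest.shlib, external utilities that were previously reached through exported variables (e.g. $ZFS, $AWK, $RM) are now invoked by name, so the suite governs what actually runs through command lookup rather than per-command variables. A minimal sketch of the two invocation styles, assuming the harness steers lookup via PATH (the wrapper directory below is hypothetical, for illustration only):

#!/bin/ksh -p

# Old style (removed by this merge): every utility comes from a variable
# that the framework must define before the library is sourced.
ZFS=/usr/sbin/zfs
AWK=/usr/bin/awk
for mnt in $($ZFS mount | $AWK '{print $2}'); do
	echo "old-style mountpoint: $mnt"
done

# New style (introduced by this merge): commands are called by name, so
# whatever the harness puts first on PATH is what actually executes.
PATH=/opt/zfs-tests/bin:$PATH	# hypothetical wrapper directory
export PATH
for mnt in $(zfs mount | awk '{print $2}'); do
	echo "new-style mountpoint: $mnt"
done
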
diff --git a/usr/src/test/zfs-tests/include/libtest.shlib b/usr/src/test/zfs-tests/include/libtest.shlib
index 1d3202768b..dc4e3ca3d2 100644
--- a/usr/src/test/zfs-tests/include/libtest.shlib
+++ b/usr/src/test/zfs-tests/include/libtest.shlib
@@ -22,7 +22,7 @@
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
-# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
+# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
# Copyright 2016 Nexenta Systems, Inc.
#
@@ -47,17 +47,17 @@ function ismounted
case $fstype in
zfs)
if [[ "$1" == "/"* ]] ; then
- for out in $($ZFS mount | $AWK '{print $2}'); do
+ for out in $(zfs mount | awk '{print $2}'); do
[[ $1 == $out ]] && return 0
done
else
- for out in $($ZFS mount | $AWK '{print $1}'); do
+ for out in $(zfs mount | awk '{print $1}'); do
[[ $1 == $out ]] && return 0
done
fi
;;
ufs|nfs)
- out=$($DF -F $fstype $1 2>/dev/null)
+ out=$(df -F $fstype $1 2>/dev/null)
ret=$?
(($ret != 0)) && return $ret
@@ -104,7 +104,7 @@ function unmounted
function splitline
{
- $ECHO $1 | $SED "s/,/ /g"
+ echo $1 | sed "s/,/ /g"
}
function default_setup
@@ -127,36 +127,36 @@ function default_setup_noexit
if poolexists $TESTPOOL ; then
destroy_pool $TESTPOOL
fi
- [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
- log_must $ZPOOL create -f $TESTPOOL $disklist
+ [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
+ log_must zpool create -f $TESTPOOL $disklist
else
reexport_pool
fi
- $RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
- $MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR
+ rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
+ mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
- log_must $ZFS create $TESTPOOL/$TESTFS
- log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
+ log_must zfs create $TESTPOOL/$TESTFS
+ log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
if [[ -n $container ]]; then
- $RM -rf $TESTDIR1 || \
+ rm -rf $TESTDIR1 || \
log_unresolved Could not remove $TESTDIR1
- $MKDIR -p $TESTDIR1 || \
+ mkdir -p $TESTDIR1 || \
log_unresolved Could not create $TESTDIR1
- log_must $ZFS create $TESTPOOL/$TESTCTR
- log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
- log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
- log_must $ZFS set mountpoint=$TESTDIR1 \
+ log_must zfs create $TESTPOOL/$TESTCTR
+ log_must zfs set canmount=off $TESTPOOL/$TESTCTR
+ log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
+ log_must zfs set mountpoint=$TESTDIR1 \
$TESTPOOL/$TESTCTR/$TESTFS1
fi
if [[ -n $volume ]]; then
if is_global_zone ; then
- log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
+ log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
else
- log_must $ZFS create $TESTPOOL/$TESTVOL
+ log_must zfs create $TESTPOOL/$TESTVOL
fi
fi
}
@@ -215,7 +215,7 @@ function create_snapshot
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
- log_must $ZFS snapshot $fs_vol@$snap
+ log_must zfs snapshot $fs_vol@$snap
}
#
@@ -234,7 +234,7 @@ function create_clone # snapshot clone
[[ -z $clone ]] && \
log_fail "Clone name is undefined."
- log_must $ZFS clone $snap $clone
+ log_must zfs clone $snap $clone
}
function default_mirror_setup
@@ -259,10 +259,10 @@ function default_mirror_setup_noexit
log_fail "$func: No parameters passed"
[[ -z $secondary ]] && \
log_fail "$func: No secondary partition passed"
- [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
- log_must $ZPOOL create -f $TESTPOOL mirror $@
- log_must $ZFS create $TESTPOOL/$TESTFS
- log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
+ [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
+ log_must zpool create -f $TESTPOOL mirror $@
+ log_must zfs create $TESTPOOL/$TESTFS
+ log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
@@ -279,8 +279,8 @@ function setup_mirrors
shift
while ((nmirrors > 0)); do
log_must test -n "$1" -a -n "$2"
- [[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
- log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
+ [[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
+ log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
shift 2
((nmirrors = nmirrors - 1))
done
@@ -300,8 +300,8 @@ function setup_raidzs
shift
while ((nraidzs > 0)); do
log_must test -n "$1" -a -n "$2"
- [[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
- log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
+ [[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
+ log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
shift 2
((nraidzs = nraidzs - 1))
done
@@ -331,10 +331,10 @@ function default_raidz_setup
log_fail "A raid-z requires a minimum of two disks."
fi
- [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
- log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
- log_must $ZFS create $TESTPOOL/$TESTFS
- log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
+ [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
+ log_must zpool create -f $TESTPOOL raidz $1 $2 $3
+ log_must zfs create $TESTPOOL/$TESTFS
+ log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
log_pass
}
@@ -363,10 +363,10 @@ function default_cleanup_noexit
# filesystems it contains.
#
if is_global_zone; then
- $ZFS unmount -a > /dev/null 2>&1
- exclude=`eval $ECHO \"'(${KEEP})'\"`
- ALL_POOLS=$($ZPOOL list -H -o name \
- | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude")
+ zfs unmount -a > /dev/null 2>&1
+ exclude=`eval echo \"'(${KEEP})'\"`
+ ALL_POOLS=$(zpool list -H -o name \
+ | grep -v "$NO_POOLS" | egrep -v "$exclude")
# Here, we loop through the pools we're allowed to
# destroy, only destroying them if it's safe to do
# so.
@@ -378,57 +378,57 @@ function default_cleanup_noexit
then
destroy_pool $pool
fi
- ALL_POOLS=$($ZPOOL list -H -o name \
- | $GREP -v "$NO_POOLS" \
- | $EGREP -v "$exclude")
+ ALL_POOLS=$(zpool list -H -o name \
+ | grep -v "$NO_POOLS" \
+ | egrep -v "$exclude")
done
done
- $ZFS mount -a
+ zfs mount -a
else
typeset fs=""
- for fs in $($ZFS list -H -o name \
- | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
+ for fs in $(zfs list -H -o name \
+ | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
datasetexists $fs && \
- log_must $ZFS destroy -Rf $fs
+ log_must zfs destroy -Rf $fs
done
# Need cleanup here to avoid garbage dir left.
- for fs in $($ZFS list -H -o name); do
+ for fs in $(zfs list -H -o name); do
[[ $fs == /$ZONE_POOL ]] && continue
- [[ -d $fs ]] && log_must $RM -rf $fs/*
+ [[ -d $fs ]] && log_must rm -rf $fs/*
done
#
# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
# the default value
#
- for fs in $($ZFS list -H -o name); do
+ for fs in $(zfs list -H -o name); do
if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
- log_must $ZFS set reservation=none $fs
- log_must $ZFS set recordsize=128K $fs
- log_must $ZFS set mountpoint=/$fs $fs
+ log_must zfs set reservation=none $fs
+ log_must zfs set recordsize=128K $fs
+ log_must zfs set mountpoint=/$fs $fs
typeset enc=""
enc=$(get_prop encryption $fs)
if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
[[ "$enc" == "off" ]]; then
- log_must $ZFS set checksum=on $fs
+ log_must zfs set checksum=on $fs
fi
- log_must $ZFS set compression=off $fs
- log_must $ZFS set atime=on $fs
- log_must $ZFS set devices=off $fs
- log_must $ZFS set exec=on $fs
- log_must $ZFS set setuid=on $fs
- log_must $ZFS set readonly=off $fs
- log_must $ZFS set snapdir=hidden $fs
- log_must $ZFS set aclmode=groupmask $fs
- log_must $ZFS set aclinherit=secure $fs
+ log_must zfs set compression=off $fs
+ log_must zfs set atime=on $fs
+ log_must zfs set devices=off $fs
+ log_must zfs set exec=on $fs
+ log_must zfs set setuid=on $fs
+ log_must zfs set readonly=off $fs
+ log_must zfs set snapdir=hidden $fs
+ log_must zfs set aclmode=groupmask $fs
+ log_must zfs set aclinherit=secure $fs
fi
done
fi
[[ -d $TESTDIR ]] && \
- log_must $RM -rf $TESTDIR
+ log_must rm -rf $TESTDIR
}
@@ -444,16 +444,16 @@ function default_container_cleanup
ismounted $TESTPOOL/$TESTCTR/$TESTFS1
[[ $? -eq 0 ]] && \
- log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1
+ log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
- log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
+ log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
datasetexists $TESTPOOL/$TESTCTR && \
- log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR
+ log_must zfs destroy -Rf $TESTPOOL/$TESTCTR
[[ -e $TESTDIR1 ]] && \
- log_must $RM -rf $TESTDIR1 > /dev/null 2>&1
+ log_must rm -rf $TESTDIR1 > /dev/null 2>&1
default_cleanup
}
@@ -484,9 +484,9 @@ function destroy_snapshot
log_fail "get_prop mountpoint $snap failed."
fi
- log_must $ZFS destroy $snap
+ log_must zfs destroy $snap
[[ $mtpt != "" && -d $mtpt ]] && \
- log_must $RM -rf $mtpt
+ log_must rm -rf $mtpt
}
#
@@ -510,9 +510,9 @@ function destroy_clone
log_fail "get_prop mountpoint $clone failed."
fi
- log_must $ZFS destroy $clone
+ log_must zfs destroy $clone
[[ $mtpt != "" && -d $mtpt ]] && \
- log_must $RM -rf $mtpt
+ log_must rm -rf $mtpt
}
# Return 0 if a snapshot exists; $? otherwise
@@ -521,7 +521,7 @@ function destroy_clone
function snapexists
{
- $ZFS list -H -t snapshot "$1" > /dev/null 2>&1
+ zfs list -H -t snapshot "$1" > /dev/null 2>&1
return $?
}
@@ -546,7 +546,7 @@ function dataset_setprop
return 1
fi
typeset output=
- output=$($ZFS set $2=$3 $1 2>&1)
+ output=$(zfs set $2=$3 $1 2>&1)
typeset rv=$?
if ((rv != 0)); then
log_note "Setting property on $1 failed."
@@ -578,7 +578,7 @@ function dataset_set_defaultproperties
typeset confset=
typeset -i found=0
- for confset in $($ZFS list); do
+ for confset in $(zfs list); do
if [[ $dataset = $confset ]]; then
found=1
break
@@ -641,20 +641,20 @@ function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk
log_fail "The slice, size or disk name is unspecified."
typeset format_file=/var/tmp/format_in.$$
- $ECHO "partition" >$format_file
- $ECHO "$slicenum" >> $format_file
- $ECHO "" >> $format_file
- $ECHO "" >> $format_file
- $ECHO "$start" >> $format_file
- $ECHO "$size" >> $format_file
- $ECHO "label" >> $format_file
- $ECHO "" >> $format_file
- $ECHO "q" >> $format_file
- $ECHO "q" >> $format_file
-
- $FORMAT -e -s -d $disk -f $format_file
+ echo "partition" >$format_file
+ echo "$slicenum" >> $format_file
+ echo "" >> $format_file
+ echo "" >> $format_file
+ echo "$start" >> $format_file
+ echo "$size" >> $format_file
+ echo "label" >> $format_file
+ echo "" >> $format_file
+ echo "q" >> $format_file
+ echo "q" >> $format_file
+
+ format -e -s -d $disk -f $format_file
typeset ret_val=$?
- $RM -f $format_file
+ rm -f $format_file
[[ $ret_val -ne 0 ]] && \
log_fail "Unable to format $disk slice $slicenum to $size"
return 0
@@ -676,16 +676,16 @@ function get_endslice #<disk> <slice>
disk=${disk%s*}
typeset -i ratio=0
- ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
- $GREP "sectors\/cylinder" | \
- $AWK '{print $2}')
+ ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
+ grep "sectors\/cylinder" | \
+ awk '{print $2}')
if ((ratio == 0)); then
return
fi
- typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
- $NAWK -v token="$slice" '{if ($1==token) print $6}')
+ typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
+ nawk -v token="$slice" '{if ($1==token) print $6}')
((endcyl = (endcyl + 1) / ratio))
echo $endcyl
@@ -719,7 +719,7 @@ function partition_disk #<slice_size> <whole_disk_name> <total_slices>
#
# This function continues to write to a filenum number of files into dirnum
-# number of directories until either $FILE_WRITE returns an error or the
+# number of directories until either file_write returns an error or the
# maximum number of files per directory have been written.
#
# Usage:
@@ -755,13 +755,13 @@ function fill_fs # destdir dirnum filenum bytes num_writes data
typeset -i fn=0
typeset -i retval=0
- log_must $MKDIR -p $destdir/$idirnum
+ log_must mkdir -p $destdir/$idirnum
while (($odirnum > 0)); do
if ((dirnum >= 0 && idirnum >= dirnum)); then
odirnum=0
break
fi
- $FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
+ file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
-b $bytes -c $num_writes -d $data
retval=$?
if (($retval != 0)); then
@@ -771,7 +771,7 @@ function fill_fs # destdir dirnum filenum bytes num_writes data
if (($fn >= $filenum)); then
fn=0
((idirnum = idirnum + 1))
- log_must $MKDIR -p $destdir/$idirnum
+ log_must mkdir -p $destdir/$idirnum
else
((fn = fn + 1))
fi
@@ -791,14 +791,14 @@ function get_prop # property dataset
typeset prop=$1
typeset dataset=$2
- prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
+ prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
if [[ $? -ne 0 ]]; then
log_note "Unable to get $prop property for dataset " \
"$dataset"
return 1
fi
- $ECHO $prop_val
+ echo $prop_val
return 0
}
@@ -813,8 +813,8 @@ function get_pool_prop # property pool
typeset pool=$2
if poolexists $pool ; then
- prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
- $AWK '{print $3}')
+ prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
+ awk '{print $3}')
if [[ $? -ne 0 ]]; then
log_note "Unable to get $prop property for pool " \
"$pool"
@@ -825,7 +825,7 @@ function get_pool_prop # property pool
return 1
fi
- $ECHO $prop_val
+ echo $prop_val
return 0
}
@@ -842,7 +842,7 @@ function poolexists
return 1
fi
- $ZPOOL get name "$pool" > /dev/null 2>&1
+ zpool get name "$pool" > /dev/null 2>&1
return $?
}
@@ -857,7 +857,7 @@ function datasetexists
fi
while (($# > 0)); do
- $ZFS get name $1 > /dev/null 2>&1 || \
+ zfs get name $1 > /dev/null 2>&1 || \
return $?
shift
done
@@ -876,7 +876,7 @@ function datasetnonexists
fi
while (($# > 0)); do
- $ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
+ zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
&& return 1
shift
done
@@ -908,13 +908,13 @@ function is_shared
fi
fi
- for mtpt in `$SHARE | $AWK '{print $2}'` ; do
+ for mtpt in `share | awk '{print $2}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
- typeset stat=$($SVCS -H -o STA nfs/server:default)
+ typeset stat=$(svcs -H -o STA nfs/server:default)
if [[ $stat != "ON" ]]; then
log_note "Current nfs/server status: $stat"
fi
@@ -948,7 +948,7 @@ function unshare_fs #fs
is_shared $fs
if (($? == 0)); then
- log_must $ZFS unshare $fs
+ log_must zfs unshare $fs
fi
return 0
@@ -967,7 +967,7 @@ function setup_nfs_server
fi
typeset nfs_fmri="svc:/network/nfs/server:default"
- if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
+ if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
#
# Only really sharing operation can enable NFS server
# to online permanently.
@@ -975,11 +975,11 @@ function setup_nfs_server
typeset dummy=/tmp/dummy
if [[ -d $dummy ]]; then
- log_must $RM -rf $dummy
+ log_must rm -rf $dummy
fi
- log_must $MKDIR $dummy
- log_must $SHARE $dummy
+ log_must mkdir $dummy
+ log_must share $dummy
#
# Waiting for fmri's status to be the final status.
@@ -988,20 +988,20 @@ function setup_nfs_server
#
# Waiting for 1's at least.
#
- log_must $SLEEP 1
+ log_must sleep 1
timeout=10
- while [[ timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
+ while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
do
- log_must $SLEEP 1
+ log_must sleep 1
((timeout -= 1))
done
- log_must $UNSHARE $dummy
- log_must $RM -rf $dummy
+ log_must unshare $dummy
+ log_must rm -rf $dummy
fi
- log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
+ log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
#
@@ -1011,7 +1011,7 @@ function setup_nfs_server
#
function is_global_zone
{
- typeset cur_zone=$($ZONENAME 2>/dev/null)
+ typeset cur_zone=$(zonename 2>/dev/null)
if [[ $cur_zone != "global" ]]; then
return 1
fi
@@ -1083,8 +1083,8 @@ function create_pool #pool devs_list
fi
if is_global_zone ; then
- [[ -d /$pool ]] && $RM -rf /$pool
- log_must $ZPOOL create -f $pool $@
+ [[ -d /$pool ]] && rm -rf /$pool
+ log_must zpool create -f $pool $@
fi
return 0
@@ -1117,16 +1117,16 @@ function destroy_pool #pool
typeset -i wait_time=10 ret=1 count=0
must=""
while [[ $ret -ne 0 ]]; do
- $must $ZPOOL destroy -f $pool
+ $must zpool destroy -f $pool
ret=$?
[[ $ret -eq 0 ]] && break
log_note "zpool destroy failed with $ret"
[[ count++ -ge 7 ]] && must=log_must
- $SLEEP $wait_time
+ sleep $wait_time
done
[[ -d $mtpt ]] && \
- log_must $RM -rf $mtpt
+ log_must rm -rf $mtpt
else
log_note "Pool does not exist. ($pool)"
return 1
@@ -1157,96 +1157,96 @@ function zfs_zones_setup #zone_name zone_root zone_ip
# Create pool and 5 container within it
#
- [[ -d /$pool_name ]] && $RM -rf /$pool_name
- log_must $ZPOOL create -f $pool_name $DISKS
+ [[ -d /$pool_name ]] && rm -rf /$pool_name
+ log_must zpool create -f $pool_name $DISKS
while ((i < cntctr)); do
- log_must $ZFS create $pool_name/$prefix_ctr$i
+ log_must zfs create $pool_name/$prefix_ctr$i
((i += 1))
done
# create a zvol
- log_must $ZFS create -V 1g $pool_name/zone_zvol
+ log_must zfs create -V 1g $pool_name/zone_zvol
#
# If current system support slog, add slog device for pool
#
if verify_slog_support ; then
typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
- log_must $MKFILE $MINVDEVSIZE $sdevs
- log_must $ZPOOL add $pool_name log mirror $sdevs
+ log_must mkfile $MINVDEVSIZE $sdevs
+ log_must zpool add $pool_name log mirror $sdevs
fi
# this isn't supported just yet.
# Create a filesystem. In order to add this to
# the zone, it must have it's mountpoint set to 'legacy'
- # log_must $ZFS create $pool_name/zfs_filesystem
- # log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem
+ # log_must zfs create $pool_name/zfs_filesystem
+ # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
[[ -d $zone_root ]] && \
- log_must $RM -rf $zone_root/$zone_name
+ log_must rm -rf $zone_root/$zone_name
[[ ! -d $zone_root ]] && \
- log_must $MKDIR -p -m 0700 $zone_root/$zone_name
+ log_must mkdir -p -m 0700 $zone_root/$zone_name
# Create zone configure file and configure the zone
#
typeset zone_conf=/tmp/zone_conf.$$
- $ECHO "create" > $zone_conf
- $ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
- $ECHO "set autoboot=true" >> $zone_conf
+ echo "create" > $zone_conf
+ echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
+ echo "set autoboot=true" >> $zone_conf
i=0
while ((i < cntctr)); do
- $ECHO "add dataset" >> $zone_conf
- $ECHO "set name=$pool_name/$prefix_ctr$i" >> \
+ echo "add dataset" >> $zone_conf
+ echo "set name=$pool_name/$prefix_ctr$i" >> \
$zone_conf
- $ECHO "end" >> $zone_conf
+ echo "end" >> $zone_conf
((i += 1))
done
# add our zvol to the zone
- $ECHO "add device" >> $zone_conf
- $ECHO "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
- $ECHO "end" >> $zone_conf
+ echo "add device" >> $zone_conf
+ echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
+ echo "end" >> $zone_conf
# add a corresponding zvol rdsk to the zone
- $ECHO "add device" >> $zone_conf
- $ECHO "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
- $ECHO "end" >> $zone_conf
+ echo "add device" >> $zone_conf
+ echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
+ echo "end" >> $zone_conf
# once it's supported, we'll add our filesystem to the zone
- # $ECHO "add fs" >> $zone_conf
- # $ECHO "set type=zfs" >> $zone_conf
- # $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
- # $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
- # $ECHO "end" >> $zone_conf
+ # echo "add fs" >> $zone_conf
+ # echo "set type=zfs" >> $zone_conf
+ # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
+ # echo "set dir=/export/zfs_filesystem" >> $zone_conf
+ # echo "end" >> $zone_conf
- $ECHO "verify" >> $zone_conf
- $ECHO "commit" >> $zone_conf
- log_must $ZONECFG -z $zone_name -f $zone_conf
- log_must $RM -f $zone_conf
+ echo "verify" >> $zone_conf
+ echo "commit" >> $zone_conf
+ log_must zonecfg -z $zone_name -f $zone_conf
+ log_must rm -f $zone_conf
# Install the zone
- $ZONEADM -z $zone_name install
+ zoneadm -z $zone_name install
if (($? == 0)); then
- log_note "SUCCESS: $ZONEADM -z $zone_name install"
+ log_note "SUCCESS: zoneadm -z $zone_name install"
else
- log_fail "FAIL: $ZONEADM -z $zone_name install"
+ log_fail "FAIL: zoneadm -z $zone_name install"
fi
# Install sysidcfg file
#
typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
- $ECHO "system_locale=C" > $sysidcfg
- $ECHO "terminal=dtterm" >> $sysidcfg
- $ECHO "network_interface=primary {" >> $sysidcfg
- $ECHO "hostname=$zone_name" >> $sysidcfg
- $ECHO "}" >> $sysidcfg
- $ECHO "name_service=NONE" >> $sysidcfg
- $ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
- $ECHO "security_policy=NONE" >> $sysidcfg
- $ECHO "timezone=US/Eastern" >> $sysidcfg
+ echo "system_locale=C" > $sysidcfg
+ echo "terminal=dtterm" >> $sysidcfg
+ echo "network_interface=primary {" >> $sysidcfg
+ echo "hostname=$zone_name" >> $sysidcfg
+ echo "}" >> $sysidcfg
+ echo "name_service=NONE" >> $sysidcfg
+ echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
+ echo "security_policy=NONE" >> $sysidcfg
+ echo "timezone=US/Eastern" >> $sysidcfg
# Boot this zone
- log_must $ZONEADM -z $zone_name boot
+ log_must zoneadm -z $zone_name boot
}
#
@@ -1261,12 +1261,12 @@ function reexport_pool
if ((i == 0)); then
TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
if ! ismounted $TESTPOOL; then
- log_must $ZFS mount $TESTPOOL
+ log_must zfs mount $TESTPOOL
fi
else
eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
if eval ! ismounted \$TESTPOOL$i; then
- log_must eval $ZFS mount \$TESTPOOL$i
+ log_must eval zfs mount \$TESTPOOL$i
fi
fi
((i += 1))
@@ -1284,7 +1284,7 @@ function check_state # pool disk state{online,offline}
typeset disk=${2#/dev/dsk/}
typeset state=$3
- $ZPOOL status -v $pool | grep "$disk" \
+ zpool status -v $pool | grep "$disk" \
| grep -i "$state" > /dev/null 2>&1
return $?
@@ -1310,7 +1310,7 @@ function snapshot_mountpoint
log_fail "Error name of snapshot '$dataset'."
fi
- $ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
+ echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
#
@@ -1329,9 +1329,9 @@ function verify_filesys # pool filesystem dir
typeset dirs=$@
typeset search_path=""
- log_note "Calling $ZDB to verify filesystem '$filesys'"
- $ZFS unmount -a > /dev/null 2>&1
- log_must $ZPOOL export $pool
+ log_note "Calling zdb to verify filesystem '$filesys'"
+ zfs unmount -a > /dev/null 2>&1
+ log_must zpool export $pool
if [[ -n $dirs ]] ; then
for dir in $dirs ; do
@@ -1339,17 +1339,17 @@ function verify_filesys # pool filesystem dir
done
fi
- log_must $ZPOOL import $search_path $pool
+ log_must zpool import $search_path $pool
- $ZDB -cudi $filesys > $zdbout 2>&1
+ zdb -cudi $filesys > $zdbout 2>&1
if [[ $? != 0 ]]; then
- log_note "Output: $ZDB -cudi $filesys"
- $CAT $zdbout
- log_fail "$ZDB detected errors with: '$filesys'"
+ log_note "Output: zdb -cudi $filesys"
+ cat $zdbout
+ log_fail "zdb detected errors with: '$filesys'"
fi
- log_must $ZFS mount -a
- log_must $RM -rf $zdbout
+ log_must zfs mount -a
+ log_must rm -rf $zdbout
}
#
@@ -1359,11 +1359,11 @@ function get_disklist # pool
{
typeset disklist=""
- disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
- $GREP -v "\-\-\-\-\-" | \
- $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
+ disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
+ grep -v "\-\-\-\-\-" | \
+ egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
- $ECHO $disklist
+ echo $disklist
}
# /**
@@ -1384,14 +1384,14 @@ function stress_timeout
log_note "Waiting for child processes($cpids). " \
"It could last dozens of minutes, please be patient ..."
- log_must $SLEEP $TIMEOUT
+ log_must sleep $TIMEOUT
log_note "Killing child processes after ${TIMEOUT} stress timeout."
typeset pid
for pid in $cpids; do
- $PS -p $pid > /dev/null 2>&1
+ ps -p $pid > /dev/null 2>&1
if (($? == 0)); then
- log_must $KILL -USR1 $pid
+ log_must kill -USR1 $pid
fi
done
}
@@ -1465,9 +1465,9 @@ function check_pool_status # pool token keyword
typeset token=$2
typeset keyword=$3
- $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
+ zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
($1==token) {print $0}' \
- | $GREP -i "$keyword" > /dev/null 2>&1
+ | grep -i "$keyword" > /dev/null 2>&1
return $?
}
@@ -1539,17 +1539,17 @@ function verify_rsh_connect #rhost, username
{
typeset rhost=$1
typeset username=$2
- typeset rsh_cmd="$RSH -n"
+ typeset rsh_cmd="rsh -n"
typeset cur_user=
- $GETENT hosts $rhost >/dev/null 2>&1
+ getent hosts $rhost >/dev/null 2>&1
if (($? != 0)); then
log_note "$rhost cannot be found from" \
"administrative database."
return 1
fi
- $PING $rhost 3 >/dev/null 2>&1
+ ping $rhost 3 >/dev/null 2>&1
if (($? != 0)); then
log_note "$rhost is not reachable."
return 1
@@ -1559,11 +1559,11 @@ function verify_rsh_connect #rhost, username
rsh_cmd="$rsh_cmd -l $username"
cur_user="given user \"$username\""
else
- cur_user="current user \"`$LOGNAME`\""
+ cur_user="current user \"`logname`\""
fi
- if ! $rsh_cmd $rhost $TRUE; then
- log_note "$RSH to $rhost is not accessible" \
+ if ! $rsh_cmd $rhost true; then
+ log_note "rsh to $rhost is not accessible" \
"with $cur_user."
return 1
fi
@@ -1620,9 +1620,9 @@ function rsh_status
err_file=/tmp/${rhost}.$$.err
if ((${#ruser} == 0)); then
- rsh_str="$RSH -n"
+ rsh_str="rsh -n"
else
- rsh_str="$RSH -n -l $ruser"
+ rsh_str="rsh -n -l $ruser"
fi
$rsh_str $rhost /usr/bin/ksh -c "'$cmd_str; \
@@ -1630,16 +1630,16 @@ function rsh_status
>/dev/null 2>$err_file
ret=$?
if (($ret != 0)); then
- $CAT $err_file
- $RM -f $std_file $err_file
- log_fail "$RSH itself failed with exit code $ret..."
+ cat $err_file
+ rm -f $std_file $err_file
+ log_fail "rsh itself failed with exit code $ret..."
fi
- ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
- $CUT -d= -f2)
- (($ret != 0)) && $CAT $err_file >&2
+ ret=$(grep -v 'print -u 2' $err_file | grep 'status=' | \
+ cut -d= -f2)
+ (($ret != 0)) && cat $err_file >&2
- $RM -f $err_file >/dev/null 2>&1
+ rm -f $err_file >/dev/null 2>&1
return $ret
}
@@ -1652,10 +1652,10 @@ function get_remote_pkgpath
typeset rhost=$1
typeset pkgpath=""
- pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
- $CUT -d: -f2")
+ pkgpath=$(rsh -n $rhost "pkginfo -l SUNWstc-fs-zfs | grep BASEDIR: |\
+ cut -d: -f2")
- $ECHO $pkgpath
+ echo $pkgpath
}
#/**
@@ -1674,15 +1674,15 @@ function find_disks
dmpi=/tmp/dumpdev.$$
max_finddisksnum=${MAX_FINDDISKSNUM:-6}
- $SWAP -l > $sfi
- $DUMPADM > $dmpi 2>/dev/null
+ swap -l > $sfi
+ dumpadm > $dmpi 2>/dev/null
# write an awk script that can process the output of format
# to produce a list of disks we know about. Note that we have
# to escape "$2" so that the shell doesn't interpret it while
# we're creating the awk script.
# -------------------
- $CAT > /tmp/find_disks.awk <<EOF
+ cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
BEGIN { FS="."; }
@@ -1703,29 +1703,29 @@ function find_disks
EOF
#---------------------
- $CHMOD 755 /tmp/find_disks.awk
- disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
- $RM /tmp/find_disks.awk
+ chmod 755 /tmp/find_disks.awk
+ disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
+ rm /tmp/find_disks.awk
unused=""
for disk in $disks; do
# Check for mounted
- $GREP "${disk}[sp]" /etc/mnttab >/dev/null
+ grep "${disk}[sp]" /etc/mnttab >/dev/null
(($? == 0)) && continue
# Check for swap
- $GREP "${disk}[sp]" $sfi >/dev/null
+ grep "${disk}[sp]" $sfi >/dev/null
(($? == 0)) && continue
# check for dump device
- $GREP "${disk}[sp]" $dmpi >/dev/null
+ grep "${disk}[sp]" $dmpi >/dev/null
(($? == 0)) && continue
# check to see if this disk hasn't been explicitly excluded
# by a user-set environment variable
- $ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
+ echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
(($? == 0)) && continue
unused_candidates="$unused_candidates $disk"
done
- $RM $sfi
- $RM $dmpi
+ rm $sfi
+ rm $dmpi
# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
@@ -1742,7 +1742,7 @@ EOF
done
# finally, return our disk list
- $ECHO $unused
+ echo $unused
}
#
@@ -1762,7 +1762,7 @@ function add_user #<group_name> <user_name> <basedir>
log_fail "group name or user name are not defined."
fi
- log_must $USERADD -g $gname -d $basedir/$uname -m $uname
+ log_must useradd -g $gname -d $basedir/$uname -m $uname
return 0
}
@@ -1782,11 +1782,11 @@ function del_user #<logname> <basedir>
log_fail "login name is necessary."
fi
- if $ID $user > /dev/null 2>&1; then
- log_must $USERDEL $user
+ if id $user > /dev/null 2>&1; then
+ log_must userdel $user
fi
- [[ -d $basedir/$user ]] && $RM -fr $basedir/$user
+ [[ -d $basedir/$user ]] && rm -fr $basedir/$user
return 0
}
@@ -1807,7 +1807,7 @@ function add_group #<group_name>
# Assign 100 as the base gid
typeset -i gid=100
while true; do
- $GROUPADD -g $gid $group > /dev/null 2>&1
+ groupadd -g $gid $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
0) return 0 ;;
@@ -1830,13 +1830,13 @@ function del_group #<group_name>
log_fail "group name is necessary."
fi
- $GROUPMOD -n $grp $grp > /dev/null 2>&1
+ groupmod -n $grp $grp > /dev/null 2>&1
typeset -i ret=$?
case $ret in
# Group does not exist.
6) return 0 ;;
# Name already exists as a group name
- 9) log_must $GROUPDEL $grp ;;
+ 9) log_must groupdel $grp ;;
*) return 1 ;;
esac
@@ -1858,29 +1858,29 @@ function safe_to_destroy_pool { # $1 the pool name
# by looking at all other pools, ensuring that they
# aren't built from files or zvols contained in this pool.
- for pool in $($ZPOOL list -H -o name)
+ for pool in $(zpool list -H -o name)
do
ALTMOUNTPOOL=""
# this is a list of the top-level directories in each of the
# files that make up the path to the files the pool is based on
- FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
- $AWK '{print $1}')
+ FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
+ awk '{print $1}')
# this is a list of the zvols that make up the pool
- ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "/dev/zvol/dsk/$1$" \
- | $AWK '{print $1}')
+ ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
+ | awk '{print $1}')
# also want to determine if it's a file-based pool using an
# alternate mountpoint...
- POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
- $GREP / | $AWK '{print $1}' | \
- $AWK -F/ '{print $2}' | $GREP -v "dev")
+ POOL_FILE_DIRS=$(zpool status -v $pool | \
+ grep / | awk '{print $1}' | \
+ awk -F/ '{print $2}' | grep -v "dev")
for pooldir in $POOL_FILE_DIRS
do
- OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
- $GREP "${pooldir}$" | $AWK '{print $1}')
+ OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
+ grep "${pooldir}$" | awk '{print $1}')
ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
done
@@ -1930,11 +1930,11 @@ function get_compress_opts
COMPRESS_OPTS="on off lzjb"
fi
typeset valid_opts="$COMPRESS_OPTS"
- $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
+ zfs get 2>&1 | grep gzip >/dev/null 2>&1
if [[ $? -eq 0 ]]; then
valid_opts="$valid_opts $GZIP_OPTS"
fi
- $ECHO "$valid_opts"
+ echo "$valid_opts"
}
#
@@ -1987,22 +1987,22 @@ function verify_opt_p_ops
# make sure the upper level filesystem does not exist
if datasetexists ${newdataset%/*} ; then
- log_must $ZFS destroy -rRf ${newdataset%/*}
+ log_must zfs destroy -rRf ${newdataset%/*}
fi
# without -p option, operation will fail
- log_mustnot $ZFS $ops $dataset $newdataset
+ log_mustnot zfs $ops $dataset $newdataset
log_mustnot datasetexists $newdataset ${newdataset%/*}
# with -p option, operation should succeed
- log_must $ZFS $ops -p $dataset $newdataset
+ log_must zfs $ops -p $dataset $newdataset
if ! datasetexists $newdataset ; then
log_fail "-p option does not work for $ops"
fi
# when $ops is create or clone, redo the operation still return zero
if [[ $ops != "rename" ]]; then
- log_must $ZFS $ops -p $dataset $newdataset
+ log_must zfs $ops -p $dataset $newdataset
fi
return 0
@@ -2022,12 +2022,12 @@ function get_config
if ! poolexists "$pool" ; then
return 1
fi
- alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
+ alt_root=$(zpool list -H $pool | awk '{print $NF}')
if [[ $alt_root == "-" ]]; then
- value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
+ value=$(zdb -C $pool | grep "$config:" | awk -F: \
'{print $2}')
else
- value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
+ value=$(zdb -e $pool | grep "$config:" | awk -F: \
'{print $2}')
fi
if [[ -n $value ]] ; then
@@ -2054,8 +2054,8 @@ function _random_get
typeset -i ind
((ind = RANDOM % cnt + 1))
- typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
- $ECHO $ret
+ typeset ret=$(echo "$str" | cut -f $ind -d ' ')
+ echo $ret
}
#
@@ -2087,14 +2087,14 @@ function verify_slog_support
typeset vdev=$dir/a
typeset sdev=$dir/b
- $MKDIR -p $dir
- $MKFILE $MINVDEVSIZE $vdev $sdev
+ mkdir -p $dir
+ mkfile $MINVDEVSIZE $vdev $sdev
typeset -i ret=0
- if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then
+ if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
ret=1
fi
- $RM -r $dir
+ rm -r $dir
return $ret
}
@@ -2123,7 +2123,7 @@ function gen_dataset_name
((iter -= 1))
done
- $ECHO $l_name
+ echo $l_name
}
#
@@ -2138,10 +2138,10 @@ function gen_dataset_name
function datasetcksum
{
typeset cksum
- $SYNC
- cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
- | $AWK -F= '{print $7}')
- $ECHO $cksum
+ sync
+ cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
+ | awk -F= '{print $7}')
+ echo $cksum
}
#
@@ -2151,8 +2151,8 @@ function datasetcksum
function checksum
{
typeset cksum
- cksum=$($CKSUM $1 | $AWK '{print $1}')
- $ECHO $cksum
+ cksum=$(cksum $1 | awk '{print $1}')
+ echo $cksum
}
#
@@ -2164,8 +2164,8 @@ function get_device_state #pool disk field("", "spares","logs")
typeset disk=${2#/dev/dsk/}
typeset field=${3:-$pool}
- state=$($ZPOOL status -v "$pool" 2>/dev/null | \
- $NAWK -v device=$disk -v pool=$pool -v field=$field \
+ state=$(zpool status -v "$pool" 2>/dev/null | \
+ nawk -v device=$disk -v pool=$pool -v field=$field \
'BEGIN {startconfig=0; startfield=0; }
/config:/ {startconfig=1}
(startconfig==1) && ($1==field) {startfield=1; next;}
@@ -2193,7 +2193,7 @@ function get_fstype
# $ df -n /
# / : ufs
#
- $DF -n $dir | $AWK '{print $3}'
+ df -n $dir | awk '{print $3}'
}
#
@@ -2207,36 +2207,36 @@ function labelvtoc
log_fail "The disk name is unspecified."
fi
typeset label_file=/var/tmp/labelvtoc.$$
- typeset arch=$($UNAME -p)
+ typeset arch=$(uname -p)
if [[ $arch == "i386" ]]; then
- $ECHO "label" > $label_file
- $ECHO "0" >> $label_file
- $ECHO "" >> $label_file
- $ECHO "q" >> $label_file
- $ECHO "q" >> $label_file
+ echo "label" > $label_file
+ echo "0" >> $label_file
+ echo "" >> $label_file
+ echo "q" >> $label_file
+ echo "q" >> $label_file
- $FDISK -B $disk >/dev/null 2>&1
+ fdisk -B $disk >/dev/null 2>&1
# wait a while for fdisk finishes
- $SLEEP 60
+ sleep 60
elif [[ $arch == "sparc" ]]; then
- $ECHO "label" > $label_file
- $ECHO "0" >> $label_file
- $ECHO "" >> $label_file
- $ECHO "" >> $label_file
- $ECHO "" >> $label_file
- $ECHO "q" >> $label_file
+ echo "label" > $label_file
+ echo "0" >> $label_file
+ echo "" >> $label_file
+ echo "" >> $label_file
+ echo "" >> $label_file
+ echo "q" >> $label_file
else
log_fail "unknown arch type"
fi
- $FORMAT -e -s -d $disk -f $label_file
+ format -e -s -d $disk -f $label_file
typeset -i ret_val=$?
- $RM -f $label_file
+ rm -f $label_file
#
# wait the format to finish
#
- $SLEEP 60
+ sleep 60
if ((ret_val != 0)); then
log_fail "unable to label $disk as VTOC."
fi
@@ -2250,7 +2250,7 @@ function labelvtoc
#
function is_zfsroot
{
- $DF -n / | $GREP zfs > /dev/null 2>&1
+ df -n / | grep zfs > /dev/null 2>&1
return $?
}
@@ -2261,14 +2261,14 @@ function is_zfsroot
function get_rootfs
{
typeset rootfs=""
- rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
+ rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
/etc/mnttab)
if [[ -z "$rootfs" ]]; then
log_fail "Can not get rootfs"
fi
- $ZFS list $rootfs > /dev/null 2>&1
+ zfs list $rootfs > /dev/null 2>&1
if (($? == 0)); then
- $ECHO $rootfs
+ echo $rootfs
else
log_fail "This is not a zfsroot system."
fi
@@ -2283,15 +2283,15 @@ function get_rootpool
{
typeset rootfs=""
typeset rootpool=""
- rootfs=$($AWK '{if ($2 == "/" && $3 =="zfs") print $1}' \
+ rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
/etc/mnttab)
if [[ -z "$rootfs" ]]; then
log_fail "Can not get rootpool"
fi
- $ZFS list $rootfs > /dev/null 2>&1
+ zfs list $rootfs > /dev/null 2>&1
if (($? == 0)); then
- rootpool=`$ECHO $rootfs | awk -F\/ '{print $1}'`
- $ECHO $rootpool
+ rootpool=`echo $rootfs | awk -F\/ '{print $1}'`
+ echo $rootpool
else
log_fail "This is not a zfsroot system."
fi
@@ -2305,7 +2305,7 @@ function is_physical_device #device
typeset device=${1#/dev/dsk/}
device=${device#/dev/rdsk/}
- $ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
+ echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
return $?
}
@@ -2320,9 +2320,9 @@ function get_device_dir #device
if [[ $device != "/" ]]; then
device=${device%/*}
fi
- $ECHO $device
+ echo $device
else
- $ECHO "/dev/dsk"
+ echo "/dev/dsk"
fi
}
@@ -2341,7 +2341,7 @@ function get_package_name
#
function get_word_count
{
- $ECHO $1 | $WC -w
+ echo $1 | wc -w
}
#
@@ -2385,7 +2385,7 @@ function ds_is_snapshot
#
function is_te_enabled
{
- $SVCS -H -o state labeld 2>/dev/null | $GREP "enabled"
+ svcs -H -o state labeld 2>/dev/null | grep "enabled"
if (($? != 0)); then
return 1
else
@@ -2396,12 +2396,12 @@ function is_te_enabled
# Utility function to determine if a system has multiple cpus.
function is_mp
{
- (($($PSRINFO | $WC -l) > 1))
+ (($(psrinfo | wc -l) > 1))
}
function get_cpu_freq
{
- $PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}'
+ psrinfo -v 0 | awk '/processor operates at/ {print $6}'
}
# Run the given command as the user provided.
@@ -2410,7 +2410,7 @@ function user_run
typeset user=$1
shift
- eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err
+ eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
return $?
}
@@ -2435,14 +2435,14 @@ function vdevs_in_pool
shift
- typeset tmpfile=$($MKTEMP)
- $ZPOOL list -Hv "$pool" >$tmpfile
+ typeset tmpfile=$(mktemp)
+ zpool list -Hv "$pool" >$tmpfile
for vdev in $@; do
- $GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1
+ grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
[[ $? -ne 0 ]] && return 1
done
- $RM -f $tmpfile
+ rm -f $tmpfile
return 0;
}
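
For context, a minimal sketch of how a zfs-tests case consumes these helpers after the change; the test body is illustrative, not part of this commit:

#!/bin/ksh -p
. $STF_SUITE/include/libtest.shlib

# Helpers such as log_must and log_pass now run bare command names; TESTPOOL,
# TESTFS, TESTDIR and DISKS come from the suite's environment.
log_must zpool create -f $TESTPOOL $DISKS
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
log_pass "pool and filesystem created using bare command names"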