summaryrefslogtreecommitdiff
path: root/usr
diff options
context:
space:
mode:
authorJerry Jelinek <jerry.jelinek@joyent.com>2019-05-30 11:32:30 +0000
committerJerry Jelinek <jerry.jelinek@joyent.com>2019-05-30 11:32:30 +0000
commit1205f1a47f18f403254c28de55875156744b9e0a (patch)
tree9756c8b0e0c83d523cd1223ed503a012a923b22d /usr
parentfd807bef73987a10c8851965bae1f29ababa5f2b (diff)
parent4fbfc69b99ccacf0999510e626df37e53b3d56ef (diff)
downloadillumos-joyent-1205f1a47f18f403254c28de55875156744b9e0a.tar.gz
[illumos-gate merge]
commit 4fbfc69b99ccacf0999510e626df37e53b3d56ef 10991 Allow user ACE in ACL to match SID in token extra SIDs commit 10a268c1c2abd3fd9cc708870a3020567adeb92c 11067 debug statistics crash ld(1) when -z allextract commit 0bc0887e1cf0f912077b83256f295ad0ed1c715c 11057 hidden undefined weak symbols should not leave relocations 11058 libld entrance descriptor assertions get NDEBUG check backwards commit b3b3563308068a7fafd98cf3021c73aedc1caa6c 11060 sys/zone.h exposes more than it should commit 810e978011ea80127b0a15368276459b566a3241 10989 Ricoh scanner error after DFS get referral commit a1096253735019dce07e79caf2df1e13078770fb 10988 SMB should not create in directories marked delete-on-close commit b5b772b09624dfff0e83299d0f7b895129f7bf26 8583 Windows 10 fails to delete read-only files with SMB2 commit 575d359d36700f4571e4a87c94966a99b8b7f48a 10987 SMB1 fails renaming an open file commit 91ca6bffd299a6eb6fd63dd0ef807bbd6fb4f835 10985 Hitting file handler count limit of 16k commit e4c795beb33bf59dd4ad2e3f88f493111484b890 10952 defer new resilvers and misc. resilver-related fixes commit 1b55eab738595803f2f751ac1714f659b2e48b9b 11062 psrinfo: comparison between pointer and integer commit 80bd8a585c4ef2f19c09c9c4379635e411c91fe6 11063 acl_common.c: error: comparison between pointer and integer commit 84ce06cea75304aa6dec12e94975d5372dd9c672 11072 Deadlock in lofi after 11043 Conflicts: usr/src/uts/common/sys/zone.h
Diffstat (limited to 'usr')
-rw-r--r--usr/src/cmd/lp/include/printers.h10
-rw-r--r--usr/src/cmd/psrinfo/psrinfo.c2
-rw-r--r--usr/src/cmd/sgs/libld/common/entry.c2
-rw-r--r--usr/src/cmd/sgs/libld/common/machrel.amd.c30
-rw-r--r--usr/src/cmd/sgs/libld/common/machrel.intel.c30
-rw-r--r--usr/src/cmd/sgs/libld/common/machrel.sparc.c42
-rw-r--r--usr/src/cmd/sgs/liblddbg/common/statistics.c5
-rw-r--r--usr/src/cmd/sgs/packages/common/SUNWonld-README2
-rw-r--r--usr/src/cmd/smbsrv/smbstat/smbstat.c1
-rw-r--r--usr/src/cmd/zpool/zpool_main.c57
-rw-r--r--usr/src/common/acl/acl_common.c4
-rw-r--r--usr/src/common/zfs/zfeature_common.c7
-rw-r--r--usr/src/common/zfs/zfeature_common.h1
-rw-r--r--usr/src/lib/libzfs/common/libzfs.h1
-rw-r--r--usr/src/lib/libzfs/common/libzfs_pool.c6
-rw-r--r--usr/src/lib/libzfs/common/libzfs_util.c3
-rw-r--r--usr/src/man/man1m/zpool.1m18
-rw-r--r--usr/src/man/man5/zpool-features.523
-rw-r--r--usr/src/pkg/manifests/system-test-zfstest.mf45
-rw-r--r--usr/src/test/zfs-tests/runfiles/delphix.run7
-rw-r--r--usr/src/test/zfs-tests/runfiles/omnios.run7
-rw-r--r--usr/src/test/zfs-tests/runfiles/openindiana.run7
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg1
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh2
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh16
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh14
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/Makefile21
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/cleanup.ksh36
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/setup.ksh34
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.cfg49
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib124
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_001_pos.ksh70
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_002_pos.ksh70
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_003_pos.ksh101
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_004_pos.ksh88
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_005_pos.ksh86
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_006_neg.ksh43
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_007_pos.ksh67
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/Makefile21
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/cleanup.ksh33
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/setup.ksh39
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver.cfg30
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_bad_args.ksh71
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh86
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh2
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh11
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh8
-rw-r--r--usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh26
-rwxr-xr-xusr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh4
-rw-r--r--usr/src/test/zfs-tests/tests/functional/removal/removal.kshlib4
-rw-r--r--usr/src/uts/common/fs/smbsrv/smb_common_open.c17
-rw-r--r--usr/src/uts/common/fs/smbsrv/smb_node.c27
-rw-r--r--usr/src/uts/common/fs/smbsrv/smb_ofile.c23
-rw-r--r--usr/src/uts/common/fs/smbsrv/smb_trans2_dfs.c12
-rw-r--r--usr/src/uts/common/fs/zfs/dsl_scan.c142
-rw-r--r--usr/src/uts/common/fs/zfs/spa.c20
-rw-r--r--usr/src/uts/common/fs/zfs/sys/spa_impl.h7
-rw-r--r--usr/src/uts/common/fs/zfs/sys/vdev.h2
-rw-r--r--usr/src/uts/common/fs/zfs/sys/vdev_impl.h1
-rw-r--r--usr/src/uts/common/fs/zfs/sys/zfs_fuid.h7
-rw-r--r--usr/src/uts/common/fs/zfs/vdev.c42
-rw-r--r--usr/src/uts/common/fs/zfs/vdev_indirect.c23
-rw-r--r--usr/src/uts/common/fs/zfs/vdev_label.c6
-rw-r--r--usr/src/uts/common/fs/zfs/vdev_removal.c6
-rw-r--r--usr/src/uts/common/fs/zfs/zfs_acl.c37
-rw-r--r--usr/src/uts/common/fs/zfs/zfs_fuid.c56
-rw-r--r--usr/src/uts/common/fs/zfs/zil.c2
-rw-r--r--usr/src/uts/common/io/lofi.c43
-rw-r--r--usr/src/uts/common/smbsrv/smb_ktypes.h2
-rw-r--r--usr/src/uts/common/sys/fs/zfs.h2
-rw-r--r--usr/src/uts/common/sys/tsol/tndb.h4
-rw-r--r--usr/src/uts/common/sys/zone.h28
72 files changed, 1766 insertions, 210 deletions
diff --git a/usr/src/cmd/lp/include/printers.h b/usr/src/cmd/lp/include/printers.h
index ff66ead90c..915f9a6eb3 100644
--- a/usr/src/cmd/lp/include/printers.h
+++ b/usr/src/cmd/lp/include/printers.h
@@ -32,16 +32,6 @@
#define _LP_PRINTERS_H
/*
- * The following conflicts are also defined in sys/processor.h.
- */
-#if defined PS_FAULTED
-#undef PS_FAULTED
-#endif
-#if defined PS_DISABLED
-#undef PS_DISABLED
-#endif
-
-/*
* Define the following to support administrator configurable
* streams modules:
*/
diff --git a/usr/src/cmd/psrinfo/psrinfo.c b/usr/src/cmd/psrinfo/psrinfo.c
index 6ee6291f30..1e237b581f 100644
--- a/usr/src/cmd/psrinfo/psrinfo.c
+++ b/usr/src/cmd/psrinfo/psrinfo.c
@@ -752,7 +752,7 @@ nocpuid:
return (EXIT_SUCCESS);
}
- if (opt_t != NULL) {
+ if (opt_t != 0) {
if (optind != argc)
usage(_("cannot specify CPUs with -t"));
if (opt_s || opt_v)
diff --git a/usr/src/cmd/sgs/libld/common/entry.c b/usr/src/cmd/sgs/libld/common/entry.c
index 396075d09f..f55e2cf723 100644
--- a/usr/src/cmd/sgs/libld/common/entry.c
+++ b/usr/src/cmd/sgs/libld/common/entry.c
@@ -472,7 +472,7 @@ ld_ent_setup(Ofl_desc *ofl, Xword segalign)
AL_CNT_SEGMENTS)) == NULL)
return (S_ERROR);
-#ifdef NDEBUG /* assert() is enabled */
+#ifndef NDEBUG /* assert() is enabled */
/*
* Enforce the segment name rule: Any segment that can
* be referenced by an entrance descriptor must have
diff --git a/usr/src/cmd/sgs/libld/common/machrel.amd.c b/usr/src/cmd/sgs/libld/common/machrel.amd.c
index ebf7cc5a8f..dbdae79bd1 100644
--- a/usr/src/cmd/sgs/libld/common/machrel.amd.c
+++ b/usr/src/cmd/sgs/libld/common/machrel.amd.c
@@ -435,7 +435,20 @@ ld_perform_outreloc(Rel_desc * orsp, Ofl_desc * ofl, Boolean *remain_seen)
if (orsp->rel_rtype == R_AMD64_DTPMOD64)
raddend = 0;
- relbits = (char *)relosp->os_outdata->d_buf;
+ if ((orsp->rel_rtype != M_R_NONE) &&
+ (orsp->rel_rtype != M_R_RELATIVE)) {
+ if (ndx == 0) {
+ Conv_inv_buf_t inv_buf;
+ Is_desc *isp = orsp->rel_isdesc;
+
+ ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_REL_NOSYMBOL),
+ conv_reloc_type(ofl->ofl_nehdr->e_machine,
+ orsp->rel_rtype, 0, &inv_buf),
+ isp->is_file->ifl_name, EC_WORD(isp->is_scnndx),
+ isp->is_name, EC_XWORD(roffset));
+ return (S_ERROR);
+ }
+ }
rea.r_info = ELF_R_INFO(ndx, orsp->rel_rtype);
rea.r_offset = roffset;
@@ -448,6 +461,8 @@ ld_perform_outreloc(Rel_desc * orsp, Ofl_desc * ofl, Boolean *remain_seen)
*/
assert(relosp->os_szoutrels <= relosp->os_shdr->sh_size);
+ relbits = (char *)relosp->os_outdata->d_buf;
+
(void) memcpy((relbits + relosp->os_szoutrels),
(char *)&rea, sizeof (Rela));
relosp->os_szoutrels += (Xword)sizeof (Rela);
@@ -1135,6 +1150,19 @@ ld_add_outrel(Word flags, Rel_desc *rsp, Ofl_desc *ofl)
return (1);
/*
+ * If the symbol will be reduced, we can't leave outstanding
+ * relocations against it, as nothing will ever be able to satisfy them
+ * (and the symbol won't be in .dynsym).
+ */
+ if ((sdp != NULL) &&
+ (sdp->sd_sym->st_shndx == SHN_UNDEF) &&
+ (rsp->rel_rtype != M_R_NONE) &&
+ (rsp->rel_rtype != M_R_RELATIVE)) {
+ if (ld_sym_reducable(ofl, sdp))
+ return (1);
+ }
+
+ /*
* If we are adding a output relocation against a section
* symbol (non-RELATIVE) then mark that section. These sections
* will be added to the .dynsym symbol table.
diff --git a/usr/src/cmd/sgs/libld/common/machrel.intel.c b/usr/src/cmd/sgs/libld/common/machrel.intel.c
index 28999e7d89..d2b72c78a6 100644
--- a/usr/src/cmd/sgs/libld/common/machrel.intel.c
+++ b/usr/src/cmd/sgs/libld/common/machrel.intel.c
@@ -355,7 +355,20 @@ ld_perform_outreloc(Rel_desc * orsp, Ofl_desc * ofl, Boolean *remain_seen)
return (S_ERROR);
}
- relbits = (char *)relosp->os_outdata->d_buf;
+ if ((orsp->rel_rtype != M_R_NONE) &&
+ (orsp->rel_rtype != M_R_RELATIVE)) {
+ if (ndx == 0) {
+ Conv_inv_buf_t inv_buf;
+ Is_desc *isp = orsp->rel_isdesc;
+
+ ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_REL_NOSYMBOL),
+ conv_reloc_type(ofl->ofl_nehdr->e_machine,
+ orsp->rel_rtype, 0, &inv_buf),
+ isp->is_file->ifl_name, EC_WORD(isp->is_scnndx),
+ isp->is_name, EC_XWORD(roffset));
+ return (S_ERROR);
+ }
+ }
rea.r_info = ELF_R_INFO(ndx, orsp->rel_rtype);
rea.r_offset = roffset;
@@ -367,6 +380,8 @@ ld_perform_outreloc(Rel_desc * orsp, Ofl_desc * ofl, Boolean *remain_seen)
*/
assert(relosp->os_szoutrels <= relosp->os_shdr->sh_size);
+ relbits = (char *)relosp->os_outdata->d_buf;
+
(void) memcpy((relbits + relosp->os_szoutrels),
(char *)&rea, sizeof (Rel));
relosp->os_szoutrels += sizeof (Rel);
@@ -1136,6 +1151,19 @@ ld_add_outrel(Word flags, Rel_desc *rsp, Ofl_desc *ofl)
return (1);
/*
+ * If the symbol will be reduced, we can't leave outstanding
+ * relocations against it, as nothing will ever be able to satisfy them
+ * (and the symbol won't be in .dynsym).
+ */
+ if ((sdp != NULL) &&
+ (sdp->sd_sym->st_shndx == SHN_UNDEF) &&
+ (rsp->rel_rtype != M_R_NONE) &&
+ (rsp->rel_rtype != M_R_RELATIVE)) {
+ if (ld_sym_reducable(ofl, sdp))
+ return (1);
+ }
+
+ /*
* If we are adding a output relocation against a section
* symbol (non-RELATIVE) then mark that section. These sections
* will be added to the .dynsym symbol table.
diff --git a/usr/src/cmd/sgs/libld/common/machrel.sparc.c b/usr/src/cmd/sgs/libld/common/machrel.sparc.c
index 6b56e9c444..02d180b1c2 100644
--- a/usr/src/cmd/sgs/libld/common/machrel.sparc.c
+++ b/usr/src/cmd/sgs/libld/common/machrel.sparc.c
@@ -145,7 +145,7 @@ ld_mach_eflags(Ehdr *ehdr, Ofl_desc *ofl)
* Determine which memory model to mark the binary with. The options
* are (most restrictive to least):
*
- * EF_SPARCV9_TSO 0x0 Total Store Order
+ * EF_SPARCV9_TSO 0x0 Total Store Order
* EF_SPARCV9_PSO 0x1 Partial Store Order
* EF_SPARCV9_RMO 0x2 Relaxed Memory Order
*
@@ -779,7 +779,25 @@ ld_perform_outreloc(Rel_desc *orsp, Ofl_desc *ofl, Boolean *remain_seen)
if (orsp->rel_rtype == M_R_DTPMOD)
raddend = 0;
- relbits = (char *)relosp->os_outdata->d_buf;
+ /*
+ * Note that the other case which writes out the relocation, above, is
+ * M_R_REGISTER specific and so does not need this check.
+ */
+ if ((orsp->rel_rtype != M_R_NONE) &&
+ (orsp->rel_rtype != M_R_REGISTER) &&
+ (orsp->rel_rtype != M_R_RELATIVE)) {
+ if (ndx == 0) {
+ Conv_inv_buf_t inv_buf;
+ Is_desc *isp = orsp->rel_isdesc;
+
+ ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_REL_NOSYMBOL),
+ conv_reloc_type(ofl->ofl_nehdr->e_machine,
+ orsp->rel_rtype, 0, &inv_buf),
+ isp->is_file->ifl_name, EC_WORD(isp->is_scnndx),
+ isp->is_name, EC_XWORD(roffset));
+ return (S_ERROR);
+ }
+ }
rea.r_info = ELF_R_INFO(ndx,
ELF_R_TYPE_INFO(RELAUX_GET_TYPEDATA(orsp), orsp->rel_rtype));
@@ -793,6 +811,8 @@ ld_perform_outreloc(Rel_desc *orsp, Ofl_desc *ofl, Boolean *remain_seen)
*/
assert(relosp->os_szoutrels <= relosp->os_shdr->sh_size);
+ relbits = (char *)relosp->os_outdata->d_buf;
+
(void) memcpy((relbits + relosp->os_szoutrels),
(char *)&rea, sizeof (Rela));
relosp->os_szoutrels += (Xword)sizeof (Rela);
@@ -927,7 +947,7 @@ tls_fixups(Ofl_desc *ofl, Rel_desc *arsp)
case R_SPARC_TLS_IE_LDX:
/*
* Current instruction:
- * ld{x} [r1 + r2], r3
+ * ld{x} [r1 + r2], r3
*
* Need to update this to:
*
@@ -1009,7 +1029,7 @@ gotop_fixups(Ofl_desc *ofl, Rel_desc *arsp)
case R_SPARC_GOTDATA_OP:
/*
* Current instruction:
- * ld{x} [r1 + r2], r3
+ * ld{x} [r1 + r2], r3
*
* Need to update this to:
*
@@ -1411,6 +1431,20 @@ ld_add_outrel(Word flags, Rel_desc *rsp, Ofl_desc *ofl)
return (1);
/*
+ * If the symbol will be reduced, we can't leave outstanding
+ * relocations against it, as nothing will ever be able to satisfy them
+ * (and the symbol won't be in .dynsym).
+ */
+ if ((sdp != NULL) &&
+ (sdp->sd_sym->st_shndx == SHN_UNDEF) &&
+ (rsp->rel_rtype != M_R_NONE) &&
+ (rsp->rel_rtype != M_R_REGISTER) &&
+ (rsp->rel_rtype != M_R_RELATIVE)) {
+ if (ld_sym_reducable(ofl, sdp))
+ return (1);
+ }
+
+ /*
* Certain relocations do not make sense in a 64bit shared object,
* if building a shared object do a sanity check on the output
* relocations being created.
diff --git a/usr/src/cmd/sgs/liblddbg/common/statistics.c b/usr/src/cmd/sgs/liblddbg/common/statistics.c
index 223440208f..3346a7b708 100644
--- a/usr/src/cmd/sgs/liblddbg/common/statistics.c
+++ b/usr/src/cmd/sgs/liblddbg/common/statistics.c
@@ -184,7 +184,7 @@ Dbg_statistics_ar(Ofl_desc *ofl)
arsym = adp->ad_start;
aux = adp->ad_aux;
- while (arsym->as_off) {
+ while ((arsym != NULL) && (arsym->as_off != NULL)) {
/*
* Assume that symbols from the same member file are
* adjacent within the archive symbol table.
@@ -199,10 +199,9 @@ Dbg_statistics_ar(Ofl_desc *ofl)
}
if ((count == 0) || (used == 0))
continue;
-#ifndef UDIV_NOT_SUPPORTED
+
dbg_print(lml, MSG_INTL(MSG_STATS_AR), adp->ad_name, count,
used, ((used * 100) / count));
-#endif
}
Dbg_util_nl(lml, DBG_NL_STD);
}
diff --git a/usr/src/cmd/sgs/packages/common/SUNWonld-README b/usr/src/cmd/sgs/packages/common/SUNWonld-README
index 2df3146d03..731e52c973 100644
--- a/usr/src/cmd/sgs/packages/common/SUNWonld-README
+++ b/usr/src/cmd/sgs/packages/common/SUNWonld-README
@@ -1667,3 +1667,5 @@ Bugid Risk Synopsis
producing relocatable objects
10366 ld(1) should support GNU-style linker sets
10581 ld(1) should know kernel modules are a thing
+11057 hidden undefined weak symbols should not leave relocations
+11067 debug statistics crash ld(1) when -z allextract
diff --git a/usr/src/cmd/smbsrv/smbstat/smbstat.c b/usr/src/cmd/smbsrv/smbstat/smbstat.c
index 9982ddfad0..b881165706 100644
--- a/usr/src/cmd/smbsrv/smbstat/smbstat.c
+++ b/usr/src/cmd/smbsrv/smbstat/smbstat.c
@@ -102,6 +102,7 @@
#include <math.h>
#include <umem.h>
#include <locale.h>
+#include <sys/processor.h>
#include <smbsrv/smb_kstat.h>
#if !defined(TEXT_DOMAIN)
diff --git a/usr/src/cmd/zpool/zpool_main.c b/usr/src/cmd/zpool/zpool_main.c
index 267afda023..50c51308a6 100644
--- a/usr/src/cmd/zpool/zpool_main.c
+++ b/usr/src/cmd/zpool/zpool_main.c
@@ -90,6 +90,7 @@ static int zpool_do_split(int, char **);
static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
+static int zpool_do_resilver(int, char **);
static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);
@@ -142,6 +143,7 @@ typedef enum {
HELP_REMOVE,
HELP_INITIALIZE,
HELP_SCRUB,
+ HELP_RESILVER,
HELP_STATUS,
HELP_UPGRADE,
HELP_GET,
@@ -194,6 +196,7 @@ static zpool_command_t command_table[] = {
{ "split", zpool_do_split, HELP_SPLIT },
{ NULL },
{ "initialize", zpool_do_initialize, HELP_INITIALIZE },
+ { "resilver", zpool_do_resilver, HELP_RESILVER },
{ "scrub", zpool_do_scrub, HELP_SCRUB },
{ NULL },
{ "import", zpool_do_import, HELP_IMPORT },
@@ -275,6 +278,8 @@ get_usage(zpool_help_t idx)
return (gettext("\tinitialize [-cs] <pool> [<device> ...]\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-s | -p] <pool> ...\n"));
+ case HELP_RESILVER:
+ return (gettext("\tresilver <pool> ...\n"));
case HELP_STATUS:
return (gettext("\tstatus [-DgLPvx] [-T d|u] [pool] ... "
"[interval [count]]\n"));
@@ -1693,11 +1698,14 @@ print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
- if (ps != NULL && ps->pss_state == DSS_SCANNING &&
- vs->vs_scan_processed != 0 && children == 0) {
- (void) printf(gettext(" (%s)"),
- (ps->pss_func == POOL_SCAN_RESILVER) ?
- "resilvering" : "repairing");
+ if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0) {
+ if (vs->vs_scan_processed != 0) {
+ (void) printf(gettext(" (%s)"),
+ (ps->pss_func == POOL_SCAN_RESILVER) ?
+ "resilvering" : "repairing");
+ } else if (vs->vs_resilver_deferred) {
+ (void) printf(gettext(" (awaiting resilver)"));
+ }
}
if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
@@ -4522,7 +4530,7 @@ scrub_callback(zpool_handle_t *zhp, void *data)
* Ignore faulted pools.
*/
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
- (void) fprintf(stderr, gettext("cannot scrub '%s': pool is "
+ (void) fprintf(stderr, gettext("cannot scan '%s': pool is "
"currently unavailable\n"), zpool_get_name(zhp));
return (1);
}
@@ -4590,6 +4598,43 @@ zpool_do_scrub(int argc, char **argv)
return (for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb));
}
+/*
+ * zpool resilver <pool> ...
+ *
+ * Restarts any in-progress resilver
+ */
+int
+zpool_do_resilver(int argc, char **argv)
+{
+ int c;
+ scrub_cbdata_t cb;
+
+ cb.cb_type = POOL_SCAN_RESILVER;
+ cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
+ cb.cb_argc = argc;
+ cb.cb_argv = argv;
+
+ /* check options */
+ while ((c = getopt(argc, argv, "")) != -1) {
+ switch (c) {
+ case '?':
+ (void) fprintf(stderr, gettext("invalid option '%c'\n"),
+ optopt);
+ usage(B_FALSE);
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+
+ if (argc < 1) {
+ (void) fprintf(stderr, gettext("missing pool name argument\n"));
+ usage(B_FALSE);
+ }
+
+ return (for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb));
+}
+
static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
diff --git a/usr/src/common/acl/acl_common.c b/usr/src/common/acl/acl_common.c
index eb4fca808e..765d6cef12 100644
--- a/usr/src/common/acl/acl_common.c
+++ b/usr/src/common/acl/acl_common.c
@@ -1636,7 +1636,7 @@ acl_trivial_access_masks(mode_t mode, boolean_t isdir, trivial_acl_t *masks)
int
acl_trivial_create(mode_t mode, boolean_t isdir, ace_t **acl, int *count)
{
- int index = 0;
+ int index = 0;
int error;
trivial_acl_t masks;
@@ -1696,7 +1696,7 @@ ace_trivial_common(void *acep, int aclcnt,
uint64_t cookie = 0;
while ((cookie = walk(acep, cookie, aclcnt, &flags, &type, &mask))
- != NULL) {
+ != 0) {
switch (flags & ACE_TYPE_FLAGS) {
case ACE_OWNER:
case ACE_GROUP|ACE_IDENTIFIER_GROUP:
diff --git a/usr/src/common/zfs/zfeature_common.c b/usr/src/common/zfs/zfeature_common.c
index 1f1f55292d..feab5145c0 100644
--- a/usr/src/common/zfs/zfeature_common.c
+++ b/usr/src/common/zfs/zfeature_common.c
@@ -315,10 +315,13 @@ zpool_feature_init(void)
"freed or remapped.",
ZFEATURE_FLAG_READONLY_COMPAT, obsolete_counts_deps);
- {
zfeature_register(SPA_FEATURE_ALLOCATION_CLASSES,
"org.zfsonlinux:allocation_classes", "allocation_classes",
"Support for separate allocation classes.",
ZFEATURE_FLAG_READONLY_COMPAT, NULL);
- }
+
+ zfeature_register(SPA_FEATURE_RESILVER_DEFER,
+ "com.datto:resilver_defer", "resilver_defer",
+ "Support for deferring new resilvers when one is already running.",
+ ZFEATURE_FLAG_READONLY_COMPAT, NULL);
}
diff --git a/usr/src/common/zfs/zfeature_common.h b/usr/src/common/zfs/zfeature_common.h
index af29560ae9..ebe9626caf 100644
--- a/usr/src/common/zfs/zfeature_common.h
+++ b/usr/src/common/zfs/zfeature_common.h
@@ -63,6 +63,7 @@ typedef enum spa_feature {
SPA_FEATURE_POOL_CHECKPOINT,
SPA_FEATURE_SPACEMAP_V2,
SPA_FEATURE_ALLOCATION_CLASSES,
+ SPA_FEATURE_RESILVER_DEFER,
SPA_FEATURES
} spa_feature_t;
diff --git a/usr/src/lib/libzfs/common/libzfs.h b/usr/src/lib/libzfs/common/libzfs.h
index a1d099a0a2..35d0156b9c 100644
--- a/usr/src/lib/libzfs/common/libzfs.h
+++ b/usr/src/lib/libzfs/common/libzfs.h
@@ -141,6 +141,7 @@ typedef enum zfs_error {
EZFS_TOOMANY, /* argument list too long */
EZFS_INITIALIZING, /* currently initializing */
EZFS_NO_INITIALIZE, /* no active initialize */
+ EZFS_NO_RESILVER_DEFER, /* pool doesn't support resilver_defer */
EZFS_UNKNOWN
} zfs_error_t;
diff --git a/usr/src/lib/libzfs/common/libzfs_pool.c b/usr/src/lib/libzfs/common/libzfs_pool.c
index 8e59d312d3..6cc8dce54b 100644
--- a/usr/src/lib/libzfs/common/libzfs_pool.c
+++ b/usr/src/lib/libzfs/common/libzfs_pool.c
@@ -2039,6 +2039,10 @@ zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot scrub %s"), zc.zc_name);
}
+ } else if (func == POOL_SCAN_RESILVER) {
+ assert(cmd == POOL_SCRUB_NORMAL);
+ (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+ "cannot restart resilver on %s"), zc.zc_name);
} else if (func == POOL_SCAN_NONE) {
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
@@ -2066,6 +2070,8 @@ zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
}
} else if (err == ENOENT) {
return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
+ } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
+ return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg));
} else {
return (zpool_standard_error(hdl, err, msg));
}
diff --git a/usr/src/lib/libzfs/common/libzfs_util.c b/usr/src/lib/libzfs/common/libzfs_util.c
index 91278b50b9..d70ed28d1d 100644
--- a/usr/src/lib/libzfs/common/libzfs_util.c
+++ b/usr/src/lib/libzfs/common/libzfs_util.c
@@ -263,6 +263,9 @@ libzfs_error_description(libzfs_handle_t *hdl)
case EZFS_NO_INITIALIZE:
return (dgettext(TEXT_DOMAIN, "there is no active "
"initialization"));
+ case EZFS_NO_RESILVER_DEFER:
+ return (dgettext(TEXT_DOMAIN, "this action requires the "
+ "resilver_defer feature"));
case EZFS_UNKNOWN:
return (dgettext(TEXT_DOMAIN, "unknown error"));
default:
diff --git a/usr/src/man/man1m/zpool.1m b/usr/src/man/man1m/zpool.1m
index 9ec0408b4f..415a0ea2f2 100644
--- a/usr/src/man/man1m/zpool.1m
+++ b/usr/src/man/man1m/zpool.1m
@@ -27,7 +27,7 @@
.\" Copyright (c) 2017 George Melikov. All Rights Reserved.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd April 27, 2018
+.Dd May 15, 2019
.Dt ZPOOL 1M
.Os
.Sh NAME
@@ -157,6 +157,9 @@
.Op Fl f
.Ar pool Ar device Op Ar new_device
.Nm
+.Cm resilver
+.Ar pool Ns ...
+.Nm
.Cm scrub
.Op Fl s | Fl p
.Ar pool Ns ...
@@ -1777,6 +1780,19 @@ Not all devices can be overridden in this manner.
.El
.It Xo
.Nm
+.Cm resilver
+.Ar pool Ns ...
+.Xc
+Starts a resilver.
+If an existing resilver is already running it will be restarted from the
+beginning.
+Any drives that were scheduled for a deferred resilver will be added to the
+new one.
+This requires the
+.Sy resilver_defer
+feature.
+.It Xo
+.Nm
.Cm scrub
.Op Fl s | Fl p
.Ar pool Ns ...
diff --git a/usr/src/man/man5/zpool-features.5 b/usr/src/man/man5/zpool-features.5
index 3f83366f0e..ff34ce5d48 100644
--- a/usr/src/man/man5/zpool-features.5
+++ b/usr/src/man/man5/zpool-features.5
@@ -15,7 +15,7 @@
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
-.TH ZPOOL-FEATURES 5 "Apr 19, 2019"
+.TH ZPOOL-FEATURES 5 "May 15, 2019"
.SH NAME
zpool\-features \- ZFS pool feature descriptions
.SH DESCRIPTION
@@ -683,5 +683,26 @@ This feature becomes \fBactive\fR when a dedicated allocation class vdev
removal, it can be returned to the \fBenabled\fR state if all the top-level
vdevs from an allocation class are removed.
+.RE
+.sp
+.ne 2
+.na
+\fB\fBresilver_defer\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.datto:resilver_defer
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES none
+.TE
+
+This feature allows zfs to postpone new resilvers if an existing one is already
+in progress. Without this feature, any new resilvers will cause the currently
+running one to be immediately restarted from the beginning.
+
+This feature becomes \fBactive\fR once a resilver has been deferred, and
+returns to being \fBenabled\fR when the deferred resilver begins.
+
.SH "SEE ALSO"
\fBzfs\fR(1M), \fBzpool\fR(1M)
diff --git a/usr/src/pkg/manifests/system-test-zfstest.mf b/usr/src/pkg/manifests/system-test-zfstest.mf
index febf7ac38c..a29f2c6bc8 100644
--- a/usr/src/pkg/manifests/system-test-zfstest.mf
+++ b/usr/src/pkg/manifests/system-test-zfstest.mf
@@ -88,7 +88,9 @@ dir path=opt/zfs-tests/tests/functional/cli_root/zpool_labelclear
dir path=opt/zfs-tests/tests/functional/cli_root/zpool_offline
dir path=opt/zfs-tests/tests/functional/cli_root/zpool_online
dir path=opt/zfs-tests/tests/functional/cli_root/zpool_remove
+dir path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen
dir path=opt/zfs-tests/tests/functional/cli_root/zpool_replace
+dir path=opt/zfs-tests/tests/functional/cli_root/zpool_resilver
dir path=opt/zfs-tests/tests/functional/cli_root/zpool_scrub
dir path=opt/zfs-tests/tests/functional/cli_root/zpool_set
dir path=opt/zfs-tests/tests/functional/cli_root/zpool_status
@@ -1727,6 +1729,36 @@ file \
file \
path=opt/zfs-tests/tests/functional/cli_root/zpool_remove/zpool_remove_003_pos \
mode=0555
+file path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/cleanup \
+ mode=0555
+file path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/setup mode=0555
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.cfg \
+ mode=0444
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib \
+ mode=0444
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_001_pos \
+ mode=0555
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_002_pos \
+ mode=0555
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_003_pos \
+ mode=0555
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_004_pos \
+ mode=0555
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_005_pos \
+ mode=0555
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_006_neg \
+ mode=0555
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_007_pos \
+ mode=0555
file path=opt/zfs-tests/tests/functional/cli_root/zpool_replace/cleanup \
mode=0555
file path=opt/zfs-tests/tests/functional/cli_root/zpool_replace/setup \
@@ -1734,6 +1766,19 @@ file path=opt/zfs-tests/tests/functional/cli_root/zpool_replace/setup \
file \
path=opt/zfs-tests/tests/functional/cli_root/zpool_replace/zpool_replace_001_neg \
mode=0555
+file path=opt/zfs-tests/tests/functional/cli_root/zpool_resilver/cleanup \
+ mode=0555
+file path=opt/zfs-tests/tests/functional/cli_root/zpool_resilver/setup \
+ mode=0555
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver.cfg \
+ mode=0444
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_bad_args \
+ mode=0555
+file \
+ path=opt/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart \
+ mode=0555
file path=opt/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup \
mode=0555
file path=opt/zfs-tests/tests/functional/cli_root/zpool_scrub/setup mode=0555
diff --git a/usr/src/test/zfs-tests/runfiles/delphix.run b/usr/src/test/zfs-tests/runfiles/delphix.run
index 2cd454d08e..dbee1a5433 100644
--- a/usr/src/test/zfs-tests/runfiles/delphix.run
+++ b/usr/src/test/zfs-tests/runfiles/delphix.run
@@ -339,9 +339,14 @@ tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
[/opt/zfs-tests/tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg']
+[/opt/zfs-tests/tests/functional/cli_root/zpool_resilver]
+tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart']
+tags = ['functional', 'cli_root', 'zpool_resilver']
+
[/opt/zfs-tests/tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
- 'zpool_scrub_004_pos', 'zpool_scrub_005_pos', 'zpool_scrub_multiple_copies']
+ 'zpool_scrub_004_pos', 'zpool_scrub_005_pos', 'zpool_scrub_print_repairing',
+ 'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies']
[/opt/zfs-tests/tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg']
diff --git a/usr/src/test/zfs-tests/runfiles/omnios.run b/usr/src/test/zfs-tests/runfiles/omnios.run
index 5f77cec090..875b529e9e 100644
--- a/usr/src/test/zfs-tests/runfiles/omnios.run
+++ b/usr/src/test/zfs-tests/runfiles/omnios.run
@@ -340,9 +340,14 @@ tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
[/opt/zfs-tests/tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg']
+[/opt/zfs-tests/tests/functional/cli_root/zpool_resilver]
+tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart']
+tags = ['functional', 'cli_root', 'zpool_resilver']
+
[/opt/zfs-tests/tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
- 'zpool_scrub_004_pos', 'zpool_scrub_005_pos', 'zpool_scrub_multiple_copies']
+ 'zpool_scrub_004_pos', 'zpool_scrub_005_pos', 'zpool_scrub_print_repairing',
+ 'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies']
[/opt/zfs-tests/tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg']
diff --git a/usr/src/test/zfs-tests/runfiles/openindiana.run b/usr/src/test/zfs-tests/runfiles/openindiana.run
index 357d83bcd3..f8c0c40328 100644
--- a/usr/src/test/zfs-tests/runfiles/openindiana.run
+++ b/usr/src/test/zfs-tests/runfiles/openindiana.run
@@ -340,9 +340,14 @@ tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
[/opt/zfs-tests/tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg']
+[/opt/zfs-tests/tests/functional/cli_root/zpool_resilver]
+tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart']
+tags = ['functional', 'cli_root', 'zpool_resilver']
+
[/opt/zfs-tests/tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
- 'zpool_scrub_004_pos', 'zpool_scrub_005_pos', 'zpool_scrub_multiple_copies']
+ 'zpool_scrub_004_pos', 'zpool_scrub_005_pos', 'zpool_scrub_print_repairing',
+ 'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies']
[/opt/zfs-tests/tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg']
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
index 1449cfaef9..fc4ce85bdb 100644
--- a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
@@ -78,4 +78,5 @@ typeset -a properties=(
"feature@zpool_checkpoint"
"feature@spacemap_v2"
"feature@allocation_classes"
+ "feature@resilver_defer"
)
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh
index c4569fc2af..feb726119d 100644
--- a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh
@@ -34,6 +34,8 @@
verify_runnable "global"
+log_must set_tunable32 zfs_scan_suspend_progress 0
+
for pool in "$TESTPOOL" "$TESTPOOL1"; do
datasetexists $pool/$TESTFS && \
log_must zfs destroy -Rf $pool/$TESTFS
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh
index 74957770cb..b8c39fd3fa 100644
--- a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh
@@ -42,10 +42,10 @@
# each sync.
# 2. Add data to pool
# 3. Re-import the pool so that data isn't cached
-# 4. Use zinject to slow down device I/O
+# 4. Use zfs_scan_suspend_progress to ensure resilvers don't progress
# 5. Trigger the resilvering
# 6. Use spa freeze to stop writing to the pool.
-# 7. Clear zinject events (needed to export the pool)
+# 7. Re-enable scan progress
# 8. Export the pool
#
@@ -58,7 +58,7 @@ function custom_cleanup
# Revert zfs_txg_timeout to defaults
[[ -n ZFS_TXG_TIMEOUT ]] &&
log_must set_zfs_txg_timeout $ZFS_TXG_TIMEOUT
-
+ log_must set_tunable32 zfs_scan_suspend_progress 0
cleanup
}
@@ -85,22 +85,16 @@ function test_replacing_vdevs
log_must zpool export $TESTPOOL1
log_must cp $CPATHBKP $CPATH
log_must zpool import -c $CPATH -o cachefile=$CPATH $TESTPOOL1
- typeset device
- for device in $zinjectdevices ; do
- log_must zinject -d $device -D 200:1 $TESTPOOL1 > /dev/null
- done
+ log_must set_tunable32 zfs_scan_suspend_progress 1
log_must zpool replace $TESTPOOL1 $replacevdev $replaceby
# Cachefile: pool in resilvering state
log_must cp $CPATH $CPATHBKP2
- # We must disable zinject in order to export the pool, so we freeze
- # it first to prevent writing out subsequent resilvering progress.
- log_must zpool freeze $TESTPOOL1
# Confirm pool is still replacing
log_must pool_is_replacing $TESTPOOL1
- log_must zinject -c all > /dev/null
log_must zpool export $TESTPOOL1
+ log_must set_tunable32 zfs_scan_suspend_progress 0
( $earlyremove ) && log_must rm $replacevdev
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh
index d57ced1040..d1f3da524b 100644
--- a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh
@@ -63,6 +63,7 @@ function custom_cleanup
[[ -n ZFS_TXG_TIMEOUT ]] &&
log_must set_zfs_txg_timeout $ZFS_TXG_TIMEOUT
log_must rm -rf $BACKUP_DEVICE_DIR
+ log_must set_tunable32 zfs_scan_suspend_progress 0
cleanup
}
@@ -96,22 +97,17 @@ function test_replace_vdev
# This should not free original data.
log_must overwrite_data $TESTPOOL1 ""
- # Steps to insure resilvering happens very slowly.
log_must zpool export $TESTPOOL1
log_must zpool import -d $DEVICE_DIR $TESTPOOL1
- typeset device
- for device in $zinjectdevices ; do
- log_must zinject -d $device -D 200:1 $TESTPOOL1 > /dev/null
- done
+
+ # Ensure resilvering doesn't complete.
+ log_must set_tunable32 zfs_scan_suspend_progress 1
log_must zpool replace $TESTPOOL1 $replacevdev $replaceby
- # We must disable zinject in order to export the pool, so we freeze
- # it first to prevent writing out subsequent resilvering progress.
- log_must zpool freeze $TESTPOOL1
# Confirm pool is still replacing
log_must pool_is_replacing $TESTPOOL1
- log_must zinject -c all > /dev/null
log_must zpool export $TESTPOOL1
+ log_must set_tunable32 zfs_scan_suspend_progress 0
############################################################
# Test 1: rewind while device is resilvering.
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/Makefile b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/Makefile
new file mode 100644
index 0000000000..f8957207a0
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/Makefile
@@ -0,0 +1,21 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2019 Joyent, Inc.
+#
+
+include $(SRC)/Makefile.master
+
+ROOTOPTPKG = $(ROOT)/opt/zfs-tests
+TARGETDIR = $(ROOTOPTPKG)/tests/functional/cli_root/zpool_reopen
+
+include $(SRC)/test/zfs-tests/Makefile.com
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/cleanup.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/cleanup.ksh
new file mode 100755
index 0000000000..a6facfd986
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/cleanup.ksh
@@ -0,0 +1,36 @@
+#!/bin/ksh -p
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2016, 2017 by Intel Corporation. All rights reserved.
+# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+# Copyright 2019 Joyent, Inc.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
+
+verify_runnable "global"
+
+cleanup_devices $DISKS
+
+# Unplug the disk and remove scsi_debug module
+case "$(uname)" in
+Linux)
+ for SDDEVICE in $(get_debug_device); do
+ remove_disk $SDDEVICE
+ done
+ unload_scsi_debug
+ ;;
+esac
+
+log_pass
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/setup.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/setup.ksh
new file mode 100755
index 0000000000..c775f06153
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/setup.ksh
@@ -0,0 +1,34 @@
+#!/bin/ksh -p
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2016, 2017 by Intel Corporation. All rights reserved.
+# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+# Copyright 2019 Joyent, Inc.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.cfg
+
+verify_runnable "global"
+
+# Create scsi_debug devices for the reopen tests
+case "$(uname)" in
+Linux)
+ load_scsi_debug $SDSIZE $SDHOSTS $SDTGTS $SDLUNS '512b'
+ ;;
+SunOS)
+ log_unsupported "scsi debug module unsupported"
+ ;;
+esac
+
+log_pass
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.cfg b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.cfg
new file mode 100644
index 0000000000..36c84821f1
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.cfg
@@ -0,0 +1,49 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2016, 2017 by Intel Corporation. All rights reserved.
+# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+# Copyright 2019 Joyent, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+verify_runnable "global"
+
+export DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
+export DISKSARRAY=$DISKS
+export SMALL_FILE_SIZE=10
+export LARGE_FILE_SIZE=80
+export MAXTIMEOUT=40
+
+export SDSIZE=256
+export SDHOSTS=1
+export SDTGTS=1
+export SDLUNS=1
+
+export DISK1=$(echo $DISKS | nawk '{print $1}')
+export DISK2=$(echo $DISKS | nawk '{print $2}')
+export DISK3=$(echo $DISKS | nawk '{print $3}')
+
+case "$(uname)" in
+Linux)
+ set_slice_prefix
+ set_device_dir
+ devs_id[0]=$(get_persistent_disk_name $DISK1)
+ devs_id[1]=$(get_persistent_disk_name $DISK2)
+ devs_id[2]=$(get_persistent_disk_name $DISK3)
+ export devs_id
+ ;;
+SunOS)
+ DEV_DSKDIR="/dev"
+ ;;
+esac
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
new file mode 100755
index 0000000000..075ad85e9f
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
@@ -0,0 +1,124 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+#
+# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.cfg
+
+#
+# Clear labels on the given disks
+#
+function clear_labels #disks
+{
+ for disk in $@; do
+ if ( is_loop_device $disk ) || ( is_mpath_device $disk ); then
+ zpool labelclear -f /dev/$disk
+ else
+ zpool labelclear -f /dev/${disk}1
+ fi
+ done
+}
+
+#
+# Set the REMOVED_DISK and REMOVED_DISK_ID constants for device
+# used for re-plugging. When the disk is loop device use the
+# scsi_debug emulated drive. Otherwise use the real drive.
+#
+function set_removed_disk
+{
+ if is_loop_device $DISK1; then
+ export REMOVED_DISK=$(get_debug_device)
+ export REMOVED_DISK_ID=$(get_persistent_disk_name $REMOVED_DISK)
+ elif ( is_real_device $DISK1 ) || ( is_mpath_device $DISK1 ); then
+ export REMOVED_DISK="$DISK1"
+ export REMOVED_DISK_ID=${devs_id[0]}
+ else
+ log_fail "No drives that supports removal"
+ fi
+}
+
+#
+# Generate random file of the given size in MiB
+#
+function generate_random_file #path size_mb
+{
+ typeset path=$1
+ typeset -i size_mb=$2
+ file_write -o create -f $path -b 1048576 -s0 -c $size_mb -d R
+}
+
+#
+# Wait until specific event or timeout occur.
+#
+# The passed function is executed with pool name as argument
+# with an interval of 1 second until it succeeds or until the
+# timeout occurs.
+# It returns 1 on timeout or 0 otherwise.
+#
+function wait_for_action #pool timeout function
+{
+ typeset pool=$1
+ typeset -i timeout=$2
+ typeset func=$3
+
+ while [ $timeout -gt 0 ]; do
+ (( --timeout ))
+ if ( $func $pool ); then
+ return 0
+ fi
+ sleep 1
+ done
+
+ return 1
+}
+
+#
+# Helpers for wait_for_action function:
+# wait_for_resilver_start - wait until resilver is started
+# wait_for_resilver_end - wait until resilver is finished
+# wait_for_scrub_end - wait until scrub is finished
+#
+function wait_for_resilver_start #pool timeout
+{
+ wait_for_action $1 $2 is_pool_resilvering
+ return $?
+}
+
+function wait_for_resilver_end #pool timeout
+{
+ wait_for_action $1 $2 is_pool_resilvered
+ return $?
+}
+
+function wait_for_scrub_end #pool timeout
+{
+ wait_for_action $1 $2 is_pool_scrubbed
+ return $?
+}
+
+#
+# Check if scan action has been restarted on the given pool
+#
+
+function is_scan_restarted #pool
+{
+ typeset pool=$1
+ zpool history -i $pool | grep -q "scan aborted, restarting"
+ return $?
+}
+
+function is_deferred_scan_started #pool
+{
+ typeset pool=$1
+ zpool history -i $pool | grep -q "starting deferred resilver"
+ return $?
+}
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_001_pos.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_001_pos.ksh
new file mode 100755
index 0000000000..68ebf669c9
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_001_pos.ksh
@@ -0,0 +1,70 @@
+#!/bin/ksh -p
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
+
+#
+# DESCRIPTION:
+# Test if zpool reopen with no arguments works correctly.
+#
+# STRATEGY:
+# 1. Create a pool.
+# 2. Remove a disk.
+# 3. Reopen a pool and verify if removed disk is marked as unavailable.
+# 4. "Plug back" disk.
+# 5. Reopen a pool and verify if removed disk is marked online again.
+# 6. Check if reopen caused resilver start.
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ # bring back removed disk online for further tests
+ insert_disk $REMOVED_DISK $scsi_host
+ poolexists $TESTPOOL && destroy_pool $TESTPOOL
+ clear_labels $REMOVED_DISK $DISK2
+}
+
+log_assert "Testing zpool reopen with no arguments"
+log_onexit cleanup
+
+set_removed_disk
+scsi_host=$(get_scsi_host $REMOVED_DISK)
+
+# 1. Create a pool.
+default_mirror_setup_noexit $REMOVED_DISK_ID $DISK2
+# 2. Remove a disk.
+remove_disk $REMOVED_DISK
+# 3. Reopen a pool and verify if removed disk is marked as unavailable.
+log_must zpool reopen
+log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
+# Write some data to the pool
+log_must generate_random_file /$TESTPOOL/data $SMALL_FILE_SIZE
+# 4. "Plug back" disk.
+insert_disk $REMOVED_DISK $scsi_host
+# 5. Reopen a pool and verify if removed disk is marked online again.
+log_must zpool reopen
+log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "online"
+# 6. Check if reopen caused resilver start.
+log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT
+
+# clean up
+log_must zpool destroy $TESTPOOL
+clear_labels $REMOVED_DISK $DISK2
+
+log_pass "Zpool reopen with no arguments test passed"
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_002_pos.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_002_pos.ksh
new file mode 100755
index 0000000000..444c8a6852
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_002_pos.ksh
@@ -0,0 +1,70 @@
+#!/bin/ksh -p
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
+
+#
+# DESCRIPTION:
+# Test if zpool reopen with pool name as argument works correctly.
+#
+# STRATEGY:
+# 1. Create a pool.
+# 2. Remove a disk.
+# 3. Reopen a pool and verify if removed disk is marked as unavailable.
+# 4. "Plug back" disk.
+# 5. Reopen a pool and verify if removed disk is marked online again.
+# 6. Check if reopen caused resilver start.
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ # bring back removed disk online for further tests
+ insert_disk $REMOVED_DISK $scsi_host
+ poolexists $TESTPOOL && destroy_pool $TESTPOOL
+ clear_labels $REMOVED_DISK $DISK2
+}
+
+log_assert "Testing zpool reopen with no arguments"
+log_onexit cleanup
+
+set_removed_disk
+scsi_host=$(get_scsi_host $REMOVED_DISK)
+
+# 1. Create a pool.
+default_mirror_setup_noexit $REMOVED_DISK_ID $DISK2
+# 2. Remove a disk.
+remove_disk $REMOVED_DISK
+# 3. Reopen a pool and verify if removed disk is marked as unavailable.
+log_must zpool reopen $TESTPOOL
+log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
+# Write some data to the pool
+log_must generate_random_file /$TESTPOOL/data $SMALL_FILE_SIZE
+# 4. "Plug back" disk.
+insert_disk $REMOVED_DISK $scsi_host
+# 5. Reopen a pool and verify if removed disk is marked online again.
+log_must zpool reopen $TESTPOOL
+log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "online"
+# 6. Check if reopen caused resilver start.
+log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT
+
+# clean up
+log_must zpool destroy $TESTPOOL
+clear_labels $REMOVED_DISK $DISK2
+
+log_pass "Zpool reopen with no arguments test passed"
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_003_pos.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_003_pos.ksh
new file mode 100755
index 0000000000..6ac7488184
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_003_pos.ksh
@@ -0,0 +1,101 @@
+#!/bin/ksh -p
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
+
+#
+# DESCRIPTION:
+# Test zpool reopen while scrub is running.
+# Checks if re-plugged device is fully resilvered.
+#
+# STRATEGY:
+# 1. Create a pool
+# 2. Remove a disk.
+# 3. Write a test file to the pool and calculate its checksum.
+# 4. Execute scrub.
+# 5. "Plug back" disk.
+# 6. Reopen a pool.
+# 7. Check if scrub scan is replaced by resilver.
+# 8. Put another device offline and check if the test file checksum is correct.
+#
+# NOTES:
+# A 250ms delay is added to make sure that the scrub is running while
+# the reopen kicks the resilver.
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ log_must zinject -c all
+ rm -f $TESTFILE_MD5 2>/dev/null
+ # bring back removed disk online for further tests
+ insert_disk $REMOVED_DISK $scsi_host
+ poolexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_assert "Testing zpool reopen with pool name as argument"
+log_onexit cleanup
+
+set_removed_disk
+scsi_host=$(get_scsi_host $REMOVED_DISK)
+
+# 1. Create a pool
+default_mirror_setup_noexit $REMOVED_DISK_ID $DISK2
+# 2. Remove a disk.
+remove_disk $REMOVED_DISK
+
+log_must zpool reopen $TESTPOOL
+log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
+
+# 3. Write a test file to the pool and calculate its checksum.
+TESTFILE=/$TESTPOOL/data
+TESTFILE_MD5=$(mktemp --tmpdir=/var/tmp)
+log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+log_must md5sum $TESTFILE > $TESTFILE_MD5
+
+# 4. Execute scrub.
+# add delay to I/O requests for remaining disk in pool
+log_must zinject -d $DISK2 -D250:1 $TESTPOOL
+log_must zpool scrub $TESTPOOL
+
+# 5. "Plug back" disk.
+insert_disk $REMOVED_DISK $scsi_host
+# 6. Reopen a pool.
+log_must zpool reopen $TESTPOOL
+log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "online"
+# 7. Check if scrub scan is replaced by resilver.
+# the scrub operation has to be running while reopen is executed
+log_must is_pool_scrubbing $TESTPOOL true
+# remove delay from disk
+log_must zinject -c all
+# the scrub will be replaced by resilver, wait until it ends
+log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT
+# check if the scrub scan has been interrupted by resilver
+log_must is_scan_restarted $TESTPOOL
+
+# 8. Put another device offline and check if the test file checksum is correct.
+log_must zpool offline $TESTPOOL $DISK2
+log_must md5sum -c $TESTFILE_MD5
+log_must zpool online $TESTPOOL $DISK2
+sleep 1
+
+# clean up
+rm -f $TESTFILE_MD5 2>/dev/null
+log_must zpool destroy $TESTPOOL
+
+log_pass "Zpool reopen test successful"
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_004_pos.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_004_pos.ksh
new file mode 100755
index 0000000000..956ceebafb
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_004_pos.ksh
@@ -0,0 +1,88 @@
+#!/bin/ksh -p
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
+
+#
+# DESCRIPTION:
+# Test zpool reopen -n while scrub is running.
+# Checks if re-plugged device is NOT resilvered.
+#
+# STRATEGY:
+# 1. Create a pool
+# 2. Remove a disk.
+# 3. Write test file to pool.
+# 4. Execute scrub.
+# 5. "Plug back" disk.
+# 6. Reopen a pool with an -n flag.
+# 7. Check if resilver was deferred.
+# 8. Check if trying to put device to offline fails because of no valid
+# replicas.
+#
+# NOTES:
+# A 125ms delay is added to make sure that the scrub is running while
+# the reopen is invoked.
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ log_must zinject -c all
+ # bring back removed disk online for further tests
+ insert_disk $REMOVED_DISK $scsi_host
+ poolexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_assert "Testing zpool reopen with pool name as argument"
+log_onexit cleanup
+
+set_removed_disk
+scsi_host=$(get_scsi_host $REMOVED_DISK)
+
+# 1. Create a pool
+default_mirror_setup_noexit $REMOVED_DISK_ID $DISK2
+# 2. Remove a disk.
+remove_disk $REMOVED_DISK
+log_must zpool reopen -n $TESTPOOL
+log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
+# 3. Write test file to pool.
+log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+# 4. Execute scrub.
+# add delay to I/O requests for remaining disk in pool
+log_must zinject -d $DISK2 -D125:1 $TESTPOOL
+log_must zpool scrub $TESTPOOL
+# 5. "Plug back" disk.
+insert_disk $REMOVED_DISK $scsi_host
+# 6. Reopen a pool with an -n flag.
+log_must zpool reopen -n $TESTPOOL
+log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "online"
+# remove delay from disk
+log_must zinject -c all
+# 7. Check if scrub scan is NOT replaced by resilver.
+log_must wait_for_scrub_end $TESTPOOL $MAXTIMEOUT
+log_must is_deferred_scan_started $TESTPOOL
+
+# 8. Check if trying to put device to offline fails because of no valid
+# replicas.
+log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT
+log_must zpool offline $TESTPOOL $DISK2
+
+# clean up
+log_must zpool destroy $TESTPOOL
+
+log_pass "Zpool reopen test successful"
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_005_pos.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_005_pos.ksh
new file mode 100755
index 0000000000..fc298d0106
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_005_pos.ksh
@@ -0,0 +1,86 @@
+#!/bin/ksh -p
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
+
+#
+# DESCRIPTION:
+# Test zpool reopen -n while resilver is running.
+# Checks if the resilver is restarted.
+#
+# STRATEGY:
+# 1. Create a pool
+# 2. Remove a disk.
+# 3. Write test file to pool.
+# 4. "Plug back" disk.
+# 5. Reopen a pool and wait until resilvering is started.
+# 6. Reopen a pool again with -n flag.
+# 7. Wait until resilvering is finished and check if it was restarted.
+#
+# NOTES:
+# A 25ms delay is added to make sure that the resilver is running while
+# the reopen is invoked.
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ log_must zinject -c all
+ insert_disk $REMOVED_DISK $scsi_host
+ poolexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_assert "Testing zpool reopen with pool name as argument"
+log_onexit cleanup
+
+set_removed_disk
+scsi_host=$(get_scsi_host $REMOVED_DISK)
+
+# 1. Create a pool
+default_mirror_setup_noexit $REMOVED_DISK_ID $DISK2
+# 2. Remove a disk.
+remove_disk $REMOVED_DISK
+
+log_must zpool reopen $TESTPOOL
+log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "unavail"
+# 3. Write test file to pool.
+log_must generate_random_file /$TESTPOOL/data $LARGE_FILE_SIZE
+# 4. "Plug back" disk.
+insert_disk $REMOVED_DISK $scsi_host
+
+# 5. Reopen a pool and wait until resilvering is started.
+log_must zpool reopen $TESTPOOL
+log_must check_state $TESTPOOL "$REMOVED_DISK_ID" "online"
+# add delay to I/O requests for the reopened disk
+log_must zinject -d $REMOVED_DISK_ID -D25:1 $TESTPOOL
+# wait until resilver starts
+log_must wait_for_resilver_start $TESTPOOL $MAXTIMEOUT
+
+# 6. Reopen a pool again with -n flag.
+log_must zpool reopen -n $TESTPOOL
+
+# 7. Wait until resilvering is finished and check if it was restarted.
+log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT
+# remove delay from disk
+log_must zinject -c all
+log_mustnot is_scan_restarted $TESTPOOL
+
+# clean up
+log_must zpool destroy $TESTPOOL
+
+log_pass "Zpool reopen test successful"
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_006_neg.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_006_neg.ksh
new file mode 100755
index 0000000000..6533bde68f
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_006_neg.ksh
@@ -0,0 +1,43 @@
+#!/bin/ksh -p
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# Wrong arguments passed to zpool reopen should cause an error.
+#
+# STRATEGY:
+# 1. Create an array with bad 'zpool reopen' arguments.
+# 2. For each argument execute the 'zpool reopen' command and verify
+# if it returns an error.
+#
+
+verify_runnable "global"
+
+# 1. Create an array with bad 'zpool reopen' arguments.
+typeset -a args=("!" "1" "-s" "--n" "-1" "-" "-c" "-f" "-d 2" "-abc" "-na")
+
+log_assert "Test 'zpool reopen' with invalid arguments."
+
+# 2. For each argument execute the 'zpool reopen' command and verify
+# if it returns an error.
+for arg in ${args[@]}; do
+ log_mustnot zpool reopen $arg
+done
+
+log_pass "Passing invalid arguments to 'zpool reopen' failed as expected."
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_007_pos.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_007_pos.ksh
new file mode 100755
index 0000000000..4ba56af85d
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen_007_pos.ksh
@@ -0,0 +1,67 @@
+#!/bin/ksh -p
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
+
+#
+# DESCRIPTION:
+# Test zpool reopen while performing IO to the pool.
+# Verify that no IO errors of any kind are reported.
+#
+# STRATEGY:
+# 1. Create a non-redundant pool.
+# 2. Repeat:
+# a. Write files to the pool.
+# b. Execute 'zpool reopen'.
+# 3. Verify that no errors are reported by 'zpool status'.
+
+verify_runnable "global"
+
+function cleanup
+{
+ poolexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+log_assert "Testing zpool reopen with concurrent user IO"
+log_onexit cleanup
+
+set_removed_disk
+scsi_host=$(get_scsi_host $REMOVED_DISK)
+
+# 1. Create a non-redundant pool.
+log_must zpool create $TESTPOOL $DISK1 $DISK2 $DISK3
+
+for i in $(seq 10); do
+	# 2a. Write files in the background to the pool.
+ mkfile 64m /$TESTPOOL/data.$i &
+
+	# 2b. Execute 'zpool reopen'.
+ log_must zpool reopen $TESTPOOL
+
+ for disk in $DISK1 $DISK2 $DISK3; do
+ zpool status -P -v $TESTPOOL | grep $disk | \
+ read -r name state rd wr cksum
+ log_must [ $state = "ONLINE" ]
+ log_must [ $rd -eq 0 ]
+ log_must [ $wr -eq 0 ]
+ log_must [ $cksum -eq 0 ]
+ done
+done
+
+wait
+
+log_pass "Zpool reopen with concurrent user IO successful"
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/Makefile b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/Makefile
new file mode 100644
index 0000000000..6f70031d6a
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/Makefile
@@ -0,0 +1,21 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2019 Joyent, Inc.
+#
+
+include $(SRC)/Makefile.master
+
+ROOTOPTPKG = $(ROOT)/opt/zfs-tests
+TARGETDIR = $(ROOTOPTPKG)/tests/functional/cli_root/zpool_resilver
+
+include $(SRC)/test/zfs-tests/Makefile.com
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/cleanup.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/cleanup.ksh
new file mode 100755
index 0000000000..c74e23919c
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/cleanup.ksh
@@ -0,0 +1,33 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_scrub/zpool_scrub.cfg
+
+verify_runnable "global"
+
+destroy_mirrors
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/setup.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/setup.ksh
new file mode 100755
index 0000000000..48ceecdf9e
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/setup.ksh
@@ -0,0 +1,39 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2018 by Datto. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_resilver/zpool_resilver.cfg
+
+verify_runnable "global"
+verify_disk_count "$DISKS" 3
+
+default_mirror_setup_noexit $DISK1 $DISK2 $DISK3
+
+mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
+
+# Create 256M of data
+log_must file_write -b 1048576 -c 256 -o create -d 0 -f $mntpnt/bigfile
+log_pass
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver.cfg b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver.cfg
new file mode 100644
index 0000000000..5c013c7232
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver.cfg
@@ -0,0 +1,30 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2018 by Datto. All rights reserved.
+#
+
+export DISK1=$(echo $DISKS | nawk '{print $1}')
+export DISK2=$(echo $DISKS | nawk '{print $2}')
+export DISK3=$(echo $DISKS | nawk '{print $3}')
+
+export MAXTIMEOUT=80
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_bad_args.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_bad_args.ksh
new file mode 100755
index 0000000000..abd5140866
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_bad_args.ksh
@@ -0,0 +1,71 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2018 by Datto. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# A badly formed parameter passed to 'zpool resilver' should
+# return an error.
+#
+# STRATEGY:
+# 1. Create an array containing bad 'zpool resilver' parameters.
+# 2. For each element, execute the sub-command.
+# 3. Verify it returns an error.
+# 4. Confirm the sub-command returns an error if the resilver_defer
+# feature isn't active.
+#
+
+verify_runnable "global"
+
+set -A args "" "-?" "blah blah" "-%" "--?" "-*" "-=" \
+ "-a" "-b" "-c" "-d" "-e" "-f" "-g" "-h" "-i" "-j" "-k" "-l" \
+ "-m" "-n" "-o" "-p" "-q" "-r" "-s" "-t" "-u" "-v" "-w" "-x" "-y" "-z" \
+ "-A" "-B" "-C" "-D" "-E" "-F" "-G" "-H" "-I" "-J" "-K" "-L" \
+ "-M" "-N" "-O" "-P" "-Q" "-R" "-S" "-T" "-U" "-V" "-W" "-X" "-W" "-Z"
+
+function cleanup
+{
+ log_must destroy_pool $TESTPOOL2
+ log_must rm -f $TEST_BASE_DIR/zpool_resilver.dat
+}
+
+log_onexit cleanup
+
+log_assert "Execute 'zpool resilver' using invalid parameters."
+
+typeset -i i=0
+while [[ $i -lt ${#args[*]} ]]; do
+ log_mustnot zpool resilver ${args[i]}
+
+ ((i = i + 1))
+done
+
+log_must mkfile $MINVDEVSIZE $TEST_BASE_DIR/zpool_resilver.dat
+log_must zpool create -d $TESTPOOL2 $TEST_BASE_DIR/zpool_resilver.dat
+log_mustnot zpool resilver $TESTPOOL2
+
+log_pass "Badly formed 'zpool resilver' parameters fail as expected."
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh
new file mode 100755
index 0000000000..4f98ced960
--- /dev/null
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh
@@ -0,0 +1,86 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2018 Datto Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_resilver/zpool_resilver.cfg
+
+#
+# DESCRIPTION:
+# "Verify 'zpool resilver' restarts in-progress resilvers"
+#
+# STRATEGY:
+# 1. Write some data and detach the first drive so it has resilver
+# work to do
+# 2. Repeat the process with a second disk
+# 3. Reattach the drives, causing the second drive's resilver to be
+# deferred
+# 4. Manually restart the resilver with all drives
+#
+
+verify_runnable "global"
+
+function cleanup
+{
+ log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must rm -f $mntpnt/biggerfile1
+ log_must rm -f $mntpnt/biggerfile2
+}
+
+log_onexit cleanup
+
+log_assert "Verify 'zpool resilver' restarts in-progress resilvers"
+
+mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
+
+# 1. Write some data and detach the first drive so it has resilver work to do
+log_must file_write -b 524288 -c 1024 -o create -d 0 -f $mntpnt/biggerfile1
+log_must sync
+log_must zpool detach $TESTPOOL $DISK2
+
+# 2. Repeat the process with a second disk
+log_must file_write -b 524288 -c 1024 -o create -d 0 -f $mntpnt/biggerfile2
+log_must sync
+log_must zpool detach $TESTPOOL $DISK3
+
+# 3. Reattach the drives, causing the second drive's resilver to be deferred
+log_must set_tunable32 zfs_scan_suspend_progress 1
+
+log_must zpool attach $TESTPOOL $DISK1 $DISK2
+log_must is_pool_resilvering $TESTPOOL true
+
+log_must zpool attach $TESTPOOL $DISK1 $DISK3
+log_must is_pool_resilvering $TESTPOOL true
+
+# 4. Manually restart the resilver with all drives
+log_must zpool resilver $TESTPOOL
+log_must is_deferred_scan_started $TESTPOOL
+log_must set_tunable32 zfs_scan_suspend_progress 0
+log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT
+log_must check_state $TESTPOOL "$DISK2" "online"
+log_must check_state $TESTPOOL "$DISK3" "online"
+
+log_pass "Verified 'zpool resilver' restarts in-progress resilvers"
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh
index b8674764bc..ca1a3f4693 100644
--- a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh
@@ -30,5 +30,5 @@
verify_runnable "global"
-log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_DEFAULT
+log_must set_tunable32 zfs_scan_suspend_progress 0
destroy_mirrors
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh
index 712097bb1c..71a204060b 100644
--- a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh
@@ -45,18 +45,12 @@
# 5. Resume the paused scrub and verify scrub is again being performed.
# 6. Verify zpool scrub -s succeed when the system is scrubbing.
#
-# NOTES:
-# Artificially limit the scrub speed by setting the zfs_scan_vdev_limit
-# low and adding a 50ms zio delay in order to ensure that the scrub does
-# not complete early.
-#
verify_runnable "global"
function cleanup
{
- log_must zinject -c all
- log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_DEFAULT
+ log_must set_tunable32 zfs_scan_suspend_progress 0
log_must rm -f $mntpnt/biggerfile
}
@@ -69,8 +63,7 @@ mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
log_must file_write -b 1048576 -c 1024 -o create -d 0 -f $mntpnt/biggerfile
log_must sync
-log_must zinject -d $DISK1 -D50:1 $TESTPOOL
-log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_SLOW
+log_must set_tunable32 zfs_scan_suspend_progress 1
log_must zpool scrub $TESTPOOL
log_must is_pool_scrubbing $TESTPOOL true
log_must zpool scrub -p $TESTPOOL
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh
index c52ad84bc5..56225456b8 100644
--- a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh
@@ -42,23 +42,19 @@
# 2. Kick off a scrub
# 3. Kick off a second scrub and verify it fails
#
-# NOTES:
-# Artificially limit the scrub speed by setting the zfs_scan_vdev_limit
-# low in order to ensure that the scrub does not complete early.
-#
verify_runnable "global"
function cleanup
{
- log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_DEFAULT
+ log_must set_tunable32 zfs_scan_suspend_progress 0
}
log_onexit cleanup
log_assert "Scrub command fails when there is already a scrub in progress"
-log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_SLOW
+log_must set_tunable32 zfs_scan_suspend_progress 1
log_must zpool scrub $TESTPOOL
log_must is_pool_scrubbing $TESTPOOL true
log_mustnot zpool scrub $TESTPOOL
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh
index 492c7b20aa..9b6274cd10 100644
--- a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh
@@ -39,24 +39,40 @@
# STRATEGY:
# 1. Setup a mirror pool and filled with data.
# 2. Detach one of devices
-# 3. Verify scrub failed until the resilver completed
+# 3. Create a file for the resilver to work on so it takes some time
+# 4. Export/import the pool to ensure the cache is dropped
+# 5. Verify scrub failed until the resilver completed
#
function cleanup
{
- log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_DEFAULT
+ log_must set_tunable32 zfs_scan_suspend_progress 0
+ rm -f $mntpnt/extra
}
verify_runnable "global"
+log_onexit cleanup
+
log_assert "Resilver prevent scrub from starting until the resilver completes"
-log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_SLOW
-log_must zpool detach $TESTPOOL $DISK2
-log_must zpool attach $TESTPOOL $DISK1 $DISK2
+mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
+
+# Temporarily prevent scan progress so our test doesn't race
+log_must set_tunable32 zfs_scan_suspend_progress 1
+
+while ! is_pool_resilvering $TESTPOOL; do
+ log_must zpool detach $TESTPOOL $DISK2
+ log_must file_write -b 1048576 -c 128 -o create -d 0 -f $mntpnt/extra
+ log_must zpool export $TESTPOOL
+ log_must zpool import $TESTPOOL
+ log_must zpool attach $TESTPOOL $DISK1 $DISK2
+done
+
log_must is_pool_resilvering $TESTPOOL
log_mustnot zpool scrub $TESTPOOL
+log_must set_tunable32 zfs_scan_suspend_progress 0
while ! is_pool_resilvered $TESTPOOL; do
sleep 1
done
diff --git a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh
index 7a07e64334..77555d1fc1 100755
--- a/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh
+++ b/usr/src/test/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_offline_device.ksh
@@ -22,6 +22,7 @@
#
# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+# Copyright 2019 Joyent, Inc.
#
. $STF_SUITE/include/libtest.shlib
@@ -125,7 +126,8 @@ log_must wait_for_resilver_end $TESTPOOL2 $RESILVER_TIMEOUT
zpool_scrub_sync $TESTPOOL2
# 7. Verify data integrity
-cksum=$(zpool status $TESTPOOL2 | awk 'L{print $NF;L=0} /CKSUM$/{L=1}')
+cksum=$(zpool status $TESTPOOL2 | awk '{if ($NF == "CKSUM") {fnd=1; next} \
+ if (fnd && NF < 1) {fnd=0; next} if (fnd) csum += $NF} END {print csum}')
if [[ $cksum != 0 ]]; then
log_fail "Unexpected CKSUM errors found on $TESTPOOL2 ($cksum)"
fi
diff --git a/usr/src/test/zfs-tests/tests/functional/removal/removal.kshlib b/usr/src/test/zfs-tests/tests/functional/removal/removal.kshlib
index a13014a601..05488572c7 100644
--- a/usr/src/test/zfs-tests/tests/functional/removal/removal.kshlib
+++ b/usr/src/test/zfs-tests/tests/functional/removal/removal.kshlib
@@ -62,7 +62,7 @@ function attempt_during_removal # pool disk callback [args]
typeset callback=$3
shift 3
- mdb_ctf_set_int zfs_remove_max_bytes_pause 0t0
+ mdb_ctf_set_int zfs_removal_suspend_progress 0t1
log_must zpool remove $pool $disk
@@ -81,7 +81,7 @@ function attempt_during_removal # pool disk callback [args]
#
log_must is_pool_removing $pool
- mdb_ctf_set_int zfs_remove_max_bytes_pause -0t1
+ mdb_ctf_set_int zfs_removal_suspend_progress 0t0
log_must wait_for_removal $pool
log_mustnot vdevs_in_pool $pool $disk
diff --git a/usr/src/uts/common/fs/smbsrv/smb_common_open.c b/usr/src/uts/common/fs/smbsrv/smb_common_open.c
index 7b4afdabc6..bca7530c6e 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_common_open.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_common_open.c
@@ -38,6 +38,8 @@
#include <smbsrv/smb_fsops.h>
#include <smbsrv/smbinfo.h>
+int smb_session_ofile_max = 32768;
+
static volatile uint32_t smb_fids = 0;
#define SMB_UNIQ_FID() atomic_inc_32_nv(&smb_fids)
@@ -339,7 +341,7 @@ smb_open_subr(smb_request_t *sr)
}
op->desired_access = smb_access_generic_to_file(op->desired_access);
- if (sr->session->s_file_cnt >= SMB_SESSION_OFILE_MAX) {
+ if (sr->session->s_file_cnt >= smb_session_ofile_max) {
ASSERT(sr->uid_user);
cmn_err(CE_NOTE, "smbsrv[%s\\%s]: TOO_MANY_OPENED_FILES",
sr->uid_user->u_domain, sr->uid_user->u_name);
@@ -527,6 +529,11 @@ smb_open_subr(smb_request_t *sr)
smb_node_release(dnode);
return (NT_STATUS_ACCESS_DENIED);
}
+ if (op->create_options & FILE_DELETE_ON_CLOSE) {
+ smb_node_release(node);
+ smb_node_release(dnode);
+ return (NT_STATUS_CANNOT_DELETE);
+ }
}
}
@@ -695,6 +702,14 @@ smb_open_subr(smb_request_t *sr)
}
/*
+ * Don't create in directories marked "Delete on close".
+ */
+ if (dnode->flags & NODE_FLAGS_DELETE_ON_CLOSE) {
+ smb_node_release(dnode);
+ return (NT_STATUS_DELETE_PENDING);
+ }
+
+ /*
* lock the parent dir node in case another create
* request to the same parent directory comes in.
*/
diff --git a/usr/src/uts/common/fs/smbsrv/smb_node.c b/usr/src/uts/common/fs/smbsrv/smb_node.c
index 9a3a6199b6..eb34b04038 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_node.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_node.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
*/
/*
* SMB Node State Machine
@@ -596,8 +596,8 @@ smb_node_root_init(smb_server_t *sv, smb_node_t **svrootp)
/*
* Helper function for smb_node_set_delete_on_close(). Assumes node is a dir.
* Return 0 if this is an empty dir. Otherwise return a NT_STATUS code.
- * We distinguish between readdir failure and non-empty dir by returning
- * different values.
+ * Unfortunately, to find out if a directory is empty, we have to read it
+ * and check for anything other than "." or ".." in the readdir buf.
*/
static uint32_t
smb_rmdir_possible(smb_node_t *n, uint32_t flags)
@@ -618,9 +618,9 @@ smb_rmdir_possible(smb_node_t *n, uint32_t flags)
#define dp u.u_dp
if (smb_vop_readdir(n->vp, 0, buf, &bsize, &eof, flags, zone_kcred()))
- return (NT_STATUS_CANNOT_DELETE);
+ return (NT_STATUS_INTERNAL_ERROR);
if (bsize == 0)
- return (NT_STATUS_CANNOT_DELETE);
+ return (0); /* empty dir */
bufptr = buf;
while ((bufptr += reclen) < buf + bsize) {
if (edp) {
@@ -647,23 +647,13 @@ smb_rmdir_possible(smb_node_t *n, uint32_t flags)
* whichever the first file handle is closed will trigger the node to be
* marked as delete-on-close. The credentials of that ofile will be used
* as the delete-on-close credentials of the node.
+ *
+ * Note that "read-only" tests have already happened before this call.
*/
uint32_t
smb_node_set_delete_on_close(smb_node_t *node, cred_t *cr, uint32_t flags)
{
- int rc = 0;
uint32_t status;
- smb_attr_t attr;
-
- if (node->n_pending_dosattr & FILE_ATTRIBUTE_READONLY)
- return (NT_STATUS_CANNOT_DELETE);
-
- bzero(&attr, sizeof (smb_attr_t));
- attr.sa_mask = SMB_AT_DOSATTR;
- rc = smb_fsop_getattr(NULL, zone_kcred(), node, &attr);
- if ((rc != 0) || (attr.sa_dosattr & FILE_ATTRIBUTE_READONLY)) {
- return (NT_STATUS_CANNOT_DELETE);
- }
/*
* If the directory is not empty we should fail setting del-on-close
@@ -679,8 +669,9 @@ smb_node_set_delete_on_close(smb_node_t *node, cred_t *cr, uint32_t flags)
mutex_enter(&node->n_mutex);
if (node->flags & NODE_FLAGS_DELETE_ON_CLOSE) {
+ /* It was already marked. We're done. */
mutex_exit(&node->n_mutex);
- return (NT_STATUS_CANNOT_DELETE);
+ return (NT_STATUS_SUCCESS);
}
crhold(cr);
diff --git a/usr/src/uts/common/fs/smbsrv/smb_ofile.c b/usr/src/uts/common/fs/smbsrv/smb_ofile.c
index fb5ae2c1b0..25dee633bf 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_ofile.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_ofile.c
@@ -1143,13 +1143,10 @@ smb_ofile_open_check(smb_ofile_t *of, uint32_t desired_access,
/*
* smb_ofile_rename_check
*
- * An open file can be renamed if
- *
- * 1. isn't opened for data writing or deleting
- *
- * 2. Opened with "Deny Delete" share mode
- * But not opened for data reading or executing
- * (opened for accessing meta data)
+ * This does the work described in MS-FSA 2.1.5.1.2.2 (Algorithm
+ * to Check Sharing Access to an Existing Stream or Directory),
+ * where the "open in-progress" has DesiredAccess = DELETE and
+ * SharingMode = SHARE_READ | SHARE_WRITE | SHARE_DELETE.
*/
uint32_t
@@ -1164,18 +1161,14 @@ smb_ofile_rename_check(smb_ofile_t *of)
return (NT_STATUS_INVALID_HANDLE);
}
- if (of->f_granted_access &
- (FILE_WRITE_DATA | FILE_APPEND_DATA | DELETE)) {
+ if ((of->f_granted_access & FILE_DATA_ALL) == 0) {
mutex_exit(&of->f_mutex);
- return (NT_STATUS_SHARING_VIOLATION);
+ return (NT_STATUS_SUCCESS);
}
if ((of->f_share_access & FILE_SHARE_DELETE) == 0) {
- if (of->f_granted_access &
- (FILE_READ_DATA | FILE_EXECUTE)) {
- mutex_exit(&of->f_mutex);
- return (NT_STATUS_SHARING_VIOLATION);
- }
+ mutex_exit(&of->f_mutex);
+ return (NT_STATUS_SHARING_VIOLATION);
}
mutex_exit(&of->f_mutex);
diff --git a/usr/src/uts/common/fs/smbsrv/smb_trans2_dfs.c b/usr/src/uts/common/fs/smbsrv/smb_trans2_dfs.c
index 918bf78727..2c10577413 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_trans2_dfs.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_trans2_dfs.c
@@ -72,16 +72,16 @@ smb_com_trans2_get_dfs_referral(smb_request_t *sr, smb_xa_t *xa)
status = smb_dfs_get_referrals(sr, &fsctl);
- /* Out param is the API-level return code. */
+ /*
+ * Out param is the API-level return code.
+ * Out data (rep_data_mb) is the referral.
+ */
doserr = smb_status2doserr(status);
(void) smb_mbc_encodef(&xa->rep_param_mb, "w", doserr);
-
-#if 0 /* XXX - Is API-level return code enough? */
- if (status) {
- smbsr_error(sr, NT_STATUS_NO_SUCH_DEVICE, 0, 0);
+ if (status != 0) {
+ smbsr_error(sr, status, ERRDOS, doserr);
return (SDRC_ERROR);
}
-#endif
return (SDRC_SUCCESS);
}
diff --git a/usr/src/uts/common/fs/zfs/dsl_scan.c b/usr/src/uts/common/fs/zfs/dsl_scan.c
index 78c8c2a581..b5ef5a89e9 100644
--- a/usr/src/uts/common/fs/zfs/dsl_scan.c
+++ b/usr/src/uts/common/fs/zfs/dsl_scan.c
@@ -183,12 +183,15 @@ unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
unsigned int zfs_obsolete_min_time_ms = 500;
/* min millisecs to resilver per txg */
unsigned int zfs_resilver_min_time_ms = 3000;
+int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
uint64_t zfs_async_block_max_blocks = UINT64_MAX;
+int zfs_resilver_disable_defer = 0; /* set to disable resilver deferring */
+
/*
* We wait a few txgs after importing a pool to begin scanning so that
* the import / mounting code isn't held up by scrub / resilver IO.
@@ -455,7 +458,6 @@ dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_ASYNC_DESTROY);
- bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
offsetof(scan_ds_t, sds_node));
avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
@@ -513,6 +515,8 @@ dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
}
}
+ bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
+
/* reload the queue into the in-core state */
if (scn->scn_phys.scn_queue_obj != 0) {
zap_cursor_t zc;
@@ -751,6 +755,11 @@ dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
spa->spa_scrub_reopen = B_FALSE;
(void) spa_vdev_state_exit(spa, NULL, 0);
+ if (func == POOL_SCAN_RESILVER) {
+ dsl_resilver_restart(spa->spa_dsl_pool, 0);
+ return (0);
+ }
+
if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
/* got scrub start cmd, resume paused scrub */
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
@@ -766,6 +775,41 @@ dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
+/*
+ * Sets the resilver defer flag to B_FALSE on all leaf devs under vd. Returns
+ * B_TRUE if we have devices that need to be resilvered and are available to
+ * accept resilver I/Os.
+ */
+static boolean_t
+dsl_scan_clear_deferred(vdev_t *vd, dmu_tx_t *tx)
+{
+ boolean_t resilver_needed = B_FALSE;
+ spa_t *spa = vd->vdev_spa;
+
+ for (int c = 0; c < vd->vdev_children; c++) {
+ resilver_needed |=
+ dsl_scan_clear_deferred(vd->vdev_child[c], tx);
+ }
+
+ if (vd == spa->spa_root_vdev &&
+ spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
+ spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
+ vdev_config_dirty(vd);
+ spa->spa_resilver_deferred = B_FALSE;
+ return (resilver_needed);
+ }
+
+ if (!vdev_is_concrete(vd) || vd->vdev_aux ||
+ !vd->vdev_ops->vdev_op_leaf)
+ return (resilver_needed);
+
+ if (vd->vdev_resilver_deferred)
+ vd->vdev_resilver_deferred = B_FALSE;
+
+ return (!vdev_is_dead(vd) && !vd->vdev_offline &&
+ vdev_resilver_needed(vd, NULL, NULL));
+}
+
/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
@@ -865,6 +909,25 @@ dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
* Let the async thread assess this and handle the detach.
*/
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
+
+ /*
+ * Clear any deferred_resilver flags in the config.
+ * If there are drives that need resilvering, kick
+ * off an asynchronous request to start resilver.
+ * dsl_scan_clear_deferred() may update the config
+ * before the resilver can restart. In the event of
+ * a crash during this period, the spa loading code
+ * will find the drives that need to be resilvered
+ * when the machine reboots and start the resilver then.
+ */
+ boolean_t resilver_needed =
+ dsl_scan_clear_deferred(spa->spa_root_vdev, tx);
+ if (resilver_needed) {
+ spa_history_log_internal(spa,
+ "starting deferred resilver", tx,
+ "errors=%llu", spa_get_errlog_size(spa));
+ spa_async_request(spa, SPA_ASYNC_RESILVER);
+ }
}
scn->scn_phys.scn_end_time = gethrestime_sec();
@@ -935,6 +998,7 @@ dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
/* can't pause a scrub when there is no in-progress scrub */
spa->spa_scan_pass_scrub_pause = gethrestime_sec();
scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
+ scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
} else {
@@ -949,6 +1013,7 @@ dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
spa->spa_scan_pass_scrub_pause = 0;
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
+ scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
}
@@ -2335,6 +2400,20 @@ dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
if (scn->scn_phys.scn_state != DSS_SCANNING)
return;
+ /*
+ * This function is special because it is the only thing
+ * that can add scan_io_t's to the vdev scan queues from
+ * outside dsl_scan_sync(). For the most part this is ok
+ * as long as it is called from within syncing context.
+ * However, dsl_scan_sync() expects that no new sio's will
+ * be added between when all the work for a scan is done
+ * and the next txg when the scan is actually marked as
+ * completed. This check ensures we do not issue new sio's
+ * during this period.
+ */
+ if (scn->scn_done_txg != 0)
+ return;
+
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
@@ -2986,6 +3065,26 @@ dsl_scan_active(dsl_scan_t *scn)
}
static boolean_t
+dsl_scan_check_deferred(vdev_t *vd)
+{
+ boolean_t need_resilver = B_FALSE;
+
+ for (int c = 0; c < vd->vdev_children; c++) {
+ need_resilver |=
+ dsl_scan_check_deferred(vd->vdev_child[c]);
+ }
+
+ if (!vdev_is_concrete(vd) || vd->vdev_aux ||
+ !vd->vdev_ops->vdev_op_leaf)
+ return (need_resilver);
+
+ if (!vd->vdev_resilver_deferred)
+ need_resilver = B_TRUE;
+
+ return (need_resilver);
+}
+
+static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
@@ -3032,6 +3131,13 @@ dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
if (!vdev_dtl_need_resilver(vd, DVA_GET_OFFSET(dva), psize))
return (B_FALSE);
+ /*
+ * Check that this top-level vdev has a device under it which
+ * is resilvering and is not deferred.
+ */
+ if (!dsl_scan_check_deferred(vd))
+ return (B_FALSE);
+
return (B_TRUE);
}
@@ -3193,12 +3299,19 @@ dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
int err = 0;
state_sync_type_t sync_type = SYNC_OPTIONAL;
+ if (spa->spa_resilver_deferred &&
+ !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
+ spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
+
/*
* Check for scn_restart_txg before checking spa_load_state, so
* that we can restart an old-style scan while the pool is being
- * imported (see dsl_scan_init).
+ * imported (see dsl_scan_init). We also restart scans if there
+ * is a deferred resilver and the user has manually disabled
+ * deferred resilvers via the tunable.
*/
- if (dsl_scan_restarting(scn, tx)) {
+ if (dsl_scan_restarting(scn, tx) ||
+ (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
pool_scan_func_t func = POOL_SCAN_SCRUB;
dsl_scan_done(scn, B_FALSE, tx);
if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
@@ -3265,6 +3378,27 @@ dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
return;
/*
+ * zfs_scan_suspend_progress can be set to disable scan progress.
+ * We don't want to spin the txg_sync thread, so we add a delay
+ * here to simulate the time spent doing a scan. This is mostly
+ * useful for testing and debugging.
+ */
+ if (zfs_scan_suspend_progress) {
+ uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
+ int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
+ zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
+
+ while (zfs_scan_suspend_progress &&
+ !txg_sync_waiting(scn->scn_dp) &&
+ !spa_shutting_down(scn->scn_dp->dp_spa) &&
+ NSEC2MSEC(scan_time_ns) < mintime) {
+ delay(hz);
+ scan_time_ns = gethrtime() - scn->scn_sync_start_time;
+ }
+ return;
+ }
+
+ /*
* It is possible to switch from unsorted to sorted at any time,
* but afterwards the scan will remain sorted unless reloaded from
* a checkpoint after a reboot.
@@ -3393,6 +3527,8 @@ dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
(longlong_t)tx->tx_txg);
}
} else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) {
+ ASSERT(scn->scn_clearing);
+
/* need to issue scrubbing IOs from per-vdev queues */
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
diff --git a/usr/src/uts/common/fs/zfs/spa.c b/usr/src/uts/common/fs/zfs/spa.c
index 1eac783da7..92856782a2 100644
--- a/usr/src/uts/common/fs/zfs/spa.c
+++ b/usr/src/uts/common/fs/zfs/spa.c
@@ -6127,9 +6127,14 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
/*
* Schedule the resilver to restart in the future. We do this to
* ensure that dmu_sync-ed blocks have been stitched into the
- * respective datasets.
+ * respective datasets. We do not do this if resilvers have been
+ * deferred.
*/
- dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
+ if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
+ spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
+ vdev_set_deferred_resilver(spa, newvd);
+ else
+ dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
@@ -7020,6 +7025,10 @@ spa_scan(spa_t *spa, pool_scan_func_t func)
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
+ if (func == POOL_SCAN_RESILVER &&
+ !spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
+ return (SET_ERROR(ENOTSUP));
+
/*
* If a resilver was requested, but there is no DTL on a
* writeable leaf device, we have nothing to do.
@@ -7111,6 +7120,7 @@ static void
spa_async_thread(void *arg)
{
spa_t *spa = (spa_t *)arg;
+ dsl_pool_t *dp = spa->spa_dsl_pool;
int tasks;
ASSERT(spa->spa_sync_on);
@@ -7188,8 +7198,10 @@ spa_async_thread(void *arg)
/*
* Kick off a resilver.
*/
- if (tasks & SPA_ASYNC_RESILVER)
- dsl_resilver_restart(spa->spa_dsl_pool, 0);
+ if (tasks & SPA_ASYNC_RESILVER &&
+ (!dsl_scan_resilvering(dp) ||
+ !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
+ dsl_resilver_restart(dp, 0);
if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
mutex_enter(&spa_namespace_lock);
diff --git a/usr/src/uts/common/fs/zfs/sys/spa_impl.h b/usr/src/uts/common/fs/zfs/sys/spa_impl.h
index 337a34087c..b905f0d740 100644
--- a/usr/src/uts/common/fs/zfs/sys/spa_impl.h
+++ b/usr/src/uts/common/fs/zfs/sys/spa_impl.h
@@ -281,6 +281,13 @@ struct spa {
uint64_t spa_scan_pass_scrub_spent_paused; /* total paused */
uint64_t spa_scan_pass_exam; /* examined bytes per pass */
uint64_t spa_scan_pass_issued; /* issued bytes per pass */
+
+ /*
+ * We are in the middle of a resilver, and another resilver
+ * is needed once this one completes. This is set iff any
+ * vdev_resilver_deferred is set.
+ */
+ boolean_t spa_resilver_deferred;
kmutex_t spa_async_lock; /* protect async state */
kthread_t *spa_async_thread; /* thread doing async task */
int spa_async_suspended; /* async tasks suspended */
diff --git a/usr/src/uts/common/fs/zfs/sys/vdev.h b/usr/src/uts/common/fs/zfs/sys/vdev.h
index e21989641b..3a3662c612 100644
--- a/usr/src/uts/common/fs/zfs/sys/vdev.h
+++ b/usr/src/uts/common/fs/zfs/sys/vdev.h
@@ -149,6 +149,8 @@ extern int vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg);
extern void vdev_state_dirty(vdev_t *vd);
extern void vdev_state_clean(vdev_t *vd);
+extern void vdev_set_deferred_resilver(spa_t *spa, vdev_t *vd);
+
typedef enum vdev_config_flag {
VDEV_CONFIG_SPARE = 1 << 0,
VDEV_CONFIG_L2CACHE = 1 << 1,
diff --git a/usr/src/uts/common/fs/zfs/sys/vdev_impl.h b/usr/src/uts/common/fs/zfs/sys/vdev_impl.h
index 88f22938ea..b9b364f6e7 100644
--- a/usr/src/uts/common/fs/zfs/sys/vdev_impl.h
+++ b/usr/src/uts/common/fs/zfs/sys/vdev_impl.h
@@ -347,6 +347,7 @@ struct vdev {
boolean_t vdev_cant_write; /* vdev is failing all writes */
boolean_t vdev_isspare; /* was a hot spare */
boolean_t vdev_isl2cache; /* was a l2cache device */
+ boolean_t vdev_resilver_deferred; /* resilver deferred */
vdev_queue_t vdev_queue; /* I/O deadline schedule queue */
vdev_cache_t vdev_cache; /* physical block cache */
spa_aux_vdev_t *vdev_aux; /* for l2cache and spares vdevs */
diff --git a/usr/src/uts/common/fs/zfs/sys/zfs_fuid.h b/usr/src/uts/common/fs/zfs/sys/zfs_fuid.h
index 0feb3ce4bb..8efa99df3e 100644
--- a/usr/src/uts/common/fs/zfs/sys/zfs_fuid.h
+++ b/usr/src/uts/common/fs/zfs/sys/zfs_fuid.h
@@ -21,6 +21,8 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
*/
#ifndef _SYS_FS_ZFS_FUID_H
@@ -70,8 +72,8 @@ typedef enum {
* Used for mapping ephemeral uid/gid during ACL setting to FUIDs
*/
typedef struct zfs_fuid {
- list_node_t z_next;
- uint64_t z_id; /* uid/gid being converted to fuid */
+ list_node_t z_next;
+ uint64_t z_id; /* uid/gid being converted to fuid */
uint64_t z_domidx; /* index in AVL domain table */
uint64_t z_logfuid; /* index for domain in log */
} zfs_fuid_t;
@@ -111,6 +113,7 @@ extern void zfs_fuid_map_ids(struct znode *zp, cred_t *cr,
uid_t *uid, uid_t *gid);
extern zfs_fuid_info_t *zfs_fuid_info_alloc(void);
extern void zfs_fuid_info_free(zfs_fuid_info_t *);
+extern boolean_t zfs_user_in_cred(zfsvfs_t *, uint64_t, cred_t *);
extern boolean_t zfs_groupmember(zfsvfs_t *, uint64_t, cred_t *);
void zfs_fuid_sync(zfsvfs_t *, dmu_tx_t *);
extern int zfs_fuid_find_by_domain(zfsvfs_t *, const char *domain,
diff --git a/usr/src/uts/common/fs/zfs/vdev.c b/usr/src/uts/common/fs/zfs/vdev.c
index 0dc86bd195..c9f1212168 100644
--- a/usr/src/uts/common/fs/zfs/vdev.c
+++ b/usr/src/uts/common/fs/zfs/vdev.c
@@ -759,6 +759,9 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
&vd->vdev_resilver_txg);
+ if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
+ vdev_set_deferred_resilver(spa, vd);
+
/*
* When importing a pool, we want to ignore the persistent fault
* state, as the diagnosis made on another system may not be
@@ -1732,8 +1735,13 @@ vdev_open(vdev_t *vd)
* since this would just restart the scrub we are already doing.
*/
if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
- vdev_resilver_needed(vd, NULL, NULL))
- spa_async_request(spa, SPA_ASYNC_RESILVER);
+ vdev_resilver_needed(vd, NULL, NULL)) {
+ if (dsl_scan_resilvering(spa->spa_dsl_pool) &&
+ spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
+ vdev_set_deferred_resilver(spa, vd);
+ else
+ spa_async_request(spa, SPA_ASYNC_RESILVER);
+ }
return (0);
}
@@ -2440,6 +2448,9 @@ vdev_dtl_should_excise(vdev_t *vd)
if (vd->vdev_state < VDEV_STATE_DEGRADED)
return (B_FALSE);
+ if (vd->vdev_resilver_deferred)
+ return (B_FALSE);
+
if (vd->vdev_resilver_txg == 0 ||
range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
return (B_TRUE);
@@ -3473,8 +3484,14 @@ vdev_clear(spa_t *spa, vdev_t *vd)
if (vd != rvd && vdev_writeable(vd->vdev_top))
vdev_state_dirty(vd->vdev_top);
- if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
- spa_async_request(spa, SPA_ASYNC_RESILVER);
+ if (vd->vdev_aux == NULL && !vdev_is_dead(vd)) {
+ if (dsl_scan_resilvering(spa->spa_dsl_pool) &&
+ spa_feature_is_enabled(spa,
+ SPA_FEATURE_RESILVER_DEFER))
+ vdev_set_deferred_resilver(spa, vd);
+ else
+ spa_async_request(spa, SPA_ASYNC_RESILVER);
+ }
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
@@ -3617,6 +3634,8 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
vd->vdev_mg->mg_fragmentation : 0;
}
+ if (vd->vdev_ops->vdev_op_leaf)
+ vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
@@ -4330,3 +4349,18 @@ vdev_deadman(vdev_t *vd)
mutex_exit(&vq->vq_lock);
}
}
+
+void
+vdev_set_deferred_resilver(spa_t *spa, vdev_t *vd)
+{
+ for (uint64_t i = 0; i < vd->vdev_children; i++)
+ vdev_set_deferred_resilver(spa, vd->vdev_child[i]);
+
+ if (!vd->vdev_ops->vdev_op_leaf || !vdev_writeable(vd) ||
+ range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
+ return;
+ }
+
+ vd->vdev_resilver_deferred = B_TRUE;
+ spa->spa_resilver_deferred = B_TRUE;
+}
diff --git a/usr/src/uts/common/fs/zfs/vdev_indirect.c b/usr/src/uts/common/fs/zfs/vdev_indirect.c
index 958e8cd858..062c4073a8 100644
--- a/usr/src/uts/common/fs/zfs/vdev_indirect.c
+++ b/usr/src/uts/common/fs/zfs/vdev_indirect.c
@@ -1239,6 +1239,8 @@ vdev_indirect_read_all(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
+ ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
+
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int i = 0; i < is->is_children; i++) {
@@ -1321,7 +1323,8 @@ vdev_indirect_io_start(zio_t *zio)
vdev_indirect_child_io_done, zio));
} else {
iv->iv_split_block = B_TRUE;
- if (zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
+ if (zio->io_type == ZIO_TYPE_READ &&
+ zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
/*
* Read all copies. Note that for simplicity,
* we don't bother consulting the DTL in the
@@ -1330,13 +1333,17 @@ vdev_indirect_io_start(zio_t *zio)
vdev_indirect_read_all(zio);
} else {
/*
- * Read one copy of each split segment, from the
- * top-level vdev. Since we don't know the
- * checksum of each split individually, the child
- * zio can't ensure that we get the right data.
- * E.g. if it's a mirror, it will just read from a
- * random (healthy) leaf vdev. We have to verify
- * the checksum in vdev_indirect_io_done().
+ * If this is a read zio, we read one copy of each
+ * split segment, from the top-level vdev. Since
+ * we don't know the checksum of each split
+ * individually, the child zio can't ensure that
+ * we get the right data. E.g. if it's a mirror,
+ * it will just read from a random (healthy) leaf
+ * vdev. We have to verify the checksum in
+ * vdev_indirect_io_done().
+ *
+ * For write zios, the vdev code will ensure we write
+ * to all children.
*/
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
diff --git a/usr/src/uts/common/fs/zfs/vdev_label.c b/usr/src/uts/common/fs/zfs/vdev_label.c
index 3c469ffe43..abf1c666a6 100644
--- a/usr/src/uts/common/fs/zfs/vdev_label.c
+++ b/usr/src/uts/common/fs/zfs/vdev_label.c
@@ -377,6 +377,12 @@ vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
vd->vdev_top_zap);
}
+
+ if (vd->vdev_resilver_deferred) {
+ ASSERT(vd->vdev_ops->vdev_op_leaf);
+ ASSERT(spa->spa_resilver_deferred);
+ fnvlist_add_boolean(nv, ZPOOL_CONFIG_RESILVER_DEFER);
+ }
}
if (getstats) {
diff --git a/usr/src/uts/common/fs/zfs/vdev_removal.c b/usr/src/uts/common/fs/zfs/vdev_removal.c
index c6874ae06a..aaaece7c4d 100644
--- a/usr/src/uts/common/fs/zfs/vdev_removal.c
+++ b/usr/src/uts/common/fs/zfs/vdev_removal.c
@@ -127,7 +127,7 @@ int vdev_removal_max_span = 32 * 1024;
* This is used by the test suite so that it can ensure that certain
* actions happen while in the middle of a removal.
*/
-uint64_t zfs_remove_max_bytes_pause = UINT64_MAX;
+int zfs_removal_suspend_progress = 0;
#define VDEV_REMOVAL_ZAP_OBJS "lzap"
@@ -1433,14 +1433,14 @@ spa_vdev_remove_thread(void *arg)
/*
* This delay will pause the removal around the point
- * specified by zfs_remove_max_bytes_pause. We do this
+ * specified by zfs_removal_suspend_progress. We do this
* solely from the test suite or during debugging.
*/
uint64_t bytes_copied =
spa->spa_removing_phys.sr_copied;
for (int i = 0; i < TXG_SIZE; i++)
bytes_copied += svr->svr_bytes_done[i];
- while (zfs_remove_max_bytes_pause <= bytes_copied &&
+ while (zfs_removal_suspend_progress &&
!svr->svr_thread_exit)
delay(hz);
diff --git a/usr/src/uts/common/fs/zfs/zfs_acl.c b/usr/src/uts/common/fs/zfs/zfs_acl.c
index 149103206a..be97c64514 100644
--- a/usr/src/uts/common/fs/zfs/zfs_acl.c
+++ b/usr/src/uts/common/fs/zfs/zfs_acl.c
@@ -2106,18 +2106,13 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_acl_t *aclp;
int error;
- uid_t uid = crgetuid(cr);
- uint64_t who;
+ uint64_t who; /* FUID from the ACE */
uint16_t type, iflags;
uint16_t entry_type;
uint32_t access_mask;
uint32_t deny_mask = 0;
zfs_ace_hdr_t *acep = NULL;
- boolean_t checkit;
- uid_t gowner;
- uid_t fowner;
-
- zfs_fuid_map_ids(zp, cr, &fowner, &gowner);
+ boolean_t checkit; /* ACE ID matches */
mutex_enter(&zp->z_acl_lock);
@@ -2150,11 +2145,13 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
switch (entry_type) {
case ACE_OWNER:
- if (uid == fowner)
- checkit = B_TRUE;
+ who = zp->z_uid;
+ /*FALLTHROUGH*/
+ case 0: /* USER Entry */
+ checkit = zfs_user_in_cred(zfsvfs, who, cr);
break;
case OWNING_GROUP:
- who = gowner;
+ who = zp->z_gid;
/*FALLTHROUGH*/
case ACE_IDENTIFIER_GROUP:
checkit = zfs_groupmember(zfsvfs, who, cr);
@@ -2163,21 +2160,13 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
checkit = B_TRUE;
break;
- /* USER Entry */
default:
- if (entry_type == 0) {
- uid_t newid;
-
- newid = zfs_fuid_map_id(zfsvfs, who, cr,
- ZFS_ACE_USER);
- if (newid != IDMAP_WK_CREATOR_OWNER_UID &&
- uid == newid)
- checkit = B_TRUE;
- break;
- } else {
- mutex_exit(&zp->z_acl_lock);
- return (SET_ERROR(EIO));
- }
+ /*
+ * The zfs_acl_valid_ace_type check above
+ * should make this case impossible.
+ */
+ mutex_exit(&zp->z_acl_lock);
+ return (SET_ERROR(EIO));
}
if (checkit) {
diff --git a/usr/src/uts/common/fs/zfs/zfs_fuid.c b/usr/src/uts/common/fs/zfs/zfs_fuid.c
index 7877a97b49..9ce111d9ce 100644
--- a/usr/src/uts/common/fs/zfs/zfs_fuid.c
+++ b/usr/src/uts/common/fs/zfs/zfs_fuid.c
@@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/zfs_context.h>
@@ -492,7 +493,7 @@ zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
uint64_t idx;
ksid_t *ksid;
uint32_t rid;
- char *kdomain;
+ char *kdomain;
const char *domain;
uid_t id;
@@ -685,6 +686,57 @@ zfs_fuid_info_free(zfs_fuid_info_t *fuidp)
}
/*
+ * Check to see if user ID is in the list of SIDs in CR.
+ */
+boolean_t
+zfs_user_in_cred(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
+{
+ ksid_t *ksid = crgetsid(cr, KSID_USER);
+ ksidlist_t *ksidlist = crgetsidlist(cr);
+ uid_t uid;
+
+ /* Check for match with cred->cr_uid */
+ uid = zfs_fuid_map_id(zfsvfs, id, cr, ZFS_ACE_USER);
+ if (uid != IDMAP_WK_CREATOR_OWNER_UID &&
+ uid == crgetuid(cr))
+ return (B_TRUE);
+
+ /* Check for any match in the ksidlist */
+ if (ksid && ksidlist) {
+ int i;
+ ksid_t *ksid_vec;
+ uint32_t idx = FUID_INDEX(id);
+ uint32_t rid = FUID_RID(id);
+ const char *domain;
+
+ if (idx == 0) {
+ /*
+ * The ID passed in has idx zero, which means
+ * it's just a Unix UID. That can never match
+ * anything in ksid_vec[] because those all
+ * have ksid->ks_id set to a Group ID.
+ */
+ return (B_FALSE);
+ }
+
+ domain = zfs_fuid_find_by_idx(zfsvfs, idx);
+ ASSERT(domain != NULL);
+
+ if (strcmp(domain, IDMAP_WK_CREATOR_SID_AUTHORITY) == 0)
+ return (B_FALSE);
+
+ ksid_vec = ksidlist->ksl_sids;
+ for (i = 0; i != ksidlist->ksl_nsid; i++) {
+ if ((strcmp(domain,
+ ksid_vec[i].ks_domain->kd_name) == 0) &&
+ rid == ksid_vec[i].ks_rid)
+ return (B_TRUE);
+ }
+ }
+ return (B_FALSE);
+}
+
+/*
* Check to see if id is a groupmember. If cred
* has ksid info then sidlist is checked first
* and if still not found then POSIX groups are checked
@@ -699,7 +751,7 @@ zfs_groupmember(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr)
uid_t gid;
if (ksid && ksidlist) {
- int i;
+ int i;
ksid_t *ksid_groups;
uint32_t idx = FUID_INDEX(id);
uint32_t rid = FUID_RID(id);
diff --git a/usr/src/uts/common/fs/zfs/zil.c b/usr/src/uts/common/fs/zfs/zil.c
index 547ebac383..c51175f6e3 100644
--- a/usr/src/uts/common/fs/zfs/zil.c
+++ b/usr/src/uts/common/fs/zfs/zil.c
@@ -1253,7 +1253,7 @@ zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
* root zios). This is required because of how we can
* defer the DKIOCFLUSHWRITECACHE commands for each lwb.
*
- * When the DKIOCFLUSHWRITECACHE commands are defered,
+ * When the DKIOCFLUSHWRITECACHE commands are deferred,
* the previous lwb will rely on this lwb to flush the
* vdevs written to by that previous lwb. Thus, we need
* to ensure this lwb doesn't issue the flush until
diff --git a/usr/src/uts/common/io/lofi.c b/usr/src/uts/common/io/lofi.c
index 38cde773d4..f59c7ec848 100644
--- a/usr/src/uts/common/io/lofi.c
+++ b/usr/src/uts/common/io/lofi.c
@@ -25,6 +25,7 @@
* Copyright (c) 2016 Andrey Sokolov
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright 2019 Joyent, Inc.
+ * Copyright 2019 OmniOS Community Edition (OmniOSce) Association.
*/
/*
@@ -2785,34 +2786,28 @@ lofi_copy_devpath(struct lofi_ioctl *klip)
(void) snprintf(namebuf, sizeof (namebuf), "%d", klip->li_id);
mutex_enter(&lofi_devlink_cache.ln_lock);
- do {
+ for (;;) {
error = nvlist_lookup_nvlist(lofi_devlink_cache.ln_data,
namebuf, &nvl);
- if (error != 0) {
- /* No data in cache, wait for some. */
- ticks = ddi_get_lbolt() +
- lofi_timeout * drv_usectohz(1000000);
- error = cv_timedwait(&lofi_devlink_cache.ln_cv,
- &lofi_devlink_cache.ln_lock, ticks);
- if (error == -1)
- break; /* timeout */
- error = 1;
- continue; /* Read again. */
- }
-
- if (nvl != NULL) {
- if (nvlist_lookup_string(nvl, DEV_NAME, &str) == 0) {
- if (strncmp(str, "/dev/" LOFI_CHAR_NAME,
- sizeof ("/dev/" LOFI_CHAR_NAME) - 1) == 0) {
- error = 1;
- continue;
- }
- (void) strlcpy(klip->li_devpath, str,
- sizeof (klip->li_devpath));
- }
+ if (error == 0 &&
+ nvlist_lookup_string(nvl, DEV_NAME, &str) == 0 &&
+ strncmp(str, "/dev/" LOFI_CHAR_NAME,
+ sizeof ("/dev/" LOFI_CHAR_NAME) - 1) != 0) {
+ (void) strlcpy(klip->li_devpath, str,
+ sizeof (klip->li_devpath));
+ break;
}
- } while (error != 0);
+ /*
+ * Either there is no data in the cache, or the
+ * cache entry still has the wrong device name.
+ */
+ ticks = ddi_get_lbolt() + lofi_timeout * drv_usectohz(1000000);
+ error = cv_timedwait(&lofi_devlink_cache.ln_cv,
+ &lofi_devlink_cache.ln_lock, ticks);
+ if (error == -1)
+ break; /* timeout */
+ }
mutex_exit(&lofi_devlink_cache.ln_lock);
}
diff --git a/usr/src/uts/common/smbsrv/smb_ktypes.h b/usr/src/uts/common/smbsrv/smb_ktypes.h
index f22c6d3a0e..20b1e1f014 100644
--- a/usr/src/uts/common/smbsrv/smb_ktypes.h
+++ b/usr/src/uts/common/smbsrv/smb_ktypes.h
@@ -792,8 +792,6 @@ typedef struct tcon {
*/
#define SMB_SESSION_INACTIVITY_TIMEOUT (15 * 60)
-#define SMB_SESSION_OFILE_MAX (16 * 1024)
-
/* SMB1 signing */
struct smb_sign {
unsigned int flags;
diff --git a/usr/src/uts/common/sys/fs/zfs.h b/usr/src/uts/common/sys/fs/zfs.h
index b54fb27d1e..9942a4c561 100644
--- a/usr/src/uts/common/sys/fs/zfs.h
+++ b/usr/src/uts/common/sys/fs/zfs.h
@@ -597,6 +597,7 @@ typedef struct zpool_load_policy {
#define ZPOOL_CONFIG_VDEV_TOP_ZAP "com.delphix:vdev_zap_top"
#define ZPOOL_CONFIG_VDEV_LEAF_ZAP "com.delphix:vdev_zap_leaf"
#define ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS "com.delphix:has_per_vdev_zaps"
+#define ZPOOL_CONFIG_RESILVER_DEFER "com.datto:resilver_defer"
#define ZPOOL_CONFIG_CACHEFILE "cachefile" /* not stored on disk */
#define ZPOOL_CONFIG_MMP_STATE "mmp_state" /* not stored on disk */
#define ZPOOL_CONFIG_MMP_TXG "mmp_txg" /* not stored on disk */
@@ -895,6 +896,7 @@ typedef struct vdev_stat {
uint64_t vs_initialize_state; /* vdev_initialzing_state_t */
uint64_t vs_initialize_action_time; /* time_t */
uint64_t vs_checkpoint_space; /* checkpoint-consumed space */
+ uint64_t vs_resilver_deferred; /* resilver deferred */
} vdev_stat_t;
/*
diff --git a/usr/src/uts/common/sys/tsol/tndb.h b/usr/src/uts/common/sys/tsol/tndb.h
index c7b68b66d8..f74cd5090d 100644
--- a/usr/src/uts/common/sys/tsol/tndb.h
+++ b/usr/src/uts/common/sys/tsol/tndb.h
@@ -248,6 +248,7 @@ typedef struct tsol_zcent {
} tsol_zcent_t;
#define TSOL_MLP_END(mlp) ((mlp)->mlp_ipp == 0 && (mlp)->mlp_port == 0)
+#if (defined(_KERNEL) || defined(_KMEMUSER))
typedef struct tsol_tpc {
kmutex_t tpc_lock; /* lock for structure */
uint_t tpc_refcnt; /* reference count */
@@ -265,6 +266,7 @@ typedef struct tsol_tnrhc {
char rhc_isbcast; /* broadcast address */
char rhc_local; /* loopback or local interace */
} tsol_tnrhc_t;
+#endif /* _KERNEL || _KMEMUSER */
/* Size of remote host hash tables in kernel */
#define TNRHC_SIZE 256
@@ -361,11 +363,13 @@ typedef struct tsol_address {
in_addr_t ip_address;
} tsol_address_t;
+#if (defined(_KERNEL) || defined(_KMEMUSER))
/* This is shared between tcache and mdb */
typedef struct tnrhc_hash_s {
tsol_tnrhc_t *tnrh_list;
kmutex_t tnrh_lock;
} tnrhc_hash_t;
+#endif /* _KERNEL || _KMEMUSER */
#ifdef _KERNEL
typedef enum {
diff --git a/usr/src/uts/common/sys/zone.h b/usr/src/uts/common/sys/zone.h
index a4ec347ce4..976841fae0 100644
--- a/usr/src/uts/common/sys/zone.h
+++ b/usr/src/uts/common/sys/zone.h
@@ -20,30 +20,39 @@
*/
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2018 Joyent, Inc.
+ * Copyright 2019 Nexenta Systems, Inc. All rights reserved.
* Copyright 2014 Igor Kozhukhov <ikozhukhov@gmail.com>.
- * Copyright 2018, Joyent, Inc.
*/
#ifndef _SYS_ZONE_H
#define _SYS_ZONE_H
#include <sys/types.h>
-#include <sys/mutex.h>
#include <sys/param.h>
+#include <sys/tsol/label.h>
+#include <sys/uadmin.h>
+#include <netinet/in.h>
+
+#ifdef _KERNEL
+/*
+ * Many includes are kernel-only to reduce namespace pollution of
+ * userland applications.
+ */
+#include <sys/mutex.h>
#include <sys/rctl.h>
#include <sys/ipc_rctl.h>
#include <sys/pset.h>
-#include <sys/tsol/label.h>
#include <sys/cred.h>
#include <sys/netstack.h>
-#include <sys/uadmin.h>
#include <sys/ksynch.h>
#include <sys/socket_impl.h>
#include <sys/secflags.h>
-#include <netinet/in.h>
#include <sys/cpu_uarray.h>
#include <sys/nvpair.h>
+#include <sys/list.h>
+#include <sys/loadavg.h>
+#endif /* _KERNEL */
#ifdef __cplusplus
extern "C" {
@@ -350,13 +359,6 @@ typedef struct zone_net_data {
#ifdef _KERNEL
-/*
- * We need to protect the definition of 'list_t' from userland applications and
- * libraries which may be defining ther own versions.
- */
-#include <sys/list.h>
-#include <sys/loadavg.h>
-
#define GLOBAL_ZONEUNIQID 0 /* uniqid of the global zone */
/*