-rw-r--r--  usr/src/cmd/fm/modules/common/zfs-diagnosis/zfs_de.c | 42
-rw-r--r--  usr/src/uts/common/fs/zfs/spa.c                      | 30
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/spa_impl.h             |  1
-rw-r--r--  usr/src/uts/common/fs/zfs/zfs_ioctl.c                |  4
4 files changed, 41 insertions(+), 36 deletions(-)
diff --git a/usr/src/cmd/fm/modules/common/zfs-diagnosis/zfs_de.c b/usr/src/cmd/fm/modules/common/zfs-diagnosis/zfs_de.c
index 7f053e5d09..afbc2836ba 100644
--- a/usr/src/cmd/fm/modules/common/zfs-diagnosis/zfs_de.c
+++ b/usr/src/cmd/fm/modules/common/zfs-diagnosis/zfs_de.c
@@ -404,18 +404,12 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
         }
 
         /*
-         * Determine if this ereport corresponds to an open case.  Cases are
-         * indexed by ENA, since ZFS does all the work of chaining together
-         * related ereports.
-         *
-         * We also detect if an ereport corresponds to an open case by context,
-         * such as:
-         *
-         *      - An error occurred during an open of a pool with an existing
-         *        case.
-         *
-         *      - An error occurred for a device which already has an open
-         *        case.
+         * Determine if this ereport corresponds to an open case.  Previous
+         * incarnations of this DE used the ENA to chain events together as
+         * part of the same case.  The problem with this is that we rely on
+         * global uniqueness of cases based on (pool_guid, vdev_guid) pair when
+         * generating SERD engines.  Instead, we have a case for each vdev or
+         * pool, regardless of the ENA.
         */
        (void) nvlist_lookup_uint64(nvl,
            FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, &pool_guid);
@@ -427,24 +421,8 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 
        for (zcp = uu_list_first(zfs_cases); zcp != NULL;
            zcp = uu_list_next(zfs_cases, zcp)) {
-               /*
-                * Matches a known ENA.
-                */
-               if (zcp->zc_data.zc_ena == ena)
-                       break;
-
-               /*
-                * Matches a case involving load errors for this same pool.
-                */
                if (zcp->zc_data.zc_pool_guid == pool_guid &&
-                   zcp->zc_data.zc_pool_state == SPA_LOAD_OPEN &&
-                   pool_state == SPA_LOAD_OPEN)
-                       break;
-
-               /*
-                * Device errors for the same device.
-                */
-               if (vdev_guid != 0 && zcp->zc_data.zc_vdev_guid == vdev_guid)
+                   zcp->zc_data.zc_vdev_guid == vdev_guid)
                        break;
        }
 
@@ -569,11 +547,7 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
                zfs_case_solve(hdl, zcp, "fault.fs.zfs.log_replay", B_TRUE);
        } else if (fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.vdev.*")) {
                /*
-                * Device fault.  If this occurred during pool open, then defer
-                * reporting the fault.  If the pool itself could not be opeend,
-                * we only report the pool fault, not every device fault that
-                * may have caused the problem.  If we do not see a pool fault
-                * within the timeout period, then we'll solve the device case.
+                * Device fault.
                 */
                zfs_case_solve(hdl, zcp, "fault.fs.zfs.device", B_TRUE);
        } else if (fmd_nvl_class_match(hdl, nvl,
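The new comment above captures the rationale: a case, and any SERD engines hung off it, is now keyed by the (pool_guid, vdev_guid) pair rather than by ENA, so the key itself provides the global uniqueness the engines need. A minimal stand-alone sketch of that naming idea follows; the helper name serd_engine_name(), the buffer size, and the format string are invented for illustration and are not taken from zfs_de.c.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define SERD_NAME_MAX   64      /* illustrative buffer size */

/*
 * Build a SERD engine name from the (pool_guid, vdev_guid) pair plus an
 * engine type (e.g. "io" or "checksum").  Because the pair identifies the
 * case, two different pools or vdevs can never collide on an engine name.
 */
static void
serd_engine_name(char *buf, size_t len, uint64_t pool_guid,
    uint64_t vdev_guid, const char *type)
{
        (void) snprintf(buf, len, "zfs_%" PRIx64 "_%" PRIx64 "_%s",
            pool_guid, vdev_guid, type);
}

int
main(void)
{
        char name[SERD_NAME_MAX];

        serd_engine_name(name, sizeof (name), 0xdeadbeefULL, 0xcafeULL, "io");
        (void) printf("%s\n", name);    /* prints zfs_deadbeef_cafe_io */
        return (0);
}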
diff --git a/usr/src/uts/common/fs/zfs/spa.c b/usr/src/uts/common/fs/zfs/spa.c
index f503592396..20bd8482be 100644
--- a/usr/src/uts/common/fs/zfs/spa.c
+++ b/usr/src/uts/common/fs/zfs/spa.c
@@ -1141,6 +1141,15 @@ spa_check_logs(spa_t *spa)
        return (0);
 }
 
+static void
+spa_aux_check_removed(spa_aux_vdev_t *sav)
+{
+       int i;
+
+       for (i = 0; i < sav->sav_count; i++)
+               spa_check_removed(sav->sav_vdevs[i]);
+}
+
 /*
  * Load an existing storage pool, using the pool's builtin spa_config as a
  * source of configuration information.
@@ -1503,6 +1512,7 @@ spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
                    spa->spa_pool_props_object,
                    zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE),
                    sizeof (uint64_t), 1, &autoreplace);
+               spa->spa_autoreplace = (autoreplace != 0);
                (void) zap_lookup(spa->spa_meta_objset,
                    spa->spa_pool_props_object,
                    zpool_prop_to_name(ZPOOL_PROP_DELEGATION),
@@ -1524,8 +1534,18 @@ spa_load(spa_t *spa, nvlist_t *config, spa_load_state_t state, int mosconfig)
         * unopenable vdevs so that the normal autoreplace handler can take
         * over.
         */
-       if (autoreplace && state != SPA_LOAD_TRYIMPORT)
+       if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
                spa_check_removed(spa->spa_root_vdev);
+               /*
+                * For the import case, this is done in spa_import(), because
+                * at this point we're using the spare definitions from
+                * the MOS config, not necessarily from the userland config.
+                */
+               if (state != SPA_LOAD_IMPORT) {
+                       spa_aux_check_removed(&spa->spa_spares);
+                       spa_aux_check_removed(&spa->spa_l2cache);
+               }
+       }
 
        /*
         * Load the vdev state for all toplevel vdevs.
@@ -2642,6 +2662,14 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props)
                spa->spa_l2cache.sav_sync = B_TRUE;
        }
 
+       /*
+        * Check for any removed devices.
+        */
+       if (spa->spa_autoreplace) {
+               spa_aux_check_removed(&spa->spa_spares);
+               spa_aux_check_removed(&spa->spa_l2cache);
+       }
+
        if (spa_writeable(spa)) {
                /*
                 * Update the config cache to include the newly-imported pool.
diff --git a/usr/src/uts/common/fs/zfs/sys/spa_impl.h b/usr/src/uts/common/fs/zfs/sys/spa_impl.h
index 84da684885..9c224d752f 100644
--- a/usr/src/uts/common/fs/zfs/sys/spa_impl.h
+++ b/usr/src/uts/common/fs/zfs/sys/spa_impl.h
@@ -171,6 +171,7 @@ struct spa {
        int             spa_mode;               /* FREAD | FWRITE */
        spa_log_state_t spa_log_state;          /* log state */
        uint64_t        spa_autoexpand;         /* lun expansion on/off */
+       boolean_t       spa_autoreplace;        /* autoreplace set in open */
        /*
         * spa_refcnt & spa_config_lock must be the last elements
         * because refcount_t changes size based on compilation options.
diff --git a/usr/src/uts/common/fs/zfs/zfs_ioctl.c b/usr/src/uts/common/fs/zfs/zfs_ioctl.c
index aa1582f262..40d848fe1a 100644
--- a/usr/src/uts/common/fs/zfs/zfs_ioctl.c
+++ b/usr/src/uts/common/fs/zfs/zfs_ioctl.c
@@ -1952,8 +1952,10 @@ zfs_ioc_pool_set_props(zfs_cmd_t *zc)
                        spa_config_sync(spa, B_FALSE, B_TRUE);
                }
                mutex_exit(&spa_namespace_lock);
-               if (spa != NULL)
+               if (spa != NULL) {
+                       nvlist_free(props);
                        return (0);
+               }
        }
 
        if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) {
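The spa.c half of the change caches the autoreplace pool property in spa_autoreplace at load time, always scans the root vdev tree for removed devices, and defers the spare/l2cache scan to spa_import() on the import path, where the userland configuration rather than the MOS view is authoritative. The stand-alone sketch below mirrors only that control flow; the types and helper names (pool_t, load_check_removed(), and so on) are stand-ins for illustration, not the kernel interfaces.

#include <stdio.h>
#include <stdbool.h>

typedef enum { LOAD_OPEN, LOAD_IMPORT, LOAD_TRYIMPORT } load_state_t;

typedef struct pool {
        bool    autoreplace;    /* cached "autoreplace" pool property */
} pool_t;

static void check_root_vdevs(pool_t *p) { (void) p; (void) puts("root vdevs"); }
static void check_spares(pool_t *p)     { (void) p; (void) puts("spares"); }
static void check_l2cache(pool_t *p)    { (void) p; (void) puts("l2cache"); }

/* Mirrors the decision spa_load() now makes. */
static void
load_check_removed(pool_t *p, load_state_t state)
{
        if (!p->autoreplace || state == LOAD_TRYIMPORT)
                return;
        check_root_vdevs(p);
        /*
         * On import, only the MOS view of the spares and cache devices is
         * available here, so the aux checks are deferred to the import path.
         */
        if (state != LOAD_IMPORT) {
                check_spares(p);
                check_l2cache(p);
        }
}

/* Mirrors the check spa_import() runs after applying the userland config. */
static void
import_check_removed(pool_t *p)
{
        if (p->autoreplace) {
                check_spares(p);
                check_l2cache(p);
        }
}

int
main(void)
{
        pool_t p = { .autoreplace = true };

        load_check_removed(&p, LOAD_IMPORT);    /* root vdevs only */
        import_check_removed(&p);               /* spares + l2cache afterwards */
        return (0);
}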

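The zfs_ioctl.c hunk is a leak fix: zfs_ioc_pool_set_props() is responsible for freeing the props nvlist on every exit path, and the early return taken after dropping spa_namespace_lock previously returned 0 without freeing it. The short userland program below (built with -lnvpair) illustrates the same ownership rule under invented names; set_props_demo() and its version-only shortcut are hypothetical, and only the nvlist_*() calls are real libnvpair interfaces.

#include <stdio.h>
#include <libnvpair.h>

/* Takes ownership of props and must free it on every return path. */
static int
set_props_demo(nvlist_t *props)
{
        uint64_t version;

        /* Short-circuit path: free before the early return (the fix). */
        if (nvlist_lookup_uint64(props, "version", &version) == 0) {
                (void) printf("setting version %llu only\n",
                    (unsigned long long)version);
                nvlist_free(props);
                return (0);
        }

        /* Normal path: apply the remaining properties, then free. */
        (void) printf("applying full property list\n");
        nvlist_free(props);
        return (0);
}

int
main(void)
{
        nvlist_t *props;

        if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
                return (1);
        if (nvlist_add_uint64(props, "version", 14) != 0) {
                nvlist_free(props);
                return (1);
        }
        return (set_props_demo(props));
}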