summaryrefslogtreecommitdiff
path: root/usr/src/uts/common
diff options
context:
space:
mode:
Diffstat (limited to 'usr/src/uts/common')
-rw-r--r--usr/src/uts/common/fs/zfs/arc.c10
-rw-r--r--usr/src/uts/common/fs/zfs/dsl_synctask.c5
-rw-r--r--usr/src/uts/common/fs/zfs/spa.c42
-rw-r--r--usr/src/uts/common/fs/zfs/spa_misc.c21
-rw-r--r--usr/src/uts/common/fs/zfs/sys/spa.h1
-rw-r--r--usr/src/uts/common/fs/zfs/vdev.c9
-rw-r--r--usr/src/uts/common/fs/zfs/vdev_label.c9
-rw-r--r--usr/src/uts/common/fs/zfs/zfs_ioctl.c159
-rw-r--r--usr/src/uts/common/fs/zfs/zfs_vfsops.c21
-rw-r--r--usr/src/uts/common/fs/zfs/zio.c2
-rw-r--r--usr/src/uts/common/fs/zfs/zvol.c15
-rw-r--r--usr/src/uts/common/sys/fs/zfs.h2
12 files changed, 197 insertions, 99 deletions
diff --git a/usr/src/uts/common/fs/zfs/arc.c b/usr/src/uts/common/fs/zfs/arc.c
index f1ee503cd7..a82718e8bc 100644
--- a/usr/src/uts/common/fs/zfs/arc.c
+++ b/usr/src/uts/common/fs/zfs/arc.c
@@ -4445,6 +4445,16 @@ l2arc_feed_thread(void)
ASSERT(spa != NULL);
/*
+ * If the pool is read-only then force the feed thread to
+ * sleep a little longer.
+ */
+ if (!spa_writeable(spa)) {
+ next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
+ spa_config_exit(spa, SCL_L2ARC, dev);
+ continue;
+ }
+
+ /*
* Avoid contributing to memory pressure.
*/
if (arc_reclaim_needed()) {
diff --git a/usr/src/uts/common/fs/zfs/dsl_synctask.c b/usr/src/uts/common/fs/zfs/dsl_synctask.c
index 832685b0fc..b0818ce274 100644
--- a/usr/src/uts/common/fs/zfs/dsl_synctask.c
+++ b/usr/src/uts/common/fs/zfs/dsl_synctask.c
@@ -213,6 +213,8 @@ dsl_sync_task_do(dsl_pool_t *dp,
dsl_sync_task_group_t *dstg;
int err;
+ ASSERT(spa_writeable(dp->dp_spa));
+
dstg = dsl_sync_task_group_create(dp);
dsl_sync_task_create(dstg, checkfunc, syncfunc,
arg1, arg2, blocks_modified);
@@ -228,6 +230,9 @@ dsl_sync_task_do_nowait(dsl_pool_t *dp,
{
dsl_sync_task_group_t *dstg;
+ if (!spa_writeable(dp->dp_spa))
+ return;
+
dstg = dsl_sync_task_group_create(dp);
dsl_sync_task_create(dstg, checkfunc, syncfunc,
arg1, arg2, blocks_modified);
diff --git a/usr/src/uts/common/fs/zfs/spa.c b/usr/src/uts/common/fs/zfs/spa.c
index 59c7656044..b6190e4cfa 100644
--- a/usr/src/uts/common/fs/zfs/spa.c
+++ b/usr/src/uts/common/fs/zfs/spa.c
@@ -181,6 +181,8 @@ spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
+ spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
+ (spa_mode(spa) == FREAD), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
@@ -530,7 +532,9 @@ spa_prop_set(spa_t *spa, nvlist_t *nvp)
nvpair_name(elem))) == ZPROP_INVAL)
return (EINVAL);
- if (prop == ZPOOL_PROP_CACHEFILE || prop == ZPOOL_PROP_ALTROOT)
+ if (prop == ZPOOL_PROP_CACHEFILE ||
+ prop == ZPOOL_PROP_ALTROOT ||
+ prop == ZPOOL_PROP_READONLY)
continue;
need_sync = B_TRUE;
@@ -2241,12 +2245,14 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
static int
spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
{
+ int mode = spa->spa_mode;
+
spa_unload(spa);
spa_deactivate(spa);
spa->spa_load_max_txg--;
- spa_activate(spa, spa_mode_global);
+ spa_activate(spa, mode);
spa_async_suspend(spa);
return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
@@ -3257,6 +3263,8 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
char *altroot = NULL;
spa_load_state_t state = SPA_LOAD_IMPORT;
zpool_rewind_policy_t policy;
+ uint64_t mode = spa_mode_global;
+ uint64_t readonly = B_FALSE;
int error;
nvlist_t *nvroot;
nvlist_t **spares, **l2cache;
@@ -3276,6 +3284,10 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
*/
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
+ (void) nvlist_lookup_uint64(props,
+ zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
+ if (readonly)
+ mode = FREAD;
spa = spa_add(pool, config, altroot);
spa->spa_import_flags = flags;
@@ -3295,7 +3307,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
return (0);
}
- spa_activate(spa, spa_mode_global);
+ spa_activate(spa, mode);
/*
* Don't start async tasks until we know everything is healthy.
@@ -3676,6 +3688,8 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
+ ASSERT(spa_writeable(spa));
+
txg = spa_vdev_enter(spa);
if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
@@ -3787,6 +3801,8 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
int newvd_isspare;
int error;
+ ASSERT(spa_writeable(spa));
+
txg = spa_vdev_enter(spa);
oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
@@ -3984,6 +4000,8 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
uint64_t unspare_guid;
char *vdpath;
+ ASSERT(spa_writeable(spa));
+
txg = spa_vdev_enter(spa);
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
@@ -4223,8 +4241,7 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
vdev_t *rvd, **vml = NULL; /* vdev modify list */
boolean_t activate_slog;
- if (!spa_writeable(spa))
- return (EROFS);
+ ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
@@ -4641,6 +4658,8 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
int error = 0;
boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
+ ASSERT(spa_writeable(spa));
+
if (!locked)
txg = spa_vdev_enter(spa);
@@ -4859,6 +4878,8 @@ spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
vdev_t *vd;
boolean_t sync = B_FALSE;
+ ASSERT(spa_writeable(spa));
+
spa_vdev_state_enter(spa, SCL_ALL);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
@@ -5304,9 +5325,11 @@ spa_sync_props(void *arg1, void *arg2, dmu_tx_t *tx)
ASSERT(spa->spa_root != NULL);
break;
+ case ZPOOL_PROP_READONLY:
case ZPOOL_PROP_CACHEFILE:
/*
- * 'cachefile' is also a non-persisitent property.
+ * 'readonly' and 'cachefile' are also non-persistent
+ * properties.
*/
break;
default:
@@ -5438,6 +5461,8 @@ spa_sync(spa_t *spa, uint64_t txg)
dmu_tx_t *tx;
int error;
+ VERIFY(spa_writeable(spa));
+
/*
* Lock out configuration changes.
*/
@@ -5656,7 +5681,8 @@ spa_sync_allpools(void)
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
- if (spa_state(spa) != POOL_STATE_ACTIVE || spa_suspended(spa))
+ if (spa_state(spa) != POOL_STATE_ACTIVE ||
+ !spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
@@ -5736,6 +5762,8 @@ spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
void
spa_upgrade(spa_t *spa, uint64_t version)
{
+ ASSERT(spa_writeable(spa));
+
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
diff --git a/usr/src/uts/common/fs/zfs/spa_misc.c b/usr/src/uts/common/fs/zfs/spa_misc.c
index f2bd1bff3e..1b54afb0be 100644
--- a/usr/src/uts/common/fs/zfs/spa_misc.c
+++ b/usr/src/uts/common/fs/zfs/spa_misc.c
@@ -1078,12 +1078,12 @@ spa_rename(const char *name, const char *newname)
}
/*
- * Determine whether a pool with given pool_guid exists. If device_guid is
- * non-zero, determine whether the pool exists *and* contains a device with the
- * specified device_guid.
+ * Return the spa_t associated with given pool_guid, if it exists. If
+ * device_guid is non-zero, determine whether the pool exists *and* contains
+ * a device with the specified device_guid.
*/
-boolean_t
-spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
+spa_t *
+spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
spa_t *spa;
avl_tree_t *t = &spa_namespace_avl;
@@ -1114,7 +1114,16 @@ spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
}
}
- return (spa != NULL);
+ return (spa);
+}
+
+/*
+ * Determine whether a pool with the given pool_guid exists.
+ */
+boolean_t
+spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
+{
+ return (spa_by_guid(pool_guid, device_guid) != NULL);
}
char *
diff --git a/usr/src/uts/common/fs/zfs/sys/spa.h b/usr/src/uts/common/fs/zfs/sys/spa.h
index e228455abb..456ec06dc4 100644
--- a/usr/src/uts/common/fs/zfs/sys/spa.h
+++ b/usr/src/uts/common/fs/zfs/sys/spa.h
@@ -602,6 +602,7 @@ extern objset_t *spa_meta_objset(spa_t *spa);
/* Miscellaneous support routines */
extern int spa_rename(const char *oldname, const char *newname);
+extern spa_t *spa_by_guid(uint64_t pool_guid, uint64_t device_guid);
extern boolean_t spa_guid_exists(uint64_t pool_guid, uint64_t device_guid);
extern char *spa_strdup(const char *);
extern void spa_strfree(char *);
diff --git a/usr/src/uts/common/fs/zfs/vdev.c b/usr/src/uts/common/fs/zfs/vdev.c
index 600da9a83d..bac3e86054 100644
--- a/usr/src/uts/common/fs/zfs/vdev.c
+++ b/usr/src/uts/common/fs/zfs/vdev.c
@@ -1541,6 +1541,7 @@ vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
ASSERT(vd == vd->vdev_top);
ASSERT(!vd->vdev_ishole);
ASSERT(ISP2(flags));
+ ASSERT(spa_writeable(vd->vdev_spa));
if (flags & VDD_METASLAB)
(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
@@ -1596,6 +1597,7 @@ vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
+ ASSERT(spa_writeable(vd->vdev_spa));
mutex_enter(sm->sm_lock);
if (!space_map_contains(sm, txg, size))
@@ -2355,11 +2357,11 @@ vdev_clear(spa_t *spa, vdev_t *vd)
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
- vdev_reopen(vd);
+ vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
vd->vdev_forcefault = B_FALSE;
- if (vd != rvd)
+ if (vd != rvd && vdev_writeable(vd->vdev_top))
vdev_state_dirty(vd->vdev_top);
if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
@@ -2701,6 +2703,8 @@ vdev_config_dirty(vdev_t *vd)
vdev_t *rvd = spa->spa_root_vdev;
int c;
+ ASSERT(spa_writeable(spa));
+
/*
* If this is an aux vdev (as with l2cache and spare devices), then we
* update the vdev config manually and set the sync flag.
@@ -2789,6 +2793,7 @@ vdev_state_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
+ ASSERT(spa_writeable(spa));
ASSERT(vd == vd->vdev_top);
/*
diff --git a/usr/src/uts/common/fs/zfs/vdev_label.c b/usr/src/uts/common/fs/zfs/vdev_label.c
index ec5ec6ee34..c08ed8ba04 100644
--- a/usr/src/uts/common/fs/zfs/vdev_label.c
+++ b/usr/src/uts/common/fs/zfs/vdev_label.c
@@ -574,6 +574,15 @@ vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
return (B_TRUE);
/*
+ * We can't rely on a pool's state if it's been imported
+ * read-only. Instead we look to see if the pool is marked
+ * read-only in the namespace and set the state to active.
+ */
+ if ((spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
+ spa_mode(spa) == FREAD)
+ state = POOL_STATE_ACTIVE;
+
+ /*
* If the device is marked ACTIVE, then this device is in use by another
* pool on the system.
*/
diff --git a/usr/src/uts/common/fs/zfs/zfs_ioctl.c b/usr/src/uts/common/fs/zfs/zfs_ioctl.c
index 1dd8a799de..1b63c9bf45 100644
--- a/usr/src/uts/common/fs/zfs/zfs_ioctl.c
+++ b/usr/src/uts/common/fs/zfs/zfs_ioctl.c
@@ -88,12 +88,18 @@ typedef enum {
DATASET_NAME
} zfs_ioc_namecheck_t;
+typedef enum {
+ POOL_CHECK_NONE = 1 << 0,
+ POOL_CHECK_SUSPENDED = 1 << 1,
+ POOL_CHECK_READONLY = 1 << 2
+} zfs_ioc_poolcheck_t;
+
typedef struct zfs_ioc_vec {
zfs_ioc_func_t *zvec_func;
zfs_secpolicy_func_t *zvec_secpolicy;
zfs_ioc_namecheck_t zvec_namecheck;
boolean_t zvec_his_log;
- boolean_t zvec_pool_check;
+ zfs_ioc_poolcheck_t zvec_pool_check;
} zfs_ioc_vec_t;
/* This array is indexed by zfs_userquota_prop_t */
@@ -4622,126 +4628,141 @@ zfs_ioc_get_holds(zfs_cmd_t *zc)
*/
static zfs_ioc_vec_t zfs_ioc_vec[] = {
{ zfs_ioc_pool_create, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_destroy, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_import, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_export, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_configs, zfs_secpolicy_none, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_stats, zfs_secpolicy_read, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_tryimport, zfs_secpolicy_config, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_pool_scan, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_pool_freeze, zfs_secpolicy_config, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_READONLY },
{ zfs_ioc_pool_upgrade, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_pool_get_history, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_vdev_add, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_remove, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_set_state, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_FALSE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_attach, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_detach, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_setpath, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_vdev_setfru, zfs_secpolicy_config, POOL_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_objset_stats, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED },
{ zfs_ioc_objset_zplprops, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_dataset_list_next, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED },
{ zfs_ioc_snapshot_list_next, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_TRUE },
- { zfs_ioc_set_prop, zfs_secpolicy_none, DATASET_NAME, B_TRUE, B_TRUE },
- { zfs_ioc_create, zfs_secpolicy_create, DATASET_NAME, B_TRUE, B_TRUE },
+ POOL_CHECK_SUSPENDED },
+ { zfs_ioc_set_prop, zfs_secpolicy_none, DATASET_NAME, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_create, zfs_secpolicy_create, DATASET_NAME, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_destroy, zfs_secpolicy_destroy, DATASET_NAME, B_TRUE,
- B_TRUE},
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_rollback, zfs_secpolicy_rollback, DATASET_NAME, B_TRUE,
- B_TRUE },
- { zfs_ioc_rename, zfs_secpolicy_rename, DATASET_NAME, B_TRUE, B_TRUE },
- { zfs_ioc_recv, zfs_secpolicy_receive, DATASET_NAME, B_TRUE, B_TRUE },
- { zfs_ioc_send, zfs_secpolicy_send, DATASET_NAME, B_TRUE, B_FALSE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_rename, zfs_secpolicy_rename, DATASET_NAME, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_recv, zfs_secpolicy_receive, DATASET_NAME, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_send, zfs_secpolicy_send, DATASET_NAME, B_TRUE,
+ POOL_CHECK_NONE },
{ zfs_ioc_inject_fault, zfs_secpolicy_inject, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_clear_fault, zfs_secpolicy_inject, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_inject_list_next, zfs_secpolicy_inject, NO_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_error_log, zfs_secpolicy_inject, POOL_NAME, B_FALSE,
- B_FALSE },
- { zfs_ioc_clear, zfs_secpolicy_config, POOL_NAME, B_TRUE, B_FALSE },
+ POOL_CHECK_NONE },
+ { zfs_ioc_clear, zfs_secpolicy_config, POOL_NAME, B_TRUE,
+ POOL_CHECK_NONE },
{ zfs_ioc_promote, zfs_secpolicy_promote, DATASET_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_destroy_snaps, zfs_secpolicy_destroy_snaps, DATASET_NAME,
- B_TRUE, B_TRUE },
+ B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_snapshot, zfs_secpolicy_snapshot, DATASET_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_dsobj_to_dsname, zfs_secpolicy_diff, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_obj_to_path, zfs_secpolicy_diff, DATASET_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED },
{ zfs_ioc_pool_set_props, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_pool_get_props, zfs_secpolicy_read, POOL_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_set_fsacl, zfs_secpolicy_fsacl, DATASET_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_get_fsacl, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_FALSE },
- { zfs_ioc_share, zfs_secpolicy_share, DATASET_NAME, B_FALSE, B_FALSE },
+ POOL_CHECK_NONE },
+ { zfs_ioc_share, zfs_secpolicy_share, DATASET_NAME, B_FALSE,
+ POOL_CHECK_NONE },
{ zfs_ioc_inherit_prop, zfs_secpolicy_inherit, DATASET_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_smb_acl, zfs_secpolicy_smb_acl, DATASET_NAME, B_FALSE,
- B_FALSE },
- { zfs_ioc_userspace_one, zfs_secpolicy_userspace_one,
- DATASET_NAME, B_FALSE, B_FALSE },
- { zfs_ioc_userspace_many, zfs_secpolicy_userspace_many,
- DATASET_NAME, B_FALSE, B_FALSE },
+ POOL_CHECK_NONE },
+ { zfs_ioc_userspace_one, zfs_secpolicy_userspace_one, DATASET_NAME,
+ B_FALSE, POOL_CHECK_NONE },
+ { zfs_ioc_userspace_many, zfs_secpolicy_userspace_many, DATASET_NAME,
+ B_FALSE, POOL_CHECK_NONE },
{ zfs_ioc_userspace_upgrade, zfs_secpolicy_userspace_upgrade,
- DATASET_NAME, B_FALSE, B_TRUE },
- { zfs_ioc_hold, zfs_secpolicy_hold, DATASET_NAME, B_TRUE, B_TRUE },
+ DATASET_NAME, B_FALSE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
+ { zfs_ioc_hold, zfs_secpolicy_hold, DATASET_NAME, B_TRUE,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_release, zfs_secpolicy_release, DATASET_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_get_holds, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED },
{ zfs_ioc_objset_recvd_props, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_FALSE },
+ POOL_CHECK_NONE },
{ zfs_ioc_vdev_split, zfs_secpolicy_config, POOL_NAME, B_TRUE,
- B_TRUE },
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_next_obj, zfs_secpolicy_read, DATASET_NAME, B_FALSE,
- B_FALSE },
- { zfs_ioc_diff, zfs_secpolicy_diff, DATASET_NAME, B_FALSE, B_FALSE },
+ POOL_CHECK_NONE },
+ { zfs_ioc_diff, zfs_secpolicy_diff, DATASET_NAME, B_FALSE,
+ POOL_CHECK_NONE },
{ zfs_ioc_tmp_snapshot, zfs_secpolicy_tmp_snapshot, DATASET_NAME,
- B_FALSE, B_FALSE },
+ B_FALSE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
{ zfs_ioc_obj_to_stats, zfs_secpolicy_diff, DATASET_NAME, B_FALSE,
- B_TRUE }
+ POOL_CHECK_SUSPENDED }
};
int
-pool_status_check(const char *name, zfs_ioc_namecheck_t type)
+pool_status_check(const char *name, zfs_ioc_namecheck_t type,
+ zfs_ioc_poolcheck_t check)
{
spa_t *spa;
int error;
ASSERT(type == POOL_NAME || type == DATASET_NAME);
+ if (check & POOL_CHECK_NONE)
+ return (0);
+
error = spa_open(name, &spa, FTAG);
if (error == 0) {
- if (spa_suspended(spa))
+ if ((check & POOL_CHECK_SUSPENDED) && spa_suspended(spa))
error = EAGAIN;
+ else if ((check & POOL_CHECK_READONLY) && !spa_writeable(spa))
+ error = EROFS;
spa_close(spa, FTAG);
}
return (error);
@@ -4893,17 +4914,17 @@ zfsdev_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
case POOL_NAME:
if (pool_namecheck(zc->zc_name, NULL, NULL) != 0)
error = EINVAL;
- if (zfs_ioc_vec[vec].zvec_pool_check)
- error = pool_status_check(zc->zc_name,
- zfs_ioc_vec[vec].zvec_namecheck);
+ error = pool_status_check(zc->zc_name,
+ zfs_ioc_vec[vec].zvec_namecheck,
+ zfs_ioc_vec[vec].zvec_pool_check);
break;
case DATASET_NAME:
if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0)
error = EINVAL;
- if (zfs_ioc_vec[vec].zvec_pool_check)
- error = pool_status_check(zc->zc_name,
- zfs_ioc_vec[vec].zvec_namecheck);
+ error = pool_status_check(zc->zc_name,
+ zfs_ioc_vec[vec].zvec_namecheck,
+ zfs_ioc_vec[vec].zvec_pool_check);
break;
case NO_NAME:
diff --git a/usr/src/uts/common/fs/zfs/zfs_vfsops.c b/usr/src/uts/common/fs/zfs/zfs_vfsops.c
index 46aa3260e7..20d100cea2 100644
--- a/usr/src/uts/common/fs/zfs/zfs_vfsops.c
+++ b/usr/src/uts/common/fs/zfs/zfs_vfsops.c
@@ -417,7 +417,8 @@ zfs_register_callbacks(vfs_t *vfsp)
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
- if (vfs_optionisset(vfsp, MNTOPT_RO, NULL)) {
+ if (vfs_optionisset(vfsp, MNTOPT_RO, NULL) ||
+ !spa_writeable(dmu_objset_spa(os))) {
readonly = B_TRUE;
do_readonly = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) {
@@ -1046,12 +1047,15 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
- if (zil_replay_disable) {
- zil_destroy(zfsvfs->z_log, B_FALSE);
- } else {
- zfsvfs->z_replay = B_TRUE;
- zil_replay(zfsvfs->z_os, zfsvfs, zfs_replay_vector);
- zfsvfs->z_replay = B_FALSE;
+ if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
+ if (zil_replay_disable) {
+ zil_destroy(zfsvfs->z_log, B_FALSE);
+ } else {
+ zfsvfs->z_replay = B_TRUE;
+ zil_replay(zfsvfs->z_os, zfsvfs,
+ zfs_replay_vector);
+ zfsvfs->z_replay = B_FALSE;
+ }
}
zfsvfs->z_vfs->vfs_flag |= readonly; /* restore readonly bit */
}
@@ -1813,7 +1817,8 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
* Evict cached data
*/
if (dmu_objset_is_dirty_anywhere(zfsvfs->z_os))
- txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
+ if (!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY))
+ txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
(void) dmu_objset_evict_dbufs(zfsvfs->z_os);
return (0);
diff --git a/usr/src/uts/common/fs/zfs/zio.c b/usr/src/uts/common/fs/zfs/zio.c
index 258a9a3318..1ba2330bdc 100644
--- a/usr/src/uts/common/fs/zfs/zio.c
+++ b/usr/src/uts/common/fs/zfs/zio.c
@@ -2282,7 +2282,7 @@ zio_vdev_io_start(zio_t *zio)
ASSERT(P2PHASE(zio->io_offset, align) == 0);
ASSERT(P2PHASE(zio->io_size, align) == 0);
- ASSERT(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
+ VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
/*
* If this is a repair I/O, and there's no self-healing involved --
diff --git a/usr/src/uts/common/fs/zfs/zvol.c b/usr/src/uts/common/fs/zfs/zvol.c
index 07afe02241..47b6c5a87a 100644
--- a/usr/src/uts/common/fs/zfs/zvol.c
+++ b/usr/src/uts/common/fs/zfs/zvol.c
@@ -500,7 +500,7 @@ zvol_create_minor(const char *name)
zv->zv_min_bs = DEV_BSHIFT;
zv->zv_minor = minor;
zv->zv_objset = os;
- if (dmu_objset_is_snapshot(os))
+ if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
zv->zv_flags |= ZVOL_RDONLY;
mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
@@ -512,10 +512,12 @@ zvol_create_minor(const char *name)
ASSERT(error == 0);
zv->zv_volblocksize = doi.doi_data_block_size;
- if (zil_replay_disable)
- zil_destroy(dmu_objset_zil(os), B_FALSE);
- else
- zil_replay(os, zv, zvol_replay_vector);
+ if (spa_writeable(dmu_objset_spa(os))) {
+ if (zil_replay_disable)
+ zil_destroy(dmu_objset_zil(os), B_FALSE);
+ else
+ zil_replay(os, zv, zvol_replay_vector);
+ }
dmu_objset_disown(os, FTAG);
zv->zv_objset = NULL;
@@ -605,7 +607,8 @@ zvol_first_open(zvol_state_t *zv)
VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
NULL) == 0);
- if (readonly || dmu_objset_is_snapshot(os))
+ if (readonly || dmu_objset_is_snapshot(os) ||
+ !spa_writeable(dmu_objset_spa(os)))
zv->zv_flags |= ZVOL_RDONLY;
else
zv->zv_flags &= ~ZVOL_RDONLY;
diff --git a/usr/src/uts/common/sys/fs/zfs.h b/usr/src/uts/common/sys/fs/zfs.h
index 68ea235c6a..da0b12bab4 100644
--- a/usr/src/uts/common/sys/fs/zfs.h
+++ b/usr/src/uts/common/sys/fs/zfs.h
@@ -160,6 +160,7 @@ typedef enum {
ZPOOL_PROP_DEDUPRATIO,
ZPOOL_PROP_FREE,
ZPOOL_PROP_ALLOCATED,
+ ZPOOL_PROP_READONLY,
ZPOOL_NUM_PROPS
} zpool_prop_t;
@@ -827,6 +828,7 @@ typedef enum {
#define ZFS_IMPORT_VERBATIM 0x1
#define ZFS_IMPORT_ANY_HOST 0x2
#define ZFS_IMPORT_MISSING_LOG 0x4
+#define ZFS_IMPORT_ONLY 0x8
/*
* Sysevent payload members. ZFS will generate the following sysevents with the