author    Olaf Faaland <faaland1@llnl.gov>    2019-03-28 20:30:57 +0000
committer Dan McDonald <danmcd@joyent.com>    2019-04-03 14:42:53 -0400
commit    e0f1c0afa46cc84d4b1e40124032a9a87310386e
tree      8aa10b26a7ae7380d42c5352be4dc06b0ed4cae0
parent    c93ad993b7959fc974ed6f4a92fce6041d98bd11
10499 Multi-modifier protection (MMP)
Portions contributed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Portions contributed by: Tim Chase <tim@chase2k.com>
Portions contributed by: sanjeevbagewadi <sanjeev.bagewadi@gmail.com>
Portions contributed by: John L. Hammond <john.hammond@intel.com>
Portions contributed by: Giuseppe Di Natale <dinatale2@llnl.gov>
Portions contributed by: Prakash Surya <surya1@llnl.gov>
Reviewed by: George Melikov <mail@gmelikov.ru>
Reviewed by: Tom Caputi <tcaputi@datto.com>
Reviewed by: Kash Pande <kash@tripleback.net>
Reviewed by: loli10K <ezomori.nozomu@gmail.com>
Reviewed by: George Melikov <mail@gmelikov.ru>
Reviewed by: Tony Hutter <hutter2@llnl.gov>
Reviewed by: Gu Zheng <guzheng2331314@163.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Ned Bass <bass6@llnl.gov>
Reviewed by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed by: Andy Stormont <astormont@racktopsystems.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Kody Kantor <kody.kantor@joyent.com>
Approved by: Dan McDonald <danmcd@joyent.com>
Diffstat (limited to 'usr/src/uts/common/fs')
-rw-r--r--  usr/src/uts/common/fs/zfs/dsl_pool.c              3
-rw-r--r--  usr/src/uts/common/fs/zfs/mmp.c                 582
-rw-r--r--  usr/src/uts/common/fs/zfs/spa.c                 281
-rw-r--r--  usr/src/uts/common/fs/zfs/spa_config.c            3
-rw-r--r--  usr/src/uts/common/fs/zfs/spa_misc.c             33
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/dsl_pool.h          1
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/mmp.h              68
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/spa.h               2
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/spa_impl.h          7
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/uberblock.h         3
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/uberblock_impl.h    3
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/vdev.h              2
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/vdev_impl.h        11
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/zio.h              12
-rw-r--r--  usr/src/uts/common/fs/zfs/uberblock.c             5
-rw-r--r--  usr/src/uts/common/fs/zfs/vdev.c                 13
-rw-r--r--  usr/src/uts/common/fs/zfs/vdev_label.c           27
-rw-r--r--  usr/src/uts/common/fs/zfs/zfs_ioctl.c             7
-rw-r--r--  usr/src/uts/common/fs/zfs/zio.c                   8
19 files changed, 1036 insertions, 35 deletions
diff --git a/usr/src/uts/common/fs/zfs/dsl_pool.c b/usr/src/uts/common/fs/zfs/dsl_pool.c
index 2a9ec5585a..08a35f68f3 100644
--- a/usr/src/uts/common/fs/zfs/dsl_pool.c
+++ b/usr/src/uts/common/fs/zfs/dsl_pool.c
@@ -50,6 +50,7 @@
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
+#include <sys/mmp.h>
/*
* ZFS Write Throttle
@@ -192,6 +193,7 @@ dsl_pool_open_impl(spa_t *spa, uint64_t txg)
dp->dp_meta_rootbp = *bp;
rrw_init(&dp->dp_config_rwlock, B_TRUE);
txg_init(dp, txg);
+ mmp_init(spa);
txg_list_create(&dp->dp_dirty_datasets, spa,
offsetof(dsl_dataset_t, ds_dirty_link));
@@ -393,6 +395,7 @@ dsl_pool_close(dsl_pool_t *dp)
*/
arc_flush(dp->dp_spa, FALSE);
+ mmp_fini(dp->dp_spa);
txg_fini(dp);
dsl_scan_fini(dp);
dmu_buf_user_evict_wait();
diff --git a/usr/src/uts/common/fs/zfs/mmp.c b/usr/src/uts/common/fs/zfs/mmp.c
new file mode 100644
index 0000000000..105e2bfdfd
--- /dev/null
+++ b/usr/src/uts/common/fs/zfs/mmp.c
@@ -0,0 +1,582 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
+ * Copyright 2019 Joyent, Inc.
+ */
+
+#include <sys/abd.h>
+#include <sys/mmp.h>
+#include <sys/spa.h>
+#include <sys/spa_impl.h>
+#include <sys/time.h>
+#include <sys/vdev.h>
+#include <sys/vdev_impl.h>
+#include <sys/zfs_context.h>
+#include <sys/callb.h>
+
+/*
+ * Multi-Modifier Protection (MMP) attempts to prevent a user from importing
+ * or opening a pool on more than one host at a time. In particular, it
+ * prevents "zpool import -f" on a host from succeeding while the pool is
+ * already imported on another host. There are many other ways in which a
+ * device could be used by two hosts for different purposes at the same time
+ * resulting in pool damage. This implementation does not attempt to detect
+ * those cases.
+ *
+ * MMP operates by ensuring there are frequent visible changes on disk (a
+ * "heartbeat") at all times, and by altering the import process to check
+ * for these changes and fail the import when they are detected. This
+ * functionality is enabled by setting the 'multihost' pool property to on.
+ *
+ * Uberblocks written by the txg_sync thread always go into the first
+ * (N-MMP_BLOCKS_PER_LABEL) slots; the remaining slots are reserved for MMP.
+ * They are used to hold uberblocks which are exactly the same as the last
+ * synced uberblock except that the ub_timestamp is frequently updated.
+ * Like all other uberblocks, the slot is written with an embedded checksum,
+ * and slots with invalid checksums are ignored. This provides the
+ * "heartbeat", with no risk of overwriting good uberblocks that must be
+ * preserved, e.g. previous txgs and associated block pointers.
+ *
+ * Two optional fields are added to uberblock structure: ub_mmp_magic and
+ * ub_mmp_delay. The magic field allows zfs to tell whether ub_mmp_delay is
+ * valid. The delay field is a decaying average of the amount of time between
+ * completion of successive MMP writes, in nanoseconds. It is used to predict
+ * how long the import must wait to detect activity in the pool, before
+ * concluding it is not in use.
+ *
+ * During import an activity test may now be performed to determine if
+ * the pool is in use. The activity test is typically required if the
+ * ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is
+ * POOL_STATE_ACTIVE, and the pool is not a root pool.
+ *
+ * The activity test finds the "best" uberblock (highest txg & timestamp),
+ * waits some time, and then finds the "best" uberblock again. If the txg
+ * and timestamp in both "best" uberblocks do not match, the pool is in use
+ * by another host and the import fails. Since the granularity of the
+ * timestamp is in seconds this activity test must take a bare minimum of one
+ * second. In order to assure the accuracy of the activity test, the default
+ * values result in an activity test duration of 10x the mmp write interval.
+ *
+ * The "zpool import" activity test can be expected to take a minimum time of
+ * zfs_multihost_import_intervals * zfs_multihost_interval milliseconds. If the
+ * "best" uberblock has a valid ub_mmp_delay field, then the duration of the
+ * test may take longer if MMP writes were occurring less frequently than
+ * expected. Additionally, the duration is then extended by a random 25% to
+ * attempt to detect simultaneous imports, for example when both partner
+ * hosts are rebooted at the same time and automatically attempt to import the
+ * pool.
+ */
+
+/*
+ * Used to control the frequency of mmp writes which are performed when the
+ * 'multihost' pool property is on. This is one factor used to determine the
+ * length of the activity check during import.
+ *
+ * The mmp write period is zfs_multihost_interval / leaf-vdevs milliseconds.
+ * This means that on average an mmp write will be issued for each leaf vdev
+ * every zfs_multihost_interval milliseconds. In practice, the observed period
+ * can vary with the I/O load and this observed value is the delay which is
+ * stored in the uberblock. The minimum allowed value is 100 ms.
+ */
+ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL;
+
+/*
+ * Used to control the duration of the activity test on import. Smaller values
+ * of zfs_multihost_import_intervals will reduce the import time but increase
+ * the risk of failing to detect an active pool. The total activity check time
+ * is never allowed to drop below one second. A value of 0 is ignored and
+ * treated as if it was set to 1.
+ */
+uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;
+
+/*
+ * Controls the behavior of the pool when mmp write failures are detected.
+ *
+ * When zfs_multihost_fail_intervals = 0 then mmp write failures are ignored.
+ * The failures will still be reported to the ZED which depending on its
+ * configuration may take action such as suspending the pool or taking a
+ * device offline.
+ *
+ * When zfs_multihost_fail_intervals > 0 then sequential mmp write failures will
+ * cause the pool to be suspended. This occurs when
+ * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds have
+ * passed since the last successful mmp write. This guarantees the activity
+ * test will see mmp writes if the pool is imported.
+ */
+uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;
+
+char *mmp_tag = "mmp_write_uberblock";
+static void mmp_thread(void *arg);
+
+void
+mmp_init(spa_t *spa)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
+ mmp->mmp_kstat_id = 1;
+}
+
+void
+mmp_fini(spa_t *spa)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ mutex_destroy(&mmp->mmp_thread_lock);
+ cv_destroy(&mmp->mmp_thread_cv);
+ mutex_destroy(&mmp->mmp_io_lock);
+}
+
+static void
+mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
+{
+ CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
+ mutex_enter(&mmp->mmp_thread_lock);
+}
+
+static void
+mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
+{
+ ASSERT(*mpp != NULL);
+ *mpp = NULL;
+ cv_broadcast(&mmp->mmp_thread_cv);
+ CALLB_CPR_EXIT(cpr); /* drops &mmp->mmp_thread_lock */
+ thread_exit();
+}
+
+void
+mmp_thread_start(spa_t *spa)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ if (spa_writeable(spa)) {
+ mutex_enter(&mmp->mmp_thread_lock);
+ if (!mmp->mmp_thread) {
+ dprintf("mmp_thread_start pool %s\n",
+ spa->spa_name);
+ mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
+ spa, 0, &p0, TS_RUN, minclsyspri);
+ }
+ mutex_exit(&mmp->mmp_thread_lock);
+ }
+}
+
+void
+mmp_thread_stop(spa_t *spa)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ mutex_enter(&mmp->mmp_thread_lock);
+ mmp->mmp_thread_exiting = 1;
+ cv_broadcast(&mmp->mmp_thread_cv);
+
+ while (mmp->mmp_thread) {
+ cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
+ }
+ mutex_exit(&mmp->mmp_thread_lock);
+
+ ASSERT(mmp->mmp_thread == NULL);
+ mmp->mmp_thread_exiting = 0;
+}
+
+typedef enum mmp_vdev_state_flag {
+ MMP_FAIL_NOT_WRITABLE = (1 << 0),
+ MMP_FAIL_WRITE_PENDING = (1 << 1),
+} mmp_vdev_state_flag_t;
+
+/*
+ * Find a leaf vdev to write an MMP block to. It must not have an outstanding
+ * mmp write (if so a new write will also likely block). If there is no usable
+ * leaf, a nonzero error value is returned. The error value returned is a bit
+ * field.
+ *
+ * MMP_FAIL_WRITE_PENDING One or more leaf vdevs are writeable, but have an
+ * outstanding MMP write.
+ * MMP_FAIL_NOT_WRITABLE One or more leaf vdevs are not writeable.
+ */
+
+static int
+mmp_next_leaf(spa_t *spa)
+{
+ vdev_t *leaf;
+ vdev_t *starting_leaf;
+ int fail_mask = 0;
+
+ ASSERT(MUTEX_HELD(&spa->spa_mmp.mmp_io_lock));
+ ASSERT(spa_config_held(spa, SCL_STATE, RW_READER));
+ ASSERT(list_link_active(&spa->spa_leaf_list.list_head) == B_TRUE);
+ ASSERT(!list_is_empty(&spa->spa_leaf_list));
+
+ if (spa->spa_mmp.mmp_leaf_last_gen != spa->spa_leaf_list_gen) {
+ spa->spa_mmp.mmp_last_leaf = list_head(&spa->spa_leaf_list);
+ spa->spa_mmp.mmp_leaf_last_gen = spa->spa_leaf_list_gen;
+ }
+
+ leaf = spa->spa_mmp.mmp_last_leaf;
+ if (leaf == NULL)
+ leaf = list_head(&spa->spa_leaf_list);
+ starting_leaf = leaf;
+
+ do {
+ leaf = list_next(&spa->spa_leaf_list, leaf);
+ if (leaf == NULL)
+ leaf = list_head(&spa->spa_leaf_list);
+
+ if (!vdev_writeable(leaf)) {
+ fail_mask |= MMP_FAIL_NOT_WRITABLE;
+ } else if (leaf->vdev_mmp_pending != 0) {
+ fail_mask |= MMP_FAIL_WRITE_PENDING;
+ } else {
+ spa->spa_mmp.mmp_last_leaf = leaf;
+ return (0);
+ }
+ } while (leaf != starting_leaf);
+
+ ASSERT(fail_mask);
+
+ return (fail_mask);
+}
+
+/*
+ * MMP writes are issued on a fixed schedule, but may complete at variable,
+ * much longer, intervals. The mmp_delay captures long periods between
+ * successful writes for any reason, including disk latency, scheduling delays,
+ * etc.
+ *
+ * The mmp_delay is usually calculated as a decaying average, but if the latest
+ * delay is higher we do not average it, so that we do not hide sudden spikes
+ * which the importing host must wait for.
+ *
+ * If writes are occurring frequently, such as due to a high rate of txg syncs,
+ * the mmp_delay could become very small. Since those short delays depend on
+ * activity we cannot count on, we never allow mmp_delay to get lower than the
+ * rate expected if only mmp_thread writes occur.
+ *
+ * If an mmp write was skipped or fails, and we have already waited longer than
+ * mmp_delay, we need to update it so the next write reflects the longer delay.
+ *
+ * Do not set mmp_delay if the multihost property is not on, so as not to
+ * trigger an activity check on import.
+ */
+static void
+mmp_delay_update(spa_t *spa, boolean_t write_completed)
+{
+ mmp_thread_t *mts = &spa->spa_mmp;
+ hrtime_t delay = gethrtime() - mts->mmp_last_write;
+
+ ASSERT(MUTEX_HELD(&mts->mmp_io_lock));
+
+ if (spa_multihost(spa) == B_FALSE) {
+ mts->mmp_delay = 0;
+ return;
+ }
+
+ if (delay > mts->mmp_delay)
+ mts->mmp_delay = delay;
+
+ if (write_completed == B_FALSE)
+ return;
+
+ mts->mmp_last_write = gethrtime();
+
+ /*
+ * strictly less than, in case delay was changed above.
+ */
+ if (delay < mts->mmp_delay) {
+ hrtime_t min_delay = MSEC2NSEC(zfs_multihost_interval) /
+ MAX(1, vdev_count_leaves(spa));
+ mts->mmp_delay = MAX(((delay + mts->mmp_delay * 127) / 128),
+ min_delay);
+ }
+}
+
+static void
+mmp_write_done(zio_t *zio)
+{
+ spa_t *spa = zio->io_spa;
+ vdev_t *vd = zio->io_vd;
+ mmp_thread_t *mts = zio->io_private;
+
+ mutex_enter(&mts->mmp_io_lock);
+ uint64_t mmp_kstat_id = vd->vdev_mmp_kstat_id;
+ hrtime_t mmp_write_duration = gethrtime() - vd->vdev_mmp_pending;
+
+ mmp_delay_update(spa, (zio->io_error == 0));
+
+ vd->vdev_mmp_pending = 0;
+ vd->vdev_mmp_kstat_id = 0;
+
+ mutex_exit(&mts->mmp_io_lock);
+ spa_config_exit(spa, SCL_STATE, mmp_tag);
+
+ abd_free(zio->io_abd);
+}
+
+/*
+ * When the uberblock on-disk is updated by a spa_sync,
+ * creating a new "best" uberblock, update the one stored
+ * in the mmp thread state, used for mmp writes.
+ */
+void
+mmp_update_uberblock(spa_t *spa, uberblock_t *ub)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ mutex_enter(&mmp->mmp_io_lock);
+ mmp->mmp_ub = *ub;
+ mmp->mmp_ub.ub_timestamp = gethrestime_sec();
+ mmp_delay_update(spa, B_TRUE);
+ mutex_exit(&mmp->mmp_io_lock);
+}
+
+/*
+ * Choose a random vdev, label, and MMP block, and write over it
+ * with a copy of the last-synced uberblock, whose timestamp
+ * has been updated to reflect that the pool is in use.
+ */
+static void
+mmp_write_uberblock(spa_t *spa)
+{
+ int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
+ mmp_thread_t *mmp = &spa->spa_mmp;
+ uberblock_t *ub;
+ vdev_t *vd = NULL;
+ int label, error;
+ uint64_t offset;
+
+ hrtime_t lock_acquire_time = gethrtime();
+ spa_config_enter(spa, SCL_STATE, mmp_tag, RW_READER);
+ lock_acquire_time = gethrtime() - lock_acquire_time;
+ if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
+ zfs_dbgmsg("SCL_STATE acquisition took %llu ns\n",
+ (u_longlong_t)lock_acquire_time);
+
+ mutex_enter(&mmp->mmp_io_lock);
+
+ error = mmp_next_leaf(spa);
+
+ /*
+ * spa_mmp_history has two types of entries:
+ * Issued MMP write: records time issued, error status, etc.
+ * Skipped MMP write: an MMP write could not be issued because no
+ * suitable leaf vdev was available. See comment above struct
+ * spa_mmp_history for details.
+ */
+
+ if (error) {
+ mmp_delay_update(spa, B_FALSE);
+ if (mmp->mmp_skip_error == error) {
+ /*
+ * ZoL porting note: the following is TBD
+ * spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1);
+ */
+ } else {
+ mmp->mmp_skip_error = error;
+ /*
+ * ZoL porting note: the following is TBD
+ * spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg,
+ * gethrestime_sec(), mmp->mmp_delay, NULL, 0,
+ * mmp->mmp_kstat_id++, error);
+ */
+ }
+ mutex_exit(&mmp->mmp_io_lock);
+ spa_config_exit(spa, SCL_STATE, mmp_tag);
+ return;
+ }
+
+ vd = spa->spa_mmp.mmp_last_leaf;
+ mmp->mmp_skip_error = 0;
+
+ if (mmp->mmp_zio_root == NULL)
+ mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
+ flags | ZIO_FLAG_GODFATHER);
+
+ ub = &mmp->mmp_ub;
+ ub->ub_timestamp = gethrestime_sec();
+ ub->ub_mmp_magic = MMP_MAGIC;
+ ub->ub_mmp_delay = mmp->mmp_delay;
+ vd->vdev_mmp_pending = gethrtime();
+ vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id;
+
+ zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
+ abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
+ abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
+ abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
+
+ mmp->mmp_kstat_id++;
+ mutex_exit(&mmp->mmp_io_lock);
+
+ offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
+ MMP_BLOCKS_PER_LABEL + spa_get_random(MMP_BLOCKS_PER_LABEL));
+
+ label = spa_get_random(VDEV_LABELS);
+ vdev_label_write(zio, vd, label, ub_abd, offset,
+ VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
+ flags | ZIO_FLAG_DONT_PROPAGATE);
+
+ /*
+ * ZoL porting note: the following is TBD
+ * (void) spa_mmp_history_add(spa, ub->ub_txg, ub->ub_timestamp,
+ * ub->ub_mmp_delay, vd, label, vd->vdev_mmp_kstat_id, 0);
+ */
+
+ zio_nowait(zio);
+}
+
+static void
+mmp_thread(void *arg)
+{
+ spa_t *spa = (spa_t *)arg;
+ mmp_thread_t *mmp = &spa->spa_mmp;
+ boolean_t last_spa_suspended = spa_suspended(spa);
+ boolean_t last_spa_multihost = spa_multihost(spa);
+ callb_cpr_t cpr;
+ hrtime_t max_fail_ns = zfs_multihost_fail_intervals *
+ MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
+
+ mmp_thread_enter(mmp, &cpr);
+
+ /*
+ * The mmp_write_done() function calculates mmp_delay based on the
+ * prior value of mmp_delay and the elapsed time since the last write.
+ * For the first mmp write, there is no "last write", so we start
+ * with fake, but reasonable, default non-zero values.
+ */
+ mmp->mmp_delay = MSEC2NSEC(MAX(zfs_multihost_interval,
+ MMP_MIN_INTERVAL)) / MAX(vdev_count_leaves(spa), 1);
+ mmp->mmp_last_write = gethrtime() - mmp->mmp_delay;
+
+ while (!mmp->mmp_thread_exiting) {
+ uint64_t mmp_fail_intervals = zfs_multihost_fail_intervals;
+ uint64_t mmp_interval = MSEC2NSEC(
+ MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
+ boolean_t suspended = spa_suspended(spa);
+ boolean_t multihost = spa_multihost(spa);
+ hrtime_t next_time;
+
+ if (multihost)
+ next_time = gethrtime() + mmp_interval /
+ MAX(vdev_count_leaves(spa), 1);
+ else
+ next_time = gethrtime() +
+ MSEC2NSEC(MMP_DEFAULT_INTERVAL);
+
+ /*
+ * MMP off => on, or suspended => !suspended:
+ * No writes occurred recently. Update mmp_last_write to give
+ * us some time to try.
+ */
+ if ((!last_spa_multihost && multihost) ||
+ (last_spa_suspended && !suspended)) {
+ mutex_enter(&mmp->mmp_io_lock);
+ mmp->mmp_last_write = gethrtime();
+ mutex_exit(&mmp->mmp_io_lock);
+ }
+
+ /*
+ * MMP on => off:
+ * mmp_delay == 0 tells importing node to skip activity check.
+ */
+ if (last_spa_multihost && !multihost) {
+ mutex_enter(&mmp->mmp_io_lock);
+ mmp->mmp_delay = 0;
+ mutex_exit(&mmp->mmp_io_lock);
+ }
+ last_spa_multihost = multihost;
+ last_spa_suspended = suspended;
+
+ /*
+ * Smooth max_fail_ns when its factors are decreased, because
+ * making (max_fail_ns < mmp_interval) results in the pool being
+ * immediately suspended before writes can occur at the new
+ * higher frequency.
+ */
+ if ((mmp_interval * mmp_fail_intervals) < max_fail_ns) {
+ max_fail_ns = ((31 * max_fail_ns) + (mmp_interval *
+ mmp_fail_intervals)) / 32;
+ } else {
+ max_fail_ns = mmp_interval * mmp_fail_intervals;
+ }
+
+ /*
+ * Suspend the pool if no MMP write has succeeded in over
+ * mmp_interval * mmp_fail_intervals nanoseconds.
+ */
+ if (!suspended && mmp_fail_intervals && multihost &&
+ (gethrtime() - mmp->mmp_last_write) > max_fail_ns) {
+ cmn_err(CE_WARN, "MMP writes to pool '%s' have not "
+ "succeeded in over %llus; suspending pool",
+ spa_name(spa),
+ NSEC2SEC(gethrtime() - mmp->mmp_last_write));
+ zio_suspend(spa, NULL, ZIO_SUSPEND_MMP);
+ }
+
+ if (multihost && !suspended)
+ mmp_write_uberblock(spa);
+
+ CALLB_CPR_SAFE_BEGIN(&cpr);
+ (void) cv_timedwait_sig_hrtime(&mmp->mmp_thread_cv,
+ &mmp->mmp_thread_lock, next_time);
+ CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
+ }
+
+ /* Outstanding writes are allowed to complete. */
+ if (mmp->mmp_zio_root)
+ zio_wait(mmp->mmp_zio_root);
+
+ mmp->mmp_zio_root = NULL;
+ mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
+}
+
+/*
+ * Signal the MMP thread to wake it, when it is sleeping on
+ * its cv. Used when some module parameter has changed and
+ * we want the thread to know about it.
+ * Only signal if the pool is active and mmp thread is
+ * running, otherwise there is no thread to wake.
+ */
+static void
+mmp_signal_thread(spa_t *spa)
+{
+ mmp_thread_t *mmp = &spa->spa_mmp;
+
+ mutex_enter(&mmp->mmp_thread_lock);
+ if (mmp->mmp_thread)
+ cv_broadcast(&mmp->mmp_thread_cv);
+ mutex_exit(&mmp->mmp_thread_lock);
+}
+
+void
+mmp_signal_all_threads(void)
+{
+ spa_t *spa = NULL;
+
+ mutex_enter(&spa_namespace_lock);
+ while ((spa = spa_next(spa))) {
+ if (spa->spa_state == POOL_STATE_ACTIVE)
+ mmp_signal_thread(spa);
+ }
+ mutex_exit(&spa_namespace_lock);
+}
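
Illustration (not part of the commit): a minimal userland sketch of the mmp_delay smoothing described in mmp_delay_update() above. The constants are copied from sys/mmp.h in this patch; the function name mmp_delay_update_sketch() and the example leaf count are hypothetical stand-ins, not the kernel routine itself.

#include <stdint.h>
#include <stdio.h>

#define	MSEC2NSEC(m)		((uint64_t)(m) * 1000000ULL)
#define	MMP_DEFAULT_INTERVAL	1000	/* ms, as in sys/mmp.h */

/*
 * Mirror of the decaying-average rule: spikes (delay > mmp_delay) are
 * taken at face value so the importer sees them, shorter delays are
 * folded in with a weight of 1/128, and the result is floored at
 * interval / leaf_count so txg-driven bursts cannot shrink it.
 */
static uint64_t
mmp_delay_update_sketch(uint64_t mmp_delay, uint64_t observed_delay,
    unsigned leaf_count)
{
	uint64_t min_delay, avg;

	if (observed_delay > mmp_delay)
		return (observed_delay);	/* do not hide spikes */

	min_delay = MSEC2NSEC(MMP_DEFAULT_INTERVAL) /
	    (leaf_count ? leaf_count : 1);
	avg = (observed_delay + mmp_delay * 127) / 128;
	return (avg > min_delay ? avg : min_delay);
}

int
main(void)
{
	uint64_t delay = MSEC2NSEC(250);	/* pretend 4 leaves, 1 s interval */

	/* A burst of fast writes decays slowly toward the floor. */
	for (int i = 0; i < 5; i++) {
		delay = mmp_delay_update_sketch(delay, MSEC2NSEC(10), 4);
		printf("mmp_delay ~= %llu ns\n", (unsigned long long)delay);
	}
	return (0);
}
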
diff --git a/usr/src/uts/common/fs/zfs/spa.c b/usr/src/uts/common/fs/zfs/spa.c
index 1a931f3b9f..f478c3f3a9 100644
--- a/usr/src/uts/common/fs/zfs/spa.c
+++ b/usr/src/uts/common/fs/zfs/spa.c
@@ -57,6 +57,7 @@
#include <sys/vdev_initialize.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
+#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
@@ -546,6 +547,16 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
error = SET_ERROR(EINVAL);
break;
+ case ZPOOL_PROP_MULTIHOST:
+ error = nvpair_value_uint64(elem, &intval);
+ if (!error && intval > 1)
+ error = SET_ERROR(EINVAL);
+
+ if (!error && !spa_get_hostid())
+ error = SET_ERROR(ENOTSUP);
+
+ break;
+
case ZPOOL_PROP_BOOTFS:
/*
* If the pool version is less than SPA_VERSION_BOOTFS,
@@ -1353,6 +1364,9 @@ spa_unload(spa_t *spa)
spa_config_exit(spa, SCL_ALL, spa);
}
+ if (spa->spa_mmp.mmp_thread)
+ mmp_thread_stop(spa);
+
/*
* Wait for any outstanding async I/O to complete.
*/
@@ -2305,6 +2319,205 @@ vdev_count_verify_zaps(vdev_t *vd)
return (total);
}
+/*
+ * Determine whether the activity check is required.
+ */
+static boolean_t
+spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
+ nvlist_t *config)
+{
+ uint64_t state = 0;
+ uint64_t hostid = 0;
+ uint64_t tryconfig_txg = 0;
+ uint64_t tryconfig_timestamp = 0;
+ nvlist_t *nvinfo;
+
+ if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
+ nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
+ (void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
+ &tryconfig_txg);
+ (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
+ &tryconfig_timestamp);
+ }
+
+ (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
+
+ /*
+ * Disable the MMP activity check - This is used by zdb which
+ * is intended to be used on potentially active pools.
+ */
+ if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
+ return (B_FALSE);
+
+ /*
+ * Skip the activity check when the MMP feature is disabled.
+ */
+ if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
+ return (B_FALSE);
+ /*
+ * If the tryconfig_* values are nonzero, they are the results of an
+ * earlier tryimport. If they match the uberblock we just found, then
+ * the pool has not changed and we return false so we do not test a
+ * second time.
+ */
+ if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
+ tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp)
+ return (B_FALSE);
+
+ /*
+ * Allow the activity check to be skipped when importing the pool
+ * on the same host which last imported it. Since the hostid from
+ * configuration may be stale use the one read from the label.
+ */
+ if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
+ hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
+
+ if (hostid == spa_get_hostid())
+ return (B_FALSE);
+
+ /*
+ * Skip the activity test when the pool was cleanly exported.
+ */
+ if (state != POOL_STATE_ACTIVE)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+/*
+ * Perform the import activity check. If the user canceled the import or
+ * we detected activity then fail.
+ */
+static int
+spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
+{
+ uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
+ uint64_t txg = ub->ub_txg;
+ uint64_t timestamp = ub->ub_timestamp;
+ uint64_t import_delay = NANOSEC;
+ hrtime_t import_expire;
+ nvlist_t *mmp_label = NULL;
+ vdev_t *rvd = spa->spa_root_vdev;
+ kcondvar_t cv;
+ kmutex_t mtx;
+ int error = 0;
+
+ cv_init(&cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
+ mutex_enter(&mtx);
+
+ /*
+ * If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
+ * during the earlier tryimport. If the txg recorded there is 0 then
+ * the pool is known to be active on another host.
+ *
+ * Otherwise, the pool might be in use on another node. Check for
+ * changes in the uberblocks on disk if necessary.
+ */
+ if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
+ nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
+ ZPOOL_CONFIG_LOAD_INFO);
+
+ if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
+ fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
+ vdev_uberblock_load(rvd, ub, &mmp_label);
+ error = SET_ERROR(EREMOTEIO);
+ goto out;
+ }
+ }
+
+ /*
+ * Preferentially use the zfs_multihost_interval from the node which
+ * last imported the pool. This value is stored in an MMP uberblock as:
+ *
+ * ub_mmp_delay * vdev_count_leaves() == zfs_multihost_interval
+ */
+ if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay)
+ import_delay = MAX(import_delay, import_intervals *
+ ub->ub_mmp_delay * MAX(vdev_count_leaves(spa), 1));
+
+ /* Apply a floor using the local default values. */
+ import_delay = MAX(import_delay, import_intervals *
+ MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL)));
+
+ zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu import_intervals=%u "
+ "leaves=%u", import_delay, ub->ub_mmp_delay, import_intervals,
+ vdev_count_leaves(spa));
+
+ /* Add a small random factor in case of simultaneous imports (0-25%) */
+ import_expire = gethrtime() + import_delay +
+ (import_delay * spa_get_random(250) / 1000);
+
+ while (gethrtime() < import_expire) {
+ vdev_uberblock_load(rvd, ub, &mmp_label);
+
+ if (txg != ub->ub_txg || timestamp != ub->ub_timestamp) {
+ error = SET_ERROR(EREMOTEIO);
+ break;
+ }
+
+ if (mmp_label) {
+ nvlist_free(mmp_label);
+ mmp_label = NULL;
+ }
+
+ error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
+ if (error != -1) {
+ error = SET_ERROR(EINTR);
+ break;
+ }
+ error = 0;
+ }
+
+out:
+ mutex_exit(&mtx);
+ mutex_destroy(&mtx);
+ cv_destroy(&cv);
+
+ /*
+ * If the pool is determined to be active store the status in the
+ * spa->spa_load_info nvlist. If the remote hostname or hostid are
+ * available from configuration read from disk store them as well.
+ * This allows 'zpool import' to generate a more useful message.
+ *
+ * ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
+ * ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
+ * ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
+ */
+ if (error == EREMOTEIO) {
+ char *hostname = "<unknown>";
+ uint64_t hostid = 0;
+
+ if (mmp_label) {
+ if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
+ hostname = fnvlist_lookup_string(mmp_label,
+ ZPOOL_CONFIG_HOSTNAME);
+ fnvlist_add_string(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
+ }
+
+ if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
+ hostid = fnvlist_lookup_uint64(mmp_label,
+ ZPOOL_CONFIG_HOSTID);
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_HOSTID, hostid);
+ }
+ }
+
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_TXG, 0);
+
+ error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
+ }
+
+ if (mmp_label)
+ nvlist_free(mmp_label);
+
+ return (error);
+}
+
static int
spa_verify_host(spa_t *spa, nvlist_t *mos_config)
{
@@ -2555,6 +2768,7 @@ spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *label;
uberblock_t *ub = &spa->spa_uberblock;
+ boolean_t activity_check = B_FALSE;
/*
* If we are opening the checkpointed state of the pool by
@@ -2597,6 +2811,34 @@ spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
(u_longlong_t)ub->ub_txg);
/*
+ * For pools which have the multihost property on determine if the
+ * pool is truly inactive and can be safely imported. Prevent
+ * hosts which don't have a hostid set from importing the pool.
+ */
+ activity_check = spa_activity_check_required(spa, ub, label,
+ spa->spa_config);
+ if (activity_check) {
+ if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
+ spa_get_hostid() == 0) {
+ nvlist_free(label);
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
+ return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
+ }
+
+ int error = spa_activity_check(spa, ub, spa->spa_config);
+ if (error) {
+ nvlist_free(label);
+ return (error);
+ }
+
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
+ }
+
+ /*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
@@ -3151,6 +3393,7 @@ spa_ld_get_props(spa_t *spa)
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
+ spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
&spa->spa_dedup_ditto);
@@ -3239,6 +3482,18 @@ spa_ld_load_vdev_metadata(spa_t *spa)
vdev_t *rvd = spa->spa_root_vdev;
/*
+ * If the 'multihost' property is set, then never allow a pool to
+ * be imported when the system hostid is zero. The exception to
+ * this rule is zdb which is always allowed to access pools.
+ */
+ if (spa_multihost(spa) && spa_get_hostid() == 0 &&
+ (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
+ fnvlist_add_uint64(spa->spa_load_info,
+ ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
+ return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
+ }
+
+ /*
* If the 'autoreplace' property is set, then post a resource notifying
* the ZFS DE that it should not issue any faults for unopenable
* devices. We also iterate over the vdevs, and post a sysevent for any
@@ -3838,6 +4093,7 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
*/
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
+ mmp_thread_start(spa);
/*
* Wait for all claims to sync. We sync up to the highest
@@ -4361,10 +4617,14 @@ spa_get_stats(const char *name, nvlist_t **config,
ZPOOL_CONFIG_ERRCOUNT,
spa_get_errlog_size(spa)) == 0);
- if (spa_suspended(spa))
+ if (spa_suspended(spa)) {
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED,
spa->spa_failmode) == 0);
+ VERIFY(nvlist_add_uint64(*config,
+ ZPOOL_CONFIG_SUSPENDED_REASON,
+ spa->spa_suspended) == 0);
+ }
spa_add_spares(spa, *config);
spa_add_l2cache(spa, *config);
@@ -4451,18 +4711,6 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
goto out;
}
- /*
- * The L2ARC currently only supports disk devices in
- * kernel context. For user-level testing, we allow it.
- */
-#ifdef _KERNEL
- if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
- strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
- error = SET_ERROR(ENOTBLK);
- vdev_free(vd);
- goto out;
- }
-#endif
vd->vdev_top = vd;
if ((error = vdev_open(vd)) == 0 &&
@@ -4807,6 +5055,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
+ spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
@@ -4817,6 +5066,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
+ mmp_thread_start(spa);
/*
* We explicitly wait for the first transaction to complete so that our
@@ -7403,6 +7653,9 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
spa_async_request(spa,
SPA_ASYNC_AUTOEXPAND);
break;
+ case ZPOOL_PROP_MULTIHOST:
+ spa->spa_multihost = intval;
+ break;
case ZPOOL_PROP_DEDUPDITTO:
spa->spa_dedup_ditto = intval;
break;
@@ -7804,7 +8057,7 @@ spa_sync(spa_t *spa, uint64_t txg)
if (error == 0)
break;
- zio_suspend(spa, NULL);
+ zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
zio_resume_wait(spa);
}
dmu_tx_commit(tx);
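
Illustration (not part of the commit): a hedged sketch of how spa_activity_check() above sizes its wait. The constants come from sys/mmp.h in this patch; import_delay_sketch() is a hypothetical helper, and rand() stands in for spa_get_random() to model the 0-25% padding against simultaneous imports.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	NANOSEC				1000000000ULL
#define	MSEC2NSEC(m)			((uint64_t)(m) * 1000000ULL)
#define	MMP_MIN_INTERVAL		100	/* ms */
#define	MMP_DEFAULT_INTERVAL		1000	/* ms */
#define	MMP_DEFAULT_IMPORT_INTERVALS	10

#define	MAX(a, b)	((a) > (b) ? (a) : (b))

/*
 * Prefer the last writer's observed ub_mmp_delay scaled by the leaf
 * count, apply a floor based on the local tunables, then pad by a
 * random 0-25% so two hosts importing at once are unlikely to finish
 * their checks at the same moment.
 */
static uint64_t
import_delay_sketch(uint64_t ub_mmp_delay, unsigned leaves)
{
	uint64_t intervals = MMP_DEFAULT_IMPORT_INTERVALS;
	uint64_t delay = NANOSEC;

	if (ub_mmp_delay != 0)
		delay = MAX(delay, intervals * ub_mmp_delay * MAX(leaves, 1));

	delay = MAX(delay, intervals *
	    MSEC2NSEC(MAX(MMP_DEFAULT_INTERVAL, MMP_MIN_INTERVAL)));

	return (delay + delay * (rand() % 250) / 1000);
}

int
main(void)
{
	/* e.g. the last writer saw ~250 ms between MMP writes on 4 leaves */
	uint64_t d = import_delay_sketch(MSEC2NSEC(250), 4);

	printf("activity check waits ~%llu s before declaring the pool idle\n",
	    (unsigned long long)(d / NANOSEC));
	return (0);
}
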
diff --git a/usr/src/uts/common/fs/zfs/spa_config.c b/usr/src/uts/common/fs/zfs/spa_config.c
index ad61dd0723..e01260f312 100644
--- a/usr/src/uts/common/fs/zfs/spa_config.c
+++ b/usr/src/uts/common/fs/zfs/spa_config.c
@@ -413,8 +413,7 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
spa->spa_comment);
}
- hostid = zone_get_hostid(NULL);
-
+ hostid = spa_get_hostid();
if (hostid != 0) {
fnvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID, hostid);
}
diff --git a/usr/src/uts/common/fs/zfs/spa_misc.c b/usr/src/uts/common/fs/zfs/spa_misc.c
index 944e684d97..577871cb8a 100644
--- a/usr/src/uts/common/fs/zfs/spa_misc.c
+++ b/usr/src/uts/common/fs/zfs/spa_misc.c
@@ -718,6 +718,9 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
}
+ list_create(&spa->spa_leaf_list, sizeof (vdev_t),
+ offsetof(vdev_t, vdev_leaf_node));
+
return (spa);
}
@@ -762,6 +765,7 @@ spa_remove(spa_t *spa)
sizeof (avl_tree_t));
list_destroy(&spa->spa_config_list);
+ list_destroy(&spa->spa_leaf_list);
nvlist_free(spa->spa_label_features);
nvlist_free(spa->spa_load_info);
@@ -1407,6 +1411,9 @@ spa_get_random(uint64_t range)
ASSERT(range != 0);
+ if (range == 1)
+ return (0);
+
(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
return (r % range);
@@ -1736,7 +1743,7 @@ spa_get_failmode(spa_t *spa)
boolean_t
spa_suspended(spa_t *spa)
{
- return (spa->spa_suspended);
+ return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}
uint64_t
@@ -2114,6 +2121,30 @@ spa_maxdnodesize(spa_t *spa)
return (DNODE_MIN_SIZE);
}
+boolean_t
+spa_multihost(spa_t *spa)
+{
+ return (spa->spa_multihost ? B_TRUE : B_FALSE);
+}
+
+unsigned long
+spa_get_hostid(void)
+{
+ unsigned long myhostid;
+
+#ifdef _KERNEL
+ myhostid = zone_get_hostid(NULL);
+#else /* _KERNEL */
+ /*
+ * We're emulating the system's hostid in userland, so
+ * we can't use zone_get_hostid().
+ */
+ (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
+#endif /* _KERNEL */
+
+ return (myhostid);
+}
+
/*
* Returns the txg that the last device removal completed. No indirect mappings
* have been added since this txg.
diff --git a/usr/src/uts/common/fs/zfs/sys/dsl_pool.h b/usr/src/uts/common/fs/zfs/sys/dsl_pool.h
index c79c5bf735..b23d19eef5 100644
--- a/usr/src/uts/common/fs/zfs/sys/dsl_pool.h
+++ b/usr/src/uts/common/fs/zfs/sys/dsl_pool.h
@@ -39,6 +39,7 @@
#include <sys/bptree.h>
#include <sys/rrwlock.h>
#include <sys/dsl_synctask.h>
+#include <sys/mmp.h>
#ifdef __cplusplus
extern "C" {
diff --git a/usr/src/uts/common/fs/zfs/sys/mmp.h b/usr/src/uts/common/fs/zfs/sys/mmp.h
new file mode 100644
index 0000000000..edb0d43470
--- /dev/null
+++ b/usr/src/uts/common/fs/zfs/sys/mmp.h
@@ -0,0 +1,68 @@
+/*
+ * CDDL HEADER START
+ *
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (C) 2017 by Lawrence Livermore National Security, LLC.
+ */
+
+#ifndef _SYS_MMP_H
+#define _SYS_MMP_H
+
+#include <sys/spa.h>
+#include <sys/zfs_context.h>
+#include <sys/uberblock_impl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MMP_MIN_INTERVAL 100 /* ms */
+#define MMP_DEFAULT_INTERVAL 1000 /* ms */
+#define MMP_DEFAULT_IMPORT_INTERVALS 10
+#define MMP_DEFAULT_FAIL_INTERVALS 5
+
+typedef struct mmp_thread {
+ kmutex_t mmp_thread_lock; /* protect thread mgmt fields */
+ kcondvar_t mmp_thread_cv;
+ kthread_t *mmp_thread;
+ uint8_t mmp_thread_exiting;
+ kmutex_t mmp_io_lock; /* protect below */
+ hrtime_t mmp_last_write; /* last successful MMP write */
+ uint64_t mmp_delay; /* decaying avg ns between MMP writes */
+ uberblock_t mmp_ub; /* last ub written by sync */
+ zio_t *mmp_zio_root; /* root of mmp write zios */
+ uint64_t mmp_kstat_id; /* unique id for next MMP write kstat */
+ int mmp_skip_error; /* reason for last skipped write */
+ vdev_t *mmp_last_leaf; /* last mmp write sent here */
+ uint64_t mmp_leaf_last_gen; /* last mmp write sent here */
+} mmp_thread_t;
+
+
+extern void mmp_init(struct spa *spa);
+extern void mmp_fini(struct spa *spa);
+extern void mmp_thread_start(struct spa *spa);
+extern void mmp_thread_stop(struct spa *spa);
+extern void mmp_update_uberblock(struct spa *spa, struct uberblock *ub);
+extern void mmp_signal_all_threads(void);
+
+/* Global tuning */
+extern ulong_t zfs_multihost_interval;
+extern uint_t zfs_multihost_fail_intervals;
+extern uint_t zfs_multihost_import_intervals;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MMP_H */
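
Illustration (not part of the commit): a small sketch relating the tunables declared above, using their default values from this header; the 8-leaf pool is an assumed example. It prints the per-leaf write cadence, the failure window after which the pool is suspended, and the minimum activity-check duration on import.

#include <stdio.h>

/* Defaults copied from sys/mmp.h above. */
#define	MMP_DEFAULT_INTERVAL		1000	/* ms */
#define	MMP_DEFAULT_FAIL_INTERVALS	5
#define	MMP_DEFAULT_IMPORT_INTERVALS	10

int
main(void)
{
	unsigned leaves = 8;	/* example pool with 8 leaf vdevs */

	/* Some leaf in the pool receives an MMP write this often. */
	printf("write period: %u ms\n", MMP_DEFAULT_INTERVAL / leaves);

	/* No successful MMP write for this long => pool is suspended. */
	printf("suspend after: %u ms\n",
	    MMP_DEFAULT_FAIL_INTERVALS * MMP_DEFAULT_INTERVAL);

	/* Minimum time an importing host must watch for activity. */
	printf("import check: >= %u ms\n",
	    MMP_DEFAULT_IMPORT_INTERVALS * MMP_DEFAULT_INTERVAL);
	return (0);
}
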
diff --git a/usr/src/uts/common/fs/zfs/sys/spa.h b/usr/src/uts/common/fs/zfs/sys/spa.h
index 3ecec3df39..dfda4eeabe 100644
--- a/usr/src/uts/common/fs/zfs/sys/spa.h
+++ b/usr/src/uts/common/fs/zfs/sys/spa.h
@@ -844,6 +844,8 @@ extern boolean_t spa_writeable(spa_t *spa);
extern boolean_t spa_has_pending_synctask(spa_t *spa);
extern int spa_maxblocksize(spa_t *spa);
extern int spa_maxdnodesize(spa_t *spa);
+extern boolean_t spa_multihost(spa_t *spa);
+extern unsigned long spa_get_hostid(void);
extern boolean_t spa_has_checkpoint(spa_t *spa);
extern boolean_t spa_importing_readonly_checkpoint(spa_t *spa);
extern boolean_t spa_suspend_async_destroy(spa_t *spa);
diff --git a/usr/src/uts/common/fs/zfs/sys/spa_impl.h b/usr/src/uts/common/fs/zfs/sys/spa_impl.h
index 7f36c0d456..cf9c32f624 100644
--- a/usr/src/uts/common/fs/zfs/sys/spa_impl.h
+++ b/usr/src/uts/common/fs/zfs/sys/spa_impl.h
@@ -326,7 +326,7 @@ struct spa {
zio_t *spa_txg_zio[TXG_SIZE]; /* spa_sync() waits for this */
kmutex_t spa_suspend_lock; /* protects suspend_zio_root */
kcondvar_t spa_suspend_cv; /* notification of resume */
- uint8_t spa_suspended; /* pool is suspended */
+ zio_suspend_reason_t spa_suspended; /* pool is suspended */
uint8_t spa_claiming; /* pool is doing zil_claim() */
boolean_t spa_is_root; /* pool is root */
int spa_minref; /* num refs when first opened */
@@ -379,6 +379,11 @@ struct spa {
hrtime_t spa_ccw_fail_time; /* Conf cache write fail time */
+ uint64_t spa_multihost; /* multihost aware (mmp) */
+ mmp_thread_t spa_mmp; /* multihost mmp thread */
+ list_t spa_leaf_list; /* list of leaf vdevs */
+ uint64_t spa_leaf_list_gen; /* track leaf_list changes */
+
/*
* spa_refcount & spa_config_lock must be the last elements
* because refcount_t changes size based on compilation options.
diff --git a/usr/src/uts/common/fs/zfs/sys/uberblock.h b/usr/src/uts/common/fs/zfs/sys/uberblock.h
index 21e7ae0de7..044e438387 100644
--- a/usr/src/uts/common/fs/zfs/sys/uberblock.h
+++ b/usr/src/uts/common/fs/zfs/sys/uberblock.h
@@ -40,7 +40,8 @@ extern "C" {
typedef struct uberblock uberblock_t;
extern int uberblock_verify(uberblock_t *);
-extern boolean_t uberblock_update(uberblock_t *, vdev_t *, uint64_t);
+extern boolean_t uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg,
+ uint64_t mmp_delay);
#ifdef __cplusplus
}
diff --git a/usr/src/uts/common/fs/zfs/sys/uberblock_impl.h b/usr/src/uts/common/fs/zfs/sys/uberblock_impl.h
index 9a3684577d..e649a4ccda 100644
--- a/usr/src/uts/common/fs/zfs/sys/uberblock_impl.h
+++ b/usr/src/uts/common/fs/zfs/sys/uberblock_impl.h
@@ -44,6 +44,7 @@ extern "C" {
*/
#define UBERBLOCK_MAGIC 0x00bab10c /* oo-ba-bloc! */
#define UBERBLOCK_SHIFT 10 /* up to 1K */
+#define MMP_MAGIC 0xa11cea11 /* all-see-all */
struct uberblock {
uint64_t ub_magic; /* UBERBLOCK_MAGIC */
@@ -56,7 +57,7 @@ struct uberblock {
/* highest SPA_VERSION supported by software that wrote this txg */
uint64_t ub_software_version;
- /* These fields are reserved for features that are under development: */
+ /* Maybe missing in uberblocks we read, but always written */
uint64_t ub_mmp_magic;
uint64_t ub_mmp_delay;
uint64_t ub_mmp_seq;
diff --git a/usr/src/uts/common/fs/zfs/sys/vdev.h b/usr/src/uts/common/fs/zfs/sys/vdev.h
index 688af34ccd..b45f0a2ca9 100644
--- a/usr/src/uts/common/fs/zfs/sys/vdev.h
+++ b/usr/src/uts/common/fs/zfs/sys/vdev.h
@@ -161,6 +161,8 @@ extern uint64_t vdev_label_offset(uint64_t psize, int l, uint64_t offset);
extern int vdev_label_number(uint64_t psise, uint64_t offset);
extern nvlist_t *vdev_label_read_config(vdev_t *vd, uint64_t txg);
extern void vdev_uberblock_load(vdev_t *, struct uberblock *, nvlist_t **);
+extern void vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t
+ offset, uint64_t size, zio_done_func_t *done, void *private, int flags);
typedef enum {
VDEV_LABEL_CREATE, /* create/add a new device */
diff --git a/usr/src/uts/common/fs/zfs/sys/vdev_impl.h b/usr/src/uts/common/fs/zfs/sys/vdev_impl.h
index 4c5fe11a1e..bbad778317 100644
--- a/usr/src/uts/common/fs/zfs/sys/vdev_impl.h
+++ b/usr/src/uts/common/fs/zfs/sys/vdev_impl.h
@@ -242,7 +242,7 @@ struct vdev {
/* pool checkpoint related */
space_map_t *vdev_checkpoint_sm; /* contains reserved blocks */
-
+
boolean_t vdev_initialize_exit_wanted;
vdev_initializing_state_t vdev_initialize_state;
kthread_t *vdev_initialize_thread;
@@ -343,6 +343,9 @@ struct vdev {
zio_t *vdev_probe_zio; /* root of current probe */
vdev_aux_t vdev_label_aux; /* on-disk aux state */
uint64_t vdev_leaf_zap;
+ hrtime_t vdev_mmp_pending; /* 0 if write finished */
+ uint64_t vdev_mmp_kstat_id; /* to find kstat entry */
+ list_node_t vdev_leaf_node; /* leaf vdev list */
/*
* For DTrace to work in userland (libzpool) context, these fields must
@@ -364,6 +367,12 @@ struct vdev {
#define VDEV_PHYS_SIZE (112 << 10)
#define VDEV_UBERBLOCK_RING (128 << 10)
+/*
+ * MMP blocks occupy the last MMP_BLOCKS_PER_LABEL slots in the uberblock
+ * ring when MMP is enabled.
+ */
+#define MMP_BLOCKS_PER_LABEL 1
+
/* The largest uberblock we support is 8k. */
#define MAX_UBERBLOCK_SHIFT (13)
#define VDEV_UBERBLOCK_SHIFT(vd) \
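
Illustration (not part of the commit): a sketch of how the uberblock ring is split once multihost is on, assuming the VDEV_UBERBLOCK_RING and MMP_BLOCKS_PER_LABEL values above and a 1K uberblock (UBERBLOCK_SHIFT). The txg-sync path cycles through the first N - MMP_BLOCKS_PER_LABEL slots (see vdev_uberblock_sync() below), while mmp_write_uberblock() picks a random slot from the reserved tail.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	VDEV_UBERBLOCK_RING	(128 << 10)	/* from vdev_impl.h */
#define	UBERBLOCK_SHIFT		10		/* 1K uberblocks assumed */
#define	MMP_BLOCKS_PER_LABEL	1

int
main(void)
{
	unsigned count = VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT;	/* 128 */
	uint64_t txg = 12345;

	/* Slot used by the regular txg-sync path when multihost is on. */
	unsigned sync_slot = txg % (count - MMP_BLOCKS_PER_LABEL);

	/* Slot overwritten by the MMP heartbeat (random within the tail). */
	unsigned mmp_slot = count - MMP_BLOCKS_PER_LABEL +
	    rand() % MMP_BLOCKS_PER_LABEL;

	printf("%u slots: txg %llu -> slot %u, heartbeat -> slot %u\n",
	    count, (unsigned long long)txg, sync_slot, mmp_slot);
	return (0);
}
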
diff --git a/usr/src/uts/common/fs/zfs/sys/zio.h b/usr/src/uts/common/fs/zfs/sys/zio.h
index ce3626e842..e14baad67c 100644
--- a/usr/src/uts/common/fs/zfs/sys/zio.h
+++ b/usr/src/uts/common/fs/zfs/sys/zio.h
@@ -138,6 +138,12 @@ enum zio_checksum {
#define ZIO_FAILURE_MODE_CONTINUE 1
#define ZIO_FAILURE_MODE_PANIC 2
+typedef enum zio_suspend_reason {
+ ZIO_SUSPEND_NONE = 0,
+ ZIO_SUSPEND_IOERR,
+ ZIO_SUSPEND_MMP,
+} zio_suspend_reason_t;
+
enum zio_flag {
/*
* Flags inherited by gang, ddt, and vdev children,
@@ -224,7 +230,7 @@ enum zio_child {
#define ZIO_CHILD_DDT_BIT ZIO_CHILD_BIT(ZIO_CHILD_DDT)
#define ZIO_CHILD_LOGICAL_BIT ZIO_CHILD_BIT(ZIO_CHILD_LOGICAL)
#define ZIO_CHILD_ALL_BITS \
- (ZIO_CHILD_VDEV_BIT | ZIO_CHILD_GANG_BIT | \
+ (ZIO_CHILD_VDEV_BIT | ZIO_CHILD_GANG_BIT | \
ZIO_CHILD_DDT_BIT | ZIO_CHILD_LOGICAL_BIT)
enum zio_wait_type {
@@ -435,7 +441,7 @@ struct zio {
avl_node_t io_queue_node;
avl_node_t io_offset_node;
avl_node_t io_alloc_node;
- zio_alloc_list_t io_alloc_list;
+ zio_alloc_list_t io_alloc_list;
/* Internal pipeline state */
enum zio_flag io_flags;
@@ -567,7 +573,7 @@ extern enum zio_checksum zio_checksum_dedup_select(spa_t *spa,
extern enum zio_compress zio_compress_select(spa_t *spa,
enum zio_compress child, enum zio_compress parent);
-extern void zio_suspend(spa_t *spa, zio_t *zio);
+extern void zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t);
extern int zio_resume(spa_t *spa);
extern void zio_resume_wait(spa_t *spa);
diff --git a/usr/src/uts/common/fs/zfs/uberblock.c b/usr/src/uts/common/fs/zfs/uberblock.c
index 8b198469e1..3b85260764 100644
--- a/usr/src/uts/common/fs/zfs/uberblock.c
+++ b/usr/src/uts/common/fs/zfs/uberblock.c
@@ -44,7 +44,7 @@ uberblock_verify(uberblock_t *ub)
* transaction group.
*/
boolean_t
-uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg)
+uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg, uint64_t mmp_delay)
{
ASSERT(ub->ub_txg < txg);
@@ -57,6 +57,9 @@ uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg)
ub->ub_guid_sum = rvd->vdev_guid_sum;
ub->ub_timestamp = gethrestime_sec();
ub->ub_software_version = SPA_VERSION;
+ ub->ub_mmp_magic = MMP_MAGIC;
+ ub->ub_mmp_delay = spa_multihost(rvd->vdev_spa) ? mmp_delay : 0;
+ ub->ub_mmp_seq = 0;
ub->ub_checkpoint_txg = 0;
return (ub->ub_rootbp.blk_birth == txg);
diff --git a/usr/src/uts/common/fs/zfs/vdev.c b/usr/src/uts/common/fs/zfs/vdev.c
index f5c3ff5d77..1c4f041072 100644
--- a/usr/src/uts/common/fs/zfs/vdev.c
+++ b/usr/src/uts/common/fs/zfs/vdev.c
@@ -353,6 +353,11 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += cvd->vdev_guid_sum;
+
+ if (cvd->vdev_ops->vdev_op_leaf) {
+ list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
+ cvd->vdev_spa->spa_leaf_list_gen++;
+ }
}
void
@@ -382,6 +387,12 @@ vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
pvd->vdev_children = 0;
}
+ if (cvd->vdev_ops->vdev_op_leaf) {
+ spa_t *spa = cvd->vdev_spa;
+ list_remove(&spa->spa_leaf_list, cvd);
+ spa->spa_leaf_list_gen++;
+ }
+
/*
* Walk up all ancestors to update guid sum.
*/
@@ -466,6 +477,7 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
vd->vdev_obsolete_segments = range_tree_create(NULL, NULL);
+ list_link_init(&vd->vdev_leaf_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -786,6 +798,7 @@ vdev_free(vdev_t *vd)
vdev_remove_child(vd->vdev_parent, vd);
ASSERT(vd->vdev_parent == NULL);
+ ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
* Clean up vdev structure.
diff --git a/usr/src/uts/common/fs/zfs/vdev_label.c b/usr/src/uts/common/fs/zfs/vdev_label.c
index 8d5f17c15f..17553607a5 100644
--- a/usr/src/uts/common/fs/zfs/vdev_label.c
+++ b/usr/src/uts/common/fs/zfs/vdev_label.c
@@ -22,6 +22,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
*/
/*
@@ -193,14 +194,21 @@ vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
}
-static void
+void
vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
uint64_t size, zio_done_func_t *done, void *private, int flags)
{
+#ifdef _KERNEL
+ /*
+ * This assert is invalid in the user-level ztest MMP code because
+ * the ztest thread is not in dsl_pool_sync_context. ZoL does not
+ * build the user-level code with DEBUG so this is not an issue there.
+ */
ASSERT(spa_config_held(zio->io_spa, SCL_ALL, RW_WRITER) == SCL_ALL ||
(spa_config_held(zio->io_spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
(SCL_CONFIG | SCL_STATE) &&
dsl_pool_sync_context(spa_get_dsl(zio->io_spa))));
+#endif
ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
zio_nowait(zio_write_phys(zio, vd,
@@ -1142,7 +1150,8 @@ vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
if (!vdev_writeable(vd))
return;
- int n = ub->ub_txg & (VDEV_UBERBLOCK_COUNT(vd) - 1);
+ int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
+ int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);
/* Copy the uberblock_t into the ABD */
abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
@@ -1360,10 +1369,13 @@ retry:
* and the vdev configuration hasn't changed,
* then there's nothing to do.
*/
- if (ub->ub_txg < txg &&
- uberblock_update(ub, spa->spa_root_vdev, txg) == B_FALSE &&
- list_is_empty(&spa->spa_config_dirty_list))
- return (0);
+ if (ub->ub_txg < txg) {
+ boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
+ txg, spa->spa_mmp.mmp_delay);
+
+ if (!changed && list_is_empty(&spa->spa_config_dirty_list))
+ return (0);
+ }
if (txg > spa_freeze_txg(spa))
return (0);
@@ -1426,6 +1438,9 @@ retry:
goto retry;
}
+ if (spa_multihost(spa))
+ mmp_update_uberblock(spa, ub);
+
/*
* Sync out odd labels for every dirty vdev. If the system dies
* in the middle of this process, the even labels and the new
diff --git a/usr/src/uts/common/fs/zfs/zfs_ioctl.c b/usr/src/uts/common/fs/zfs/zfs_ioctl.c
index 841c138ead..a28ba6ba86 100644
--- a/usr/src/uts/common/fs/zfs/zfs_ioctl.c
+++ b/usr/src/uts/common/fs/zfs/zfs_ioctl.c
@@ -4847,6 +4847,13 @@ zfs_ioc_clear(zfs_cmd_t *zc)
if (error != 0)
return (error);
+ /*
+ * If multihost is enabled, resuming I/O is unsafe as another
+ * host may have imported the pool.
+ */
+ if (spa_multihost(spa) && spa_suspended(spa))
+ return (SET_ERROR(EINVAL));
+
spa_vdev_state_enter(spa, SCL_NONE);
if (zc->zc_guid == 0) {
diff --git a/usr/src/uts/common/fs/zfs/zio.c b/usr/src/uts/common/fs/zfs/zio.c
index 845dc48087..7d54bc0046 100644
--- a/usr/src/uts/common/fs/zfs/zio.c
+++ b/usr/src/uts/common/fs/zfs/zio.c
@@ -1778,7 +1778,7 @@ zio_reexecute(zio_t *pio)
}
void
-zio_suspend(spa_t *spa, zio_t *zio)
+zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
fm_panic("Pool '%s' has encountered an uncorrectable I/O "
@@ -1794,7 +1794,7 @@ zio_suspend(spa_t *spa, zio_t *zio)
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
- spa->spa_suspended = B_TRUE;
+ spa->spa_suspended = reason;
if (zio != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
@@ -1817,7 +1817,7 @@ zio_resume(spa_t *spa)
* Reexecute all previously suspended i/o.
*/
mutex_enter(&spa->spa_suspend_lock);
- spa->spa_suspended = B_FALSE;
+ spa->spa_suspended = ZIO_SUSPEND_NONE;
cv_broadcast(&spa->spa_suspend_cv);
pio = spa->spa_suspend_zio_root;
spa->spa_suspend_zio_root = NULL;
@@ -3883,7 +3883,7 @@ zio_done(zio_t *zio)
* We'd fail again if we reexecuted now, so suspend
* until conditions improve (e.g. device comes online).
*/
- zio_suspend(spa, zio);
+ zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
} else {
/*
* Reexecution is potentially a huge amount of work.