Diffstat (limited to 'usr/src/uts/common/fs/zfs/dmu_objset.c')
-rw-r--r--	usr/src/uts/common/fs/zfs/dmu_objset.c	107
1 file changed, 107 insertions(+), 0 deletions(-)
diff --git a/usr/src/uts/common/fs/zfs/dmu_objset.c b/usr/src/uts/common/fs/zfs/dmu_objset.c
index 6bf61854b7..71d616415c 100644
--- a/usr/src/uts/common/fs/zfs/dmu_objset.c
+++ b/usr/src/uts/common/fs/zfs/dmu_objset.c
@@ -53,6 +53,7 @@
 #include <sys/zfs_onexit.h>
 #include <sys/dsl_destroy.h>
 #include <sys/vdev.h>
+#include <sys/zfeature.h>
 
 /*
  * Needed to close a window in dnode_move() that allows the objset to be freed
@@ -348,6 +349,17 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
 
         ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
 
+        /*
+         * The $ORIGIN dataset (if it exists) doesn't have an associated
+         * objset, so there's no reason to open it. The $ORIGIN dataset
+         * will not exist on pools older than SPA_VERSION_ORIGIN.
+         */
+        if (ds != NULL && spa_get_dsl(spa) != NULL &&
+            spa_get_dsl(spa)->dp_origin_snap != NULL) {
+                ASSERT3P(ds->ds_dir, !=,
+                    spa_get_dsl(spa)->dp_origin_snap->ds_dir);
+        }
+
         os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
         os->os_dsl_dataset = ds;
         os->os_spa = spa;
@@ -1052,6 +1064,101 @@ dmu_objset_clone(const char *clone, const char *origin)
             5, ZFS_SPACE_CHECK_NORMAL));
 }
 
+static int
+dmu_objset_remap_indirects_impl(objset_t *os, uint64_t last_removed_txg)
+{
+        int error = 0;
+        uint64_t object = 0;
+        while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
+                error = dmu_object_remap_indirects(os, object,
+                    last_removed_txg);
+                /*
+                 * If the ZPL removed the object before we managed to dnode_hold
+                 * it, we would get an ENOENT. If the ZPL declares its intent
+                 * to remove the object (dnode_free) before we manage to
+                 * dnode_hold it, we would get an EEXIST. In either case, we
+                 * want to continue remapping the other objects in the objset;
+                 * in all other cases, we want to break early.
+                 */
+                if (error != 0 && error != ENOENT && error != EEXIST) {
+                        break;
+                }
+        }
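+        /*
+         * dmu_object_next() returns ESRCH once it has stepped past the
+         * last allocated object, so ESRCH here means the whole objset
+         * was traversed; it is not an error.
+         */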
+        if (error == ESRCH) {
+                error = 0;
+        }
+        return (error);
+}
+
+int
+dmu_objset_remap_indirects(const char *fsname)
+{
+        int error = 0;
+        objset_t *os = NULL;
+        uint64_t last_removed_txg;
+        uint64_t remap_start_txg;
+        dsl_dir_t *dd;
+
+        error = dmu_objset_hold(fsname, FTAG, &os);
+        if (error != 0) {
+                return (error);
+        }
+        dd = dmu_objset_ds(os)->ds_dir;
+
+        if (!spa_feature_is_enabled(dmu_objset_spa(os),
+            SPA_FEATURE_OBSOLETE_COUNTS)) {
+                dmu_objset_rele(os, FTAG);
+                return (SET_ERROR(ENOTSUP));
+        }
+
+        if (dsl_dataset_is_snapshot(dmu_objset_ds(os))) {
+                dmu_objset_rele(os, FTAG);
+                return (SET_ERROR(EINVAL));
+        }
+
+        /*
+         * If there has not been a removal, we're done.
+         */
+        last_removed_txg = spa_get_last_removal_txg(dmu_objset_spa(os));
+        if (last_removed_txg == -1ULL) {
+                dmu_objset_rele(os, FTAG);
+                return (0);
+        }
+
+        /*
+         * If we have remapped since the last removal, we're done.
+         */
+        if (dsl_dir_is_zapified(dd)) {
+                uint64_t last_remap_txg;
+                if (zap_lookup(spa_meta_objset(dmu_objset_spa(os)),
+                    dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
+                    sizeof (last_remap_txg), 1, &last_remap_txg) == 0 &&
+                    last_remap_txg > last_removed_txg) {
+                        dmu_objset_rele(os, FTAG);
+                        return (0);
+                }
+        }
+
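+        /*
+         * Remapping every object can take a long time, so take a long
+         * hold on the dataset and drop the pool hold acquired by
+         * dmu_objset_hold(); the long hold keeps the dataset from being
+         * destroyed while we work without blocking the rest of the pool.
+         */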
+        dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
+        dsl_pool_rele(dmu_objset_pool(os), FTAG);
+
+        remap_start_txg = spa_last_synced_txg(dmu_objset_spa(os));
+        error = dmu_objset_remap_indirects_impl(os, last_removed_txg);
+        if (error == 0) {
+                /*
+                 * We update the last_remap_txg to be the start txg so that
+                 * we can guarantee that every block older than last_remap_txg
+                 * that can be remapped has been remapped.
+                 */
+                error = dsl_dir_update_last_remap_txg(dd, remap_start_txg);
+        }
+
+        dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
+        dsl_dataset_rele(dmu_objset_ds(os), FTAG);
+
+        return (error);
+}
+
 int
 dmu_objset_snapshot_one(const char *fsname, const char *snapname)
 {
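
For context, dmu_objset_remap_indirects() is meant to be driven from the ZFS
ioctl layer. The sketch below shows a minimal handler in that style; the name
zfs_ioc_remap and the nvlist-based handler signature are assumptions for
illustration and are not part of this diff.

/*
 * Hypothetical ioctl handler driving the new entry point. The operation
 * is identified by the dataset name alone, so innvl/outnvl are unused.
 * The call blocks until every object in the objset has been visited (or
 * an error other than ENOENT/EEXIST is hit) and returns that result:
 * ENOTSUP if the obsolete_counts feature is not enabled, EINVAL for
 * snapshots, 0 on success.
 */
static int
zfs_ioc_remap(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
        return (dmu_objset_remap_indirects(fsname));
}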