From 0b46d4dcedc4979cc6db45728993e8b094117cec Mon Sep 17 00:00:00 2001 From: Tom Caputi Date: Thu, 4 Oct 2018 10:08:09 +0900 Subject: Refcounted DSL Crypto Key Mappings Since native ZFS encryption was merged, we have been fighting against a series of bugs that come down to the same problem: Key mappings (which must be present during all I/O operations) are created and destroyed based on dataset ownership, but I/Os have traditionally been allowed to "leak" into the next txg after the dataset is disowned. In the past we have attempted to solve this problem by trying to ensure that datasets are disowned after all I/O is finished by calling txg_wait_synced(), but we have repeatedly found edge cases that need to be squashed and code paths that might incur a high number of txg syncs. This patch attempts to resolve this issue differently, by adding a reference to the key mapping for each txg it is dirtied in. By doing so, we can remove many of the unnecessary calls to txg_wait_synced() we have added in the past and ensure we don't need to deal with this problem in the future. 
Reviewed-by: Jorgen Lundman Reviewed by: Matthew Ahrens Reviewed-by: Brian Behlendorf Signed-off-by: Tom Caputi --- usr/src/cmd/ztest/ztest.c | 3 - usr/src/uts/common/fs/zfs/dmu_objset.c | 18 +++-- usr/src/uts/common/fs/zfs/dsl_crypt.c | 102 ++++++++++++++++++++-------- usr/src/uts/common/fs/zfs/dsl_dataset.c | 95 ++++++++++++++++++-------- usr/src/uts/common/fs/zfs/dsl_pool.c | 27 +++++++- usr/src/uts/common/fs/zfs/spa.c | 18 +---- usr/src/uts/common/fs/zfs/sys/dsl_crypt.h | 9 +-- usr/src/uts/common/fs/zfs/sys/dsl_dataset.h | 4 ++ usr/src/uts/common/fs/zfs/zil.c | 26 +++---- 9 files changed, 194 insertions(+), 108 deletions(-) diff --git a/usr/src/cmd/ztest/ztest.c b/usr/src/cmd/ztest/ztest.c index 2b210ba410..e91f10db19 100644 --- a/usr/src/cmd/ztest/ztest.c +++ b/usr/src/cmd/ztest/ztest.c @@ -3685,7 +3685,6 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) ztest_zd_init(&zdtmp, NULL, os); zil_replay(os, &zdtmp, ztest_replay_vector); ztest_zd_fini(&zdtmp); - txg_wait_synced(dmu_objset_pool(os), 0); dmu_objset_disown(os, B_TRUE, FTAG); } @@ -3756,7 +3755,6 @@ ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id) B_FALSE, B_TRUE, FTAG, &os2)); zil_close(zilog); - txg_wait_synced(spa_get_dsl(os->os_spa), 0); dmu_objset_disown(os, B_TRUE, FTAG); ztest_zd_fini(&zdtmp); @@ -6179,7 +6177,6 @@ ztest_dataset_close(int d) ztest_ds_t *zd = &ztest_ds[d]; zil_close(zd->zd_zilog); - txg_wait_synced(spa_get_dsl(zd->zd_os->os_spa), 0); dmu_objset_disown(zd->zd_os, B_TRUE, zd); ztest_zd_fini(zd); diff --git a/usr/src/uts/common/fs/zfs/dmu_objset.c b/usr/src/uts/common/fs/zfs/dmu_objset.c index eff09dab2b..ff1bc972a4 100644 --- a/usr/src/uts/common/fs/zfs/dmu_objset.c +++ b/usr/src/uts/common/fs/zfs/dmu_objset.c @@ -1075,6 +1075,7 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx) { dmu_objset_create_arg_t *doca = arg; dsl_pool_t *dp = dmu_tx_pool(tx); + spa_t *spa = dp->dp_spa; dsl_dir_t *pdd; const char *tail; dsl_dataset_t *ds; @@ -1092,8 +1093,7 @@ 
dmu_objset_create_sync(void *arg, dmu_tx_t *tx) DS_HOLD_FLAG_DECRYPT, FTAG, &ds)); rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); bp = dsl_dataset_get_blkptr(ds); - os = dmu_objset_create_impl(pdd->dd_pool->dp_spa, - ds, bp, doca->doca_type, tx); + os = dmu_objset_create_impl(spa, ds, bp, doca->doca_type, tx); rrw_exit(&ds->ds_bp_rwlock, FTAG); if (doca->doca_userfunc != NULL) { @@ -1117,7 +1117,7 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx) ds->ds_owner = FTAG; mutex_exit(&ds->ds_lock); - rzio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); + rzio = zio_root(spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds, tx->tx_txg); if (tmpds != NULL) { @@ -1127,8 +1127,12 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx) VERIFY0(zio_wait(rzio)); dmu_objset_do_userquota_updates(os, tx); taskq_wait(dp->dp_sync_taskq); + if (txg_list_member(&dp->dp_dirty_datasets, ds, tx->tx_txg)) { + ASSERT3P(ds->ds_key_mapping, !=, NULL); + key_mapping_rele(spa, ds->ds_key_mapping, ds); + } - rzio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); + rzio = zio_root(spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds, tx->tx_txg); if (tmpds != NULL) { @@ -1137,8 +1141,11 @@ dmu_objset_create_sync(void *arg, dmu_tx_t *tx) } VERIFY0(zio_wait(rzio)); - if (need_sync_done) + if (need_sync_done) { + ASSERT3P(ds->ds_key_mapping, !=, NULL); + key_mapping_rele(spa, ds->ds_key_mapping, ds); dsl_dataset_sync_done(ds, tx); + } mutex_enter(&ds->ds_lock); ds->ds_owner = NULL; @@ -1552,7 +1559,6 @@ dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx) if (os->os_raw_receive || os->os_next_write_raw[tx->tx_txg & TXG_MASK]) { ASSERT(os->os_encrypted); - os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE; arc_convert_to_raw(os->os_phys_buf, os->os_dsl_dataset->ds_object, ZFS_HOST_BYTEORDER, DMU_OT_OBJSET, NULL, NULL, NULL); diff --git a/usr/src/uts/common/fs/zfs/dsl_crypt.c 
b/usr/src/uts/common/fs/zfs/dsl_crypt.c index d1ffcba8fb..6ef478e956 100644 --- a/usr/src/uts/common/fs/zfs/dsl_crypt.c +++ b/usr/src/uts/common/fs/zfs/dsl_crypt.c @@ -889,6 +889,20 @@ spa_keystore_unload_wkey(const char *dsname) int ret = 0; dsl_dir_t *dd = NULL; dsl_pool_t *dp = NULL; + spa_t *spa = NULL; + + ret = spa_open(dsname, &spa, FTAG); + if (ret != 0) + return (ret); + + /* + * Wait for any outstanding txg IO to complete, releasing any + * remaining references on the wkey. + */ + if (spa_mode(spa) != FREAD) + txg_wait_synced(spa->spa_dsl_pool, 0); + + spa_close(spa, FTAG); /* hold the dsl dir */ ret = dsl_pool_hold(dsname, FTAG, &dp); @@ -923,9 +937,56 @@ error: return (ret); } +void +key_mapping_add_ref(dsl_key_mapping_t *km, void *tag) +{ + ASSERT3U(refcount_count(&km->km_refcnt), >=, 1); + (void) refcount_add(&km->km_refcnt, tag); +} + +/* + * The locking here is a little tricky to ensure we don't cause unnecessary + * performance problems. We want to release a key mapping whenever someone + * decrements the refcount to 0, but freeing the mapping requires removing + * it from the spa_keystore, which requires holding sk_km_lock as a writer. + * Most of the time we don't want to hold this lock as a writer, since the + * same lock is held as a reader for each IO that needs to encrypt / decrypt + * data for any dataset and in practice we will only actually free the + * mapping after unmounting a dataset. + */ +void +key_mapping_rele(spa_t *spa, dsl_key_mapping_t *km, void *tag) +{ + ASSERT3U(refcount_count(&km->km_refcnt), >=, 1); + + if (refcount_remove(&km->km_refcnt, tag) != 0) + return; + + /* + * We think we are going to need to free the mapping. Add a + * reference to prevent most other releasers from thinking + * this might be their responsibility. This is inherently + * racy, so we will confirm that we are legitimately the + * last holder once we have the sk_km_lock as a writer. 
+ */ + (void) refcount_add(&km->km_refcnt, FTAG); + + rw_enter(&spa->spa_keystore.sk_km_lock, RW_WRITER); + if (refcount_remove(&km->km_refcnt, FTAG) != 0) { + rw_exit(&spa->spa_keystore.sk_km_lock); + return; + } + + avl_remove(&spa->spa_keystore.sk_key_mappings, km); + rw_exit(&spa->spa_keystore.sk_km_lock); + + spa_keystore_dsl_key_rele(spa, km->km_key, km); + kmem_free(km, sizeof (dsl_key_mapping_t)); +} + int -spa_keystore_create_mapping_impl(spa_t *spa, uint64_t dsobj, - dsl_dir_t *dd, void *tag) +spa_keystore_create_mapping(spa_t *spa, dsl_dataset_t *ds, void *tag, + dsl_key_mapping_t **km_out) { int ret; avl_index_t where; @@ -936,14 +997,17 @@ spa_keystore_create_mapping_impl(spa_t *spa, uint64_t dsobj, km = kmem_zalloc(sizeof (dsl_key_mapping_t), KM_SLEEP); refcount_create(&km->km_refcnt); - ret = spa_keystore_dsl_key_hold_dd(spa, dd, km, &km->km_key); + ret = spa_keystore_dsl_key_hold_dd(spa, ds->ds_dir, km, &km->km_key); if (ret != 0) { refcount_destroy(&km->km_refcnt); kmem_free(km, sizeof (dsl_key_mapping_t)); + + if (km_out != NULL) + *km_out = NULL; return (ret); } - km->km_dsobj = dsobj; + km->km_dsobj = ds->ds_object; rw_enter(&spa->spa_keystore.sk_km_lock, RW_WRITER); @@ -959,9 +1023,13 @@ spa_keystore_create_mapping_impl(spa_t *spa, uint64_t dsobj, if (found_km != NULL) { should_free = B_TRUE; (void) refcount_add(&found_km->km_refcnt, tag); + if (km_out != NULL) + *km_out = found_km; } else { (void) refcount_add(&km->km_refcnt, tag); avl_insert(&spa->spa_keystore.sk_key_mappings, km, where); + if (km_out != NULL) + *km_out = km; } rw_exit(&spa->spa_keystore.sk_km_lock); @@ -975,25 +1043,17 @@ spa_keystore_create_mapping_impl(spa_t *spa, uint64_t dsobj, return (0); } -int -spa_keystore_create_mapping(spa_t *spa, dsl_dataset_t *ds, void *tag) -{ - return (spa_keystore_create_mapping_impl(spa, ds->ds_object, - ds->ds_dir, tag)); -} - int spa_keystore_remove_mapping(spa_t *spa, uint64_t dsobj, void *tag) { int ret; dsl_key_mapping_t search_km; 
dsl_key_mapping_t *found_km; - boolean_t should_free = B_FALSE; /* init the search key mapping */ search_km.km_dsobj = dsobj; - rw_enter(&spa->spa_keystore.sk_km_lock, RW_WRITER); + rw_enter(&spa->spa_keystore.sk_km_lock, RW_READER); /* find the matching mapping */ found_km = avl_find(&spa->spa_keystore.sk_key_mappings, @@ -1003,23 +1063,9 @@ spa_keystore_remove_mapping(spa_t *spa, uint64_t dsobj, void *tag) goto error_unlock; } - /* - * Decrement the refcount on the mapping and remove it from the tree if - * it is zero. Try to minimize time spent in this lock by deferring - * cleanup work. - */ - if (refcount_remove(&found_km->km_refcnt, tag) == 0) { - should_free = B_TRUE; - avl_remove(&spa->spa_keystore.sk_key_mappings, found_km); - } - rw_exit(&spa->spa_keystore.sk_km_lock); - /* destroy the key mapping */ - if (should_free) { - spa_keystore_dsl_key_rele(spa, found_km->km_key, found_km); - kmem_free(found_km, sizeof (dsl_key_mapping_t)); - } + key_mapping_rele(spa, found_km, tag); return (0); diff --git a/usr/src/uts/common/fs/zfs/dsl_dataset.c b/usr/src/uts/common/fs/zfs/dsl_dataset.c index e9513755b3..e9fdaef927 100644 --- a/usr/src/uts/common/fs/zfs/dsl_dataset.c +++ b/usr/src/uts/common/fs/zfs/dsl_dataset.c @@ -429,8 +429,8 @@ dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, void *tag) } int -dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj, - ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp) +dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag, + dsl_dataset_t **dsp) { objset_t *mos = dp->dp_meta_objset; dmu_buf_t *dbuf; @@ -587,6 +587,7 @@ dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj, } } } + ASSERT3P(ds->ds_dbuf, ==, dbuf); ASSERT3P(dsl_dataset_phys(ds), ==, dbuf->db_data); ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0 || @@ -594,22 +595,40 @@ dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj, dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap); *dsp = ds; - if ((flags & 
DS_HOLD_FLAG_DECRYPT) && ds->ds_dir->dd_crypto_obj != 0) { - err = spa_keystore_create_mapping(dp->dp_spa, ds, ds); - if (err != 0) { - dsl_dataset_rele(ds, tag); - return (SET_ERROR(EACCES)); - } - } - return (0); } int -dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag, - dsl_dataset_t **dsp) +dsl_dataset_create_key_mapping(dsl_dataset_t *ds) { - return (dsl_dataset_hold_obj_flags(dp, dsobj, 0, tag, dsp)); + dsl_dir_t *dd = ds->ds_dir; + + if (dd->dd_crypto_obj == 0) + return (0); + + return (spa_keystore_create_mapping(dd->dd_pool->dp_spa, + ds, ds, &ds->ds_key_mapping)); +} + +int +dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj, + ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp) +{ + int err; + + err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp); + if (err != 0) + return (err); + + ASSERT3P(*dsp, !=, NULL); + + if (flags & DS_HOLD_FLAG_DECRYPT) { + err = dsl_dataset_create_key_mapping(*dsp); + if (err != 0) + dsl_dataset_rele(*dsp, tag); + } + + return (err); } int @@ -772,29 +791,30 @@ dsl_dataset_namelen(dsl_dataset_t *ds) } void -dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag) +dsl_dataset_rele(dsl_dataset_t *ds, void *tag) { - if (ds->ds_dir != NULL && ds->ds_dir->dd_crypto_obj != 0 && - (flags & DS_HOLD_FLAG_DECRYPT)) { - (void) spa_keystore_remove_mapping(ds->ds_dir->dd_pool->dp_spa, - ds->ds_object, ds); - - /* - * Encrypted datasets require that users only release their - * decrypting reference after the dirty data has actually - * been written out. This ensures that the mapping exists - * when it is needed to write out dirty data. 
- */ - ASSERT(dmu_buf_user_refcount(ds->ds_dbuf) != 0 || - !dsl_dataset_is_dirty(ds)); - } dmu_buf_rele(ds->ds_dbuf, tag); } void -dsl_dataset_rele(dsl_dataset_t *ds, void *tag) +dsl_dataset_remove_key_mapping(dsl_dataset_t *ds) { - dsl_dataset_rele_flags(ds, 0, tag); + dsl_dir_t *dd = ds->ds_dir; + + if (dd == NULL || dd->dd_crypto_obj == 0) + return; + + (void) spa_keystore_remove_mapping(dd->dd_pool->dp_spa, + ds->ds_object, ds); +} + +void +dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag) +{ + if (flags & DS_HOLD_FLAG_DECRYPT) + dsl_dataset_remove_key_mapping(ds); + + dsl_dataset_rele(ds, tag); } void @@ -1146,8 +1166,18 @@ dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx) dp = ds->ds_dir->dd_pool; if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg)) { + objset_t *os = ds->ds_objset; + /* up the hold count until we can be written out */ dmu_buf_add_ref(ds->ds_dbuf, ds); + + /* if this dataset is encrypted, grab a reference to the DCK */ + if (ds->ds_dir->dd_crypto_obj != 0 && + !os->os_raw_receive && + !os->os_next_write_raw[tx->tx_txg & TXG_MASK]) { + ASSERT3P(ds->ds_key_mapping, !=, NULL); + key_mapping_add_ref(ds->ds_key_mapping, ds); + } } } @@ -1797,6 +1827,11 @@ dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx) os->os_synced_dnodes = NULL; } + if (os->os_encrypted) + os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE; + else + ASSERT0(os->os_next_write_raw[tx->tx_txg & TXG_MASK]); + ASSERT(!dmu_objset_is_dirty(os, dmu_tx_get_txg(tx))); dmu_buf_rele(ds->ds_dbuf, ds); diff --git a/usr/src/uts/common/fs/zfs/dsl_pool.c b/usr/src/uts/common/fs/zfs/dsl_pool.c index 024ce11980..0b1923aa75 100644 --- a/usr/src/uts/common/fs/zfs/dsl_pool.c +++ b/usr/src/uts/common/fs/zfs/dsl_pool.c @@ -503,7 +503,8 @@ dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp, obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx); /* create the root objset */ - VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds)); 
+ VERIFY0(dsl_dataset_hold_obj_flags(dp, obj, + DS_HOLD_FLAG_DECRYPT, FTAG, &ds)); #ifdef _KERNEL { objset_t *os; @@ -514,7 +515,7 @@ dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp, zfs_create_fs(os, kcred, zplprops, tx); } #endif - dsl_dataset_rele(ds, FTAG); + dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG); dmu_tx_commit(tx); @@ -675,9 +676,22 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) */ zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) { + objset_t *os = ds->ds_objset; + ASSERT(list_link_active(&ds->ds_synced_link)); dmu_buf_rele(ds->ds_dbuf, ds); dsl_dataset_sync(ds, zio, tx); + + /* + * Release any key mappings created by calls to + * dsl_dataset_dirty() from the userquota accounting + * code paths. + */ + if (os->os_encrypted && !os->os_raw_receive && + !os->os_next_write_raw[txg & TXG_MASK]) { + ASSERT3P(ds->ds_key_mapping, !=, NULL); + key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds); + } } VERIFY0(zio_wait(zio)); @@ -687,8 +701,17 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg) * * - move dead blocks from the pending deadlist to the on-disk deadlist * - release hold from dsl_dataset_dirty() + * - release key mapping hold from dsl_dataset_dirty() */ while ((ds = list_remove_head(&synced_datasets)) != NULL) { + objset_t *os = ds->ds_objset; + + if (os->os_encrypted && !os->os_raw_receive && + !os->os_next_write_raw[txg & TXG_MASK]) { + ASSERT3P(ds->ds_key_mapping, !=, NULL); + key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds); + } + dsl_dataset_sync_done(ds, tx); } while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) { diff --git a/usr/src/uts/common/fs/zfs/spa.c b/usr/src/uts/common/fs/zfs/spa.c index 6db625009d..48a4b2167e 100644 --- a/usr/src/uts/common/fs/zfs/spa.c +++ b/usr/src/uts/common/fs/zfs/spa.c @@ -4631,7 +4631,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, uint64_t txg = TXG_INITIAL; 
nvlist_t **spares, **l2cache; uint_t nspares, nl2cache; - uint64_t version, obj, root_dsobj = 0; + uint64_t version, obj; boolean_t has_features; char *poolname; nvlist_t *nvl; @@ -4874,26 +4874,10 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, dmu_tx_commit(tx); - /* - * If the root dataset is encrypted we will need to create key mappings - * for the zio layer before we start to write any data to disk and hold - * them until after the first txg has been synced. Waiting for the first - * transaction to complete also ensures that our bean counters are - * appropriately updated. - */ - if (dp->dp_root_dir->dd_crypto_obj != 0) { - root_dsobj = dsl_dir_phys(dp->dp_root_dir)->dd_head_dataset_obj; - VERIFY0(spa_keystore_create_mapping_impl(spa, root_dsobj, - dp->dp_root_dir, FTAG)); - } - spa->spa_sync_on = B_TRUE; txg_sync_start(dp); txg_wait_synced(dp, txg); - if (dp->dp_root_dir->dd_crypto_obj != 0) - VERIFY0(spa_keystore_remove_mapping(spa, root_dsobj, FTAG)); - spa_spawn_aux_threads(spa); spa_write_cachefile(spa, B_FALSE, B_TRUE); diff --git a/usr/src/uts/common/fs/zfs/sys/dsl_crypt.h b/usr/src/uts/common/fs/zfs/sys/dsl_crypt.h index 8573e370a3..ca448ead48 100644 --- a/usr/src/uts/common/fs/zfs/sys/dsl_crypt.h +++ b/usr/src/uts/common/fs/zfs/sys/dsl_crypt.h @@ -61,7 +61,7 @@ typedef struct dsl_wrapping_key { /* actual wrapping key */ crypto_key_t wk_key; - /* refcount of number of dsl_crypto_key_t's holding this struct */ + /* refcount of holders of this key */ refcount_t wk_refcnt; /* dsl directory object that owns this wrapping key */ @@ -181,10 +181,11 @@ int spa_keystore_load_wkey(const char *dsname, dsl_crypto_params_t *dcp, int spa_keystore_unload_wkey_impl(spa_t *spa, uint64_t ddobj); int spa_keystore_unload_wkey(const char *dsname); -int spa_keystore_create_mapping_impl(spa_t *spa, uint64_t dsobj, dsl_dir_t *dd, - void *tag); -int spa_keystore_create_mapping(spa_t *spa, struct dsl_dataset *ds, void *tag); +int 
spa_keystore_create_mapping(spa_t *spa, struct dsl_dataset *ds, void *tag, + dsl_key_mapping_t **km_out); int spa_keystore_remove_mapping(spa_t *spa, uint64_t dsobj, void *tag); +void key_mapping_add_ref(dsl_key_mapping_t *km, void *tag); +void key_mapping_rele(spa_t *spa, dsl_key_mapping_t *km, void *tag); int spa_keystore_lookup_key(spa_t *spa, uint64_t dsobj, void *tag, dsl_crypto_key_t **dck_out); diff --git a/usr/src/uts/common/fs/zfs/sys/dsl_dataset.h b/usr/src/uts/common/fs/zfs/sys/dsl_dataset.h index 384891723a..384d42c298 100644 --- a/usr/src/uts/common/fs/zfs/sys/dsl_dataset.h +++ b/usr/src/uts/common/fs/zfs/sys/dsl_dataset.h @@ -51,6 +51,7 @@ struct dsl_dataset; struct dsl_dir; struct dsl_pool; struct dsl_crypto_params; +struct dsl_key_mapping; #define DS_FLAG_INCONSISTENT (1ULL<<0) #define DS_IS_INCONSISTENT(ds) \ @@ -167,6 +168,7 @@ typedef struct dsl_dataset { uint64_t ds_object; uint64_t ds_fsid_guid; boolean_t ds_is_snapshot; + struct dsl_key_mapping *ds_key_mapping; /* only used in syncing context, only valid for non-snapshots: */ struct dsl_dataset *ds_prev; @@ -307,10 +309,12 @@ int dsl_dataset_hold_flags(struct dsl_pool *dp, const char *name, ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp); boolean_t dsl_dataset_try_add_ref(struct dsl_pool *dp, dsl_dataset_t *ds, void *tag); +int dsl_dataset_create_key_mapping(dsl_dataset_t *ds); int dsl_dataset_hold_obj(struct dsl_pool *dp, uint64_t dsobj, void *tag, dsl_dataset_t **); int dsl_dataset_hold_obj_flags(struct dsl_pool *dp, uint64_t dsobj, ds_hold_flags_t flags, void *tag, dsl_dataset_t **); +void dsl_dataset_remove_key_mapping(dsl_dataset_t *ds); void dsl_dataset_rele(dsl_dataset_t *ds, void *tag); void dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag); diff --git a/usr/src/uts/common/fs/zfs/zil.c b/usr/src/uts/common/fs/zfs/zil.c index 3c4acb09d2..3896d28fcf 100644 --- a/usr/src/uts/common/fs/zfs/zil.c +++ b/usr/src/uts/common/fs/zfs/zil.c @@ -3242,8 +3242,8 @@ 
zil_suspend(const char *osname, void **cookiep) * grabbing a reference to it. If the key isn't loaded we have no * choice but to return an error until the wrapping key is loaded. */ - if (os->os_encrypted && spa_keystore_create_mapping(os->os_spa, - dmu_objset_ds(os), FTAG) != 0) { + if (os->os_encrypted && + dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) { zilog->zl_suspend--; mutex_exit(&zilog->zl_lock); dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); @@ -3265,9 +3265,10 @@ zil_suspend(const char *osname, void **cookiep) zil_commit_impl(zilog, 0); /* - * Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we - * use txg_wait_synced() to ensure the data from the zilog has - * migrated to the main pool before calling zil_destroy(). + * Now that we've ensured all lwb's are LWB_STATE_DONE, + * txg_wait_synced() will be called from within zil_destroy(), + * which will ensure the data from the zilog has migrated to the + * main pool before it returns. */ txg_wait_synced(zilog->zl_dmu_pool, 0); @@ -3278,19 +3279,8 @@ zil_suspend(const char *osname, void **cookiep) cv_broadcast(&zilog->zl_cv_suspend); mutex_exit(&zilog->zl_lock); - if (os->os_encrypted) { - /* - * Encrypted datasets need to wait for all data to be - * synced out before removing the mapping. - * - * XXX: Depending on the number of datasets with - * outstanding ZIL data on a given log device, this - * might cause spa_offline_log() to take a long time. - */ - txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); - VERIFY0(spa_keystore_remove_mapping(os->os_spa, - dmu_objset_id(os), FTAG)); - } + if (os->os_encrypted) + dsl_dataset_remove_key_mapping(dmu_objset_ds(os)); if (cookiep == NULL) zil_resume(os); -- cgit v1.2.3