author     Matthew Ahrens <mahrens@delphix.com>       2017-12-12 15:46:58 -0800
committer  Prakash Surya <prakash.surya@delphix.com>  2018-09-18 13:53:10 -0700
commit     7928f4baf4ab3230557eb6289be68aa7a3003f38 (patch)
tree       f8a7b6b06e693b7e6d131aad74cde8c268880f35
parent     e19b450bec203d8be04447ea476d7a86b36d63a1 (diff)
download   illumos-joyent-7928f4baf4ab3230557eb6289be68aa7a3003f38.tar.gz
9617 too-frequent TXG sync causes excessive write inflation
Reviewed by: Serapheim Dimitropoulos <serapheim.dimitro@delphix.com>
Reviewed by: Brad Lewis <brad.lewis@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Andrew Stormont <andyjstormont@gmail.com>
Approved by: Robert Mustacchi <rm@joyent.com>
-rw-r--r--  usr/src/uts/common/fs/zfs/dsl_pool.c     | 10
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/dsl_pool.h |  2
-rw-r--r--  usr/src/uts/common/fs/zfs/txg.c          |  4
3 files changed, 11 insertions, 5 deletions
diff --git a/usr/src/uts/common/fs/zfs/dsl_pool.c b/usr/src/uts/common/fs/zfs/dsl_pool.c
index 9f9111efa2..2a9ec5585a 100644
--- a/usr/src/uts/common/fs/zfs/dsl_pool.c
+++ b/usr/src/uts/common/fs/zfs/dsl_pool.c
@@ -103,9 +103,11 @@ uint64_t zfs_dirty_data_max_max = 4ULL * 1024 * 1024 * 1024;
 int zfs_dirty_data_max_percent = 10;
 
 /*
- * If there is at least this much dirty data, push out a txg.
+ * If there's at least this much dirty data (as a percentage of
+ * zfs_dirty_data_max), push out a txg.  This should be less than
+ * zfs_vdev_async_write_active_min_dirty_percent.
  */
-uint64_t zfs_dirty_data_sync = 64 * 1024 * 1024;
+uint64_t zfs_dirty_data_sync_pct = 20;
 
 /*
  * Once there is this amount of dirty data, the dmu_tx_delay() will kick in
@@ -824,10 +826,12 @@ dsl_pool_need_dirty_delay(dsl_pool_t *dp)
 {
 	uint64_t delay_min_bytes =
 	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
+	uint64_t dirty_min_bytes =
+	    zfs_dirty_data_max * zfs_dirty_data_sync_pct / 100;
 	boolean_t rv;
 
 	mutex_enter(&dp->dp_lock);
-	if (dp->dp_dirty_total > zfs_dirty_data_sync)
+	if (dp->dp_dirty_total > dirty_min_bytes)
 		txg_kick(dp);
 	rv = (dp->dp_dirty_total > delay_min_bytes);
 	mutex_exit(&dp->dp_lock);
diff --git a/usr/src/uts/common/fs/zfs/sys/dsl_pool.h b/usr/src/uts/common/fs/zfs/sys/dsl_pool.h
index 2df0f21f98..c79c5bf735 100644
--- a/usr/src/uts/common/fs/zfs/sys/dsl_pool.h
+++ b/usr/src/uts/common/fs/zfs/sys/dsl_pool.h
@@ -53,7 +53,7 @@ struct dsl_scan;
 
 extern uint64_t zfs_dirty_data_max;
 extern uint64_t zfs_dirty_data_max_max;
-extern uint64_t zfs_dirty_data_sync;
+extern uint64_t zfs_dirty_data_sync_pct;
 extern int zfs_dirty_data_max_percent;
 extern int zfs_delay_min_dirty_percent;
 extern uint64_t zfs_delay_scale;
diff --git a/usr/src/uts/common/fs/zfs/txg.c b/usr/src/uts/common/fs/zfs/txg.c
index 743b914a20..48e1c682cb 100644
--- a/usr/src/uts/common/fs/zfs/txg.c
+++ b/usr/src/uts/common/fs/zfs/txg.c
@@ -484,6 +484,8 @@ txg_sync_thread(void *arg)
 		uint64_t timeout = zfs_txg_timeout * hz;
 		uint64_t timer;
 		uint64_t txg;
+		uint64_t dirty_min_bytes =
+		    zfs_dirty_data_max * zfs_dirty_data_sync_pct / 100;
 
 		/*
 		 * We sync when we're scanning, there's someone waiting
@@ -495,7 +497,7 @@
 		    !tx->tx_exiting && timer > 0 &&
 		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
 		    !txg_has_quiesced_to_sync(dp) &&
-		    dp->dp_dirty_total < zfs_dirty_data_sync) {
+		    dp->dp_dirty_total < dirty_min_bytes) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
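The patch replaces the fixed zfs_dirty_data_sync trigger (64 MiB) with zfs_dirty_data_sync_pct, a percentage of zfs_dirty_data_max, so the point at which a txg is pushed out scales with the pool's dirty-data limit. The standalone C sketch below only reproduces that arithmetic for illustration; it is not illumos code, and the 4 GiB zfs_dirty_data_max value is an assumed example (it matches the zfs_dirty_data_max_max cap visible in the diff), not a value set by this change.

/*
 * Minimal standalone sketch of the threshold change in this patch.
 * The zfs_dirty_data_max value below is an assumed example.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t zfs_dirty_data_max = 4ULL * 1024 * 1024 * 1024; /* assumed: 4 GiB */
	uint64_t zfs_dirty_data_sync_pct = 20;                   /* new tunable */
	uint64_t old_threshold = 64ULL * 1024 * 1024;            /* old fixed trigger */

	/* Mirrors the dirty_min_bytes computation added in the diff. */
	uint64_t dirty_min_bytes =
	    zfs_dirty_data_max * zfs_dirty_data_sync_pct / 100;

	(void) printf("old trigger: %llu MiB\n",
	    (unsigned long long)(old_threshold >> 20));
	(void) printf("new trigger: %llu MiB (20%% of zfs_dirty_data_max)\n",
	    (unsigned long long)(dirty_min_bytes >> 20));
	return (0);
}

Under these assumed defaults the trigger rises from 64 MiB to roughly 819 MiB, so the sync thread accumulates considerably more dirty data before forcing a txg, addressing the too-frequent syncs named in the issue title.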