author    Prakash Surya <prakash.surya@delphix.com>    2017-09-01 11:04:26 -0700
committer Prakash Surya <prakash.surya@delphix.com>    2017-11-29 07:04:27 -0800
commit    cf07d3da9915c0d22da8f59e991639f819463cef (patch)
tree      95111fb3dc827330da37c9d69a2ee7d4db593801 /usr/src
parent    f27dfbe1ab97352a1070fc81db8c8acaa338079a (diff)
8603 rename zilog's "zl_writer_lock" to "zl_issuer_lock"
Reviewed by: C Fraire <cfraire@me.com>
Approved by: Dan McDonald <danmcd@joyent.com>
Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/zil_impl.h  10
-rw-r--r--  usr/src/uts/common/fs/zfs/zil.c            44
2 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/usr/src/uts/common/fs/zfs/sys/zil_impl.h b/usr/src/uts/common/fs/zfs/sys/zil_impl.h
index 308f4a5da3..543ba1ee1e 100644
--- a/usr/src/uts/common/fs/zfs/sys/zil_impl.h
+++ b/usr/src/uts/common/fs/zfs/sys/zil_impl.h
@@ -57,9 +57,9 @@ typedef enum {
* Log write block (lwb)
*
* Prior to an lwb being issued to disk via zil_lwb_write_issue(), it
- * will be protected by the zilog's "zl_writer_lock". Basically, prior
+ * will be protected by the zilog's "zl_issuer_lock". Basically, prior
* to it being issued, it will only be accessed by the thread that's
- * holding the "zl_writer_lock". After the lwb is issued, the zilog's
+ * holding the "zl_issuer_lock". After the lwb is issued, the zilog's
* "zl_lock" is used to protect the lwb against concurrent access.
*/
typedef struct lwb {
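The comment in this hunk describes a two-phase locking rule for an lwb. A minimal sketch of that rule (hypothetical helper, not part of this commit; it assumes the zilog_t and lwb_t definitions from zil_impl.h):

/*
 * Hypothetical illustration of the lwb locking discipline; the
 * function name and body are invented for this sketch.
 */
static void
lwb_two_phase_example(zilog_t *zilog, lwb_t *lwb)
{
	/* Phase 1: until issued, zl_issuer_lock is the sole guard. */
	mutex_enter(&zilog->zl_issuer_lock);
	/* ... fill lwb->lwb_buf, then zil_lwb_write_issue(zilog, lwb) ... */
	mutex_exit(&zilog->zl_issuer_lock);

	/* Phase 2: once issued, concurrent access requires zl_lock. */
	mutex_enter(&zilog->zl_lock);
	/* ... e.g. a zio completion callback inspects lwb->lwb_state ... */
	mutex_exit(&zilog->zl_lock);
}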
@@ -91,10 +91,10 @@ typedef struct lwb {
*
* The "zcw_lock" field is used to protect the commit waiter against
* concurrent access. This lock is often acquired while already holding
- * the zilog's "zl_writer_lock" or "zl_lock"; see the functions
+ * the zilog's "zl_issuer_lock" or "zl_lock"; see the functions
* zil_process_commit_list() and zil_lwb_flush_vdevs_done() as examples
* of this. Thus, one must be careful not to acquire the
- * "zl_writer_lock" or "zl_lock" when already holding the "zcw_lock";
+ * "zl_issuer_lock" or "zl_lock" when already holding the "zcw_lock";
* e.g. see the zil_commit_waiter_timeout() function.
*/
typedef struct zil_commit_waiter {
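The ordering rule in the comment above can be condensed into a short hypothetical fragment (not from this commit; the lock names come from zil_impl.h, the function is invented): zl_issuer_lock or zl_lock must be taken before zcw_lock, never the reverse.

/* Hypothetical sketch of the documented lock order. */
static void
zcw_lock_order_example(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	mutex_enter(&zilog->zl_issuer_lock);	/* first: issuer lock */
	mutex_enter(&zcw->zcw_lock);		/* then: waiter lock */
	/* ... safe to touch both the zilog and the waiter ... */
	mutex_exit(&zcw->zcw_lock);
	mutex_exit(&zilog->zl_issuer_lock);

	/*
	 * Acquiring zl_issuer_lock while already holding zcw_lock
	 * would invert this order and risk deadlock; see how
	 * zil_commit_waiter_timeout() drops zcw_lock first in the
	 * zil.c hunks below.
	 */
}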
@@ -161,7 +161,7 @@ struct zilog {
uint8_t zl_keep_first; /* keep first log block in destroy */
uint8_t zl_replay; /* replaying records while set */
uint8_t zl_stop_sync; /* for debugging */
- kmutex_t zl_writer_lock; /* single writer, per ZIL, at a time */
+ kmutex_t zl_issuer_lock; /* single writer, per ZIL, at a time */
uint8_t zl_logbias; /* latency or throughput */
uint8_t zl_sync; /* synchronous or asynchronous */
int zl_parse_error; /* last zil_parse() error */
diff --git a/usr/src/uts/common/fs/zfs/zil.c b/usr/src/uts/common/fs/zfs/zil.c
index bc0b8dec3f..dc80272912 100644
--- a/usr/src/uts/common/fs/zfs/zil.c
+++ b/usr/src/uts/common/fs/zfs/zil.c
@@ -1095,7 +1095,7 @@ zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
zbookmark_phys_t zb;
zio_priority_t prio;
- ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
+ ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);
@@ -1192,7 +1192,7 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
int i, error;
boolean_t slog;
- ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
+ ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
@@ -1326,7 +1326,7 @@ zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
char *lr_buf;
uint64_t dlen, dnow, lwb_sp, reclen, txg;
- ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
+ ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
ASSERT3P(lwb->lwb_buf, !=, NULL);
@@ -1742,7 +1742,7 @@ zil_get_commit_list(zilog_t *zilog)
uint64_t otxg, txg;
list_t *commit_list = &zilog->zl_itx_commit_list;
- ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
+ ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
@@ -1849,7 +1849,7 @@ zil_prune_commit_list(zilog_t *zilog)
{
itx_t *itx;
- ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
+ ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
while (itx = list_head(&zilog->zl_itx_commit_list)) {
lr_t *lrc = &itx->itx_lr;
@@ -1899,12 +1899,12 @@ zil_commit_writer_stall(zilog_t *zilog)
* crash (because the previous lwb on-disk would not point to
* it).
*
- * We must hold the zilog's zl_writer_lock while we do this, to
+ * We must hold the zilog's zl_issuer_lock while we do this, to
* ensure no new threads enter zil_process_commit_list() until
* all lwb's in the zl_lwb_list have been synced and freed
* (which is achieved via the txg_wait_synced() call).
*/
- ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
+ ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
txg_wait_synced(zilog->zl_dmu_pool, 0);
ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
}
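The comment and ASSERT above make the caller contract explicit: zil_commit_writer_stall() must run under zl_issuer_lock. A hypothetical caller (invented for illustration, not the commit's code) would look like:

static void
stall_caller_example(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_issuer_lock);
	zil_commit_writer_stall(zilog);	/* returns once all lwbs sync and free */
	mutex_exit(&zilog->zl_issuer_lock);
}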
@@ -1923,7 +1923,7 @@ zil_process_commit_list(zilog_t *zilog)
lwb_t *lwb;
itx_t *itx;
- ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
+ ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
/*
* Return if there's nothing to commit before we dirty the fs by
@@ -2093,17 +2093,17 @@ zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
ASSERT(spa_writeable(zilog->zl_spa));
ASSERT0(zilog->zl_suspend);
- mutex_enter(&zilog->zl_writer_lock);
+ mutex_enter(&zilog->zl_issuer_lock);
if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
/*
* It's possible that, while we were waiting to acquire
- * the "zl_writer_lock", another thread committed this
+ * the "zl_issuer_lock", another thread committed this
* waiter to an lwb. If that occurs, we bail out early,
* without processing any of the zilog's queue of itxs.
*
* On certain workloads and system configurations, the
- * "zl_writer_lock" can become highly contended. In an
+ * "zl_issuer_lock" can become highly contended. In an
* attempt to reduce this contention, we immediately drop
* the lock if the waiter has already been processed.
*
@@ -2120,13 +2120,13 @@ zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
zil_process_commit_list(zilog);
out:
- mutex_exit(&zilog->zl_writer_lock);
+ mutex_exit(&zilog->zl_issuer_lock);
}
static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
- ASSERT(!MUTEX_HELD(&zilog->zl_writer_lock));
+ ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
@@ -2138,7 +2138,7 @@ zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
* If the lwb has already been issued by another thread, we can
* immediately return since there's no work to be done (the
* point of this function is to issue the lwb). Additionally, we
- * do this prior to acquiring the zl_writer_lock, to avoid
+ * do this prior to acquiring the zl_issuer_lock, to avoid
* acquiring it when it's not necessary to do so.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
@@ -2147,13 +2147,13 @@ zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
/*
* In order to call zil_lwb_write_issue() we must hold the
- * zilog's "zl_writer_lock". We can't simply acquire that lock,
+ * zilog's "zl_issuer_lock". We can't simply acquire that lock,
* since we're already holding the commit waiter's "zcw_lock",
* and those two locks are acquired in the opposite order
* elsewhere.
*/
mutex_exit(&zcw->zcw_lock);
- mutex_enter(&zilog->zl_writer_lock);
+ mutex_enter(&zilog->zl_issuer_lock);
mutex_enter(&zcw->zcw_lock);
/*
@@ -2171,7 +2171,7 @@ zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
/*
* We've already checked this above, but since we hadn't
- * acquired the zilog's zl_writer_lock, we have to perform this
+ * acquired the zilog's zl_issuer_lock, we have to perform this
* check a second time while holding the lock. We can't call
* zil_lwb_write_issue() if the lwb had already been issued.
*/
@@ -2234,7 +2234,7 @@ zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
}
out:
- mutex_exit(&zilog->zl_writer_lock);
+ mutex_exit(&zilog->zl_issuer_lock);
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
}
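The hunks above from zil_commit_waiter_timeout() show a general pattern: when the lock order forbids acquiring A while holding B, drop B, take A, retake B, and then re-validate any state observed before the drop. A condensed hypothetical version, using only identifiers that appear in this diff:

/* Hypothetical sketch of the drop/reacquire/recheck dance. */
static void
reacquire_example(zilog_t *zilog, zil_commit_waiter_t *zcw, lwb_t *lwb)
{
	mutex_exit(&zcw->zcw_lock);
	mutex_enter(&zilog->zl_issuer_lock);	/* honor issuer -> zcw order */
	mutex_enter(&zcw->zcw_lock);

	/*
	 * Re-check: another thread may have issued the lwb while
	 * zcw_lock was dropped above.
	 */
	if (lwb->lwb_state != LWB_STATE_OPENED)
		goto out;

	/* ... safe to call zil_lwb_write_issue(zilog, lwb) here ... */
out:
	mutex_exit(&zilog->zl_issuer_lock);
}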
@@ -2261,7 +2261,7 @@ static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
- ASSERT(!MUTEX_HELD(&zilog->zl_writer_lock));
+ ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(spa_writeable(zilog->zl_spa));
ASSERT0(zilog->zl_suspend);
@@ -2510,7 +2510,7 @@ zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
* on two fundamental concepts:
*
* 1. The creation and issuance of lwb zio's is protected by
- * the zilog's "zl_writer_lock", which ensures only a single
+ * the zilog's "zl_issuer_lock", which ensures only a single
* thread is creating and/or issuing lwb's at a time
* 2. The "previous" lwb is a child of the "current" lwb
* (leveraging the zio parent-child dependency graph)
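Concept 2 above leans on the zio pipeline's parent-child mechanism: a parent zio cannot complete before its children do, so making the previous lwb's zio a child of the current one guarantees lwbs complete in issue order. A hypothetical one-liner using the generic ZFS primitive (cur_lwb/prev_lwb are invented names; whether this exact call is how zil.c wires the chain is an assumption):

/* Current lwb (parent) cannot complete before the previous lwb (child). */
zio_add_child(cur_lwb->lwb_root_zio, prev_lwb->lwb_root_zio);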
@@ -2759,7 +2759,7 @@ zil_alloc(objset_t *os, zil_header_t *zh_phys)
zilog->zl_last_lwb_latency = 0;
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
- mutex_init(&zilog->zl_writer_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
for (int i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
@@ -2804,7 +2804,7 @@ zil_free(zilog_t *zilog)
mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
}
- mutex_destroy(&zilog->zl_writer_lock);
+ mutex_destroy(&zilog->zl_issuer_lock);
mutex_destroy(&zilog->zl_lock);
cv_destroy(&zilog->zl_cv_suspend);