author    LOLi <loli10K@users.noreply.github.com>  2017-09-11 08:01:55 -0700
committer Prakash Surya <prakash.surya@delphix.com>  2017-09-21 19:19:23 -0700
commit    42b14111721da2ebd5159e7b45012a3eb0e3384c (patch)
tree      d3089fc1557ae872afe341901e2781a19530e8aa /usr
parent    bd9d3f904625846bdc61af8897a1072029c7aeb7 (diff)
download  illumos-joyent-42b14111721da2ebd5159e7b45012a3eb0e3384c.tar.gz
8648 Fix range locking in ZIL commit codepath
Reviewed by: Igor Kozhukhov <igor@dilos.org>
Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed by: Andriy Gapon <avg@FreeBSD.org>
Reviewed by: Alexander Motin <mav@FreeBSD.org>
Approved by: Robert Mustacchi <rm@joyent.com>
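
Before this change, zvol_get_data() took the range lock once, up front, for the offset/size recorded in the log record; the indirect-write path then widened offset and size to a whole volblock only after the lock was already held, so the lock did not cover the block whose checksum gets calculated. The hunks below move the zfs_range_lock() call into each branch so the indirect case locks the full, block-aligned range. A minimal user-space sketch of that alignment arithmetic, with the P2ALIGN macro reproduced from sys/sysmacros.h for illustration and made-up offset/blocksize values:

#include <stdio.h>
#include <stdint.h>

/* reproduced for illustration; the kernel macro lives in sys/sysmacros.h */
#define	P2ALIGN(x, align)	((x) & -(align))

int
main(void)
{
	uint64_t volblocksize = 8192;	/* hypothetical zvol block size */
	uint64_t offset = 12345;	/* offset from the log record */
	uint64_t size = 512;		/* length of user data */

	/* range the pre-fix code locked, before any widening */
	printf("locked before the fix:   [%llu, %llu)\n",
	    (unsigned long long)offset, (unsigned long long)(offset + size));

	/* indirect write: the whole volblock that is written out and checksummed */
	size = volblocksize;
	offset = P2ALIGN(offset, size);
	printf("block being checksummed: [%llu, %llu)\n",
	    (unsigned long long)offset, (unsigned long long)(offset + size));

	return (0);
}

With these sample values it prints [12345, 12857) versus [8192, 16384), i.e. most of the block was left unprotected on the indirect path.
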
Diffstat (limited to 'usr')
-rw-r--r--  usr/src/uts/common/fs/zfs/zfs_vnops.c |  2
-rw-r--r--  usr/src/uts/common/fs/zfs/zvol.c      | 15
2 files changed, 13 insertions, 4 deletions
diff --git a/usr/src/uts/common/fs/zfs/zfs_vnops.c b/usr/src/uts/common/fs/zfs/zfs_vnops.c
index 37f543ac3f..7b60c76b8b 100644
--- a/usr/src/uts/common/fs/zfs/zfs_vnops.c
+++ b/usr/src/uts/common/fs/zfs/zfs_vnops.c
@@ -1101,7 +1101,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
 	} else { /* indirect write */
 		/*
 		 * Have to lock the whole block to ensure when it's
-		 * written out and it's checksum is being calculated
+		 * written out and its checksum is being calculated
 		 * that no one can change the data. We need to re-check
 		 * blocksize after we get the lock in case it's changed!
 		 */
diff --git a/usr/src/uts/common/fs/zfs/zvol.c b/usr/src/uts/common/fs/zfs/zvol.c
index 9a02d465ed..35b5b69efa 100644
--- a/usr/src/uts/common/fs/zfs/zvol.c
+++ b/usr/src/uts/common/fs/zfs/zvol.c
@@ -1003,7 +1003,6 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
 
 	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
 	zgd->zgd_lwb = lwb;
-	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
 
 	/*
 	 * Write records come in two flavors: immediate and indirect.
@@ -1012,12 +1011,22 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
 	 * sync the data and get a pointer to it (indirect) so that
 	 * we don't have to write the data twice.
 	 */
-	if (buf != NULL) { /* immediate write */
+	if (buf != NULL) { /* immediate write */
+		zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size,
+		    RL_READER);
 		error = dmu_read(os, object, offset, size, buf,
 		    DMU_READ_NO_PREFETCH);
-	} else {
+	} else { /* indirect write */
+		/*
+		 * Have to lock the whole block to ensure when it's written out
+		 * and its checksum is being calculated that no one can change
+		 * the data. Contrarily to zfs_get_data we need not re-check
+		 * blocksize after we get the lock because it cannot be changed.
+		 */
 		size = zv->zv_volblocksize;
 		offset = P2ALIGN(offset, size);
+		zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size,
+		    RL_READER);
 		error = dmu_buf_hold(os, object, offset, zgd, &db,
 		    DMU_READ_NO_PREFETCH);
 		if (error == 0) {
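
For reference, the control flow the patch leaves behind in zvol_get_data() matches the indirect-write comment added above: lock per write flavor, and for indirect writes widen to the volblock before locking. The sketch below is a compilable user-space approximation only; get_data(), range_lock() and VOLBLOCKSIZE are stand-ins, not the illumos zfs_range_lock()/zvol interfaces.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define	VOLBLOCKSIZE	8192ULL			/* hypothetical volblocksize */
#define	P2ALIGN(x, align)	((x) & -(align))	/* as in sys/sysmacros.h */

/* stand-in for zfs_range_lock(..., RL_READER); just reports the range */
static void
range_lock(uint64_t off, uint64_t len)
{
	printf("lock [%llu, %llu)\n", (unsigned long long)off,
	    (unsigned long long)(off + len));
}

static void
get_data(uint64_t offset, uint64_t size, bool immediate)
{
	if (immediate) {
		/* immediate write: lock exactly the range in the log record */
		range_lock(offset, size);
	} else {
		/* indirect write: widen to the whole volblock, then lock it */
		size = VOLBLOCKSIZE;
		offset = P2ALIGN(offset, size);
		range_lock(offset, size);
	}
}

int
main(void)
{
	get_data(12345, 512, true);	/* immediate write */
	get_data(12345, 512, false);	/* indirect write */
	return (0);
}

The immediate branch locks only the logged range, while the indirect branch always locks a full, aligned volblock, mirroring the two zgd->zgd_rl assignments added in the hunk above.
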