author     Richard Yao <richard.yao@prophetstor.com>      2017-06-13 14:50:40 -0400
committer  Prakash Surya <prakash.surya@delphix.com>      2018-05-17 09:28:41 -0700
commit     8dfe5547fbf0979fc1065a8b6fddc1e940a7cf4f (patch)
tree       89828b679bdf6c0886dd6966148dc83610d5f7de
parent     f39927996d932d886093624a919a94b0daf5cb83 (diff)
download   illumos-joyent-8dfe5547fbf0979fc1065a8b6fddc1e940a7cf4f.tar.gz
9539 Make zvol operations use _by_dnode routines
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Approved by: Dan McDonald <danmcd@joyent.com>
-rw-r--r--   usr/src/uts/common/fs/zfs/dmu.c                          27
-rw-r--r--   usr/src/uts/common/fs/zfs/sys/dmu.h                       8
-rw-r--r--   usr/src/uts/common/fs/zfs/zvol.c                         26
-rw-r--r--   usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd_zvol.c    45
-rw-r--r--   usr/src/uts/common/io/comstar/lu/stmf_sbd/stmf_sbd.h     2
5 files changed, 60 insertions(+), 48 deletions(-)
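
Note: the change below exposes the dnode-based DMU workers so that callers which
already hold a dnode can pass it directly, instead of going through a dbuf/bonus
handle and re-deriving the dnode on every I/O. The existing handle-based entry
points become thin wrappers. For readability, here is the wrapper this commit adds
to dmu.c, reproduced standalone from the diff below (no code beyond what the patch
itself introduces):

void
dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;

	/* Resolve the dnode once, then delegate to the dnode-based routine. */
	DB_DNODE_ENTER(dbuf);
	dmu_assign_arcbuf_dnode(DB_DNODE(dbuf), offset, buf, tx);
	DB_DNODE_EXIT(dbuf);
}

The same shape applies to the uio paths: dmu_read_uio_dnode() and
dmu_write_uio_dnode() were already the workers behind the *_dbuf variants and are
simply made non-static here, so existing dbuf-handle callers are unaffected while
zvol and COMSTAR can skip the per-I/O handle-to-dnode lookup.
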
diff --git a/usr/src/uts/common/fs/zfs/dmu.c b/usr/src/uts/common/fs/zfs/dmu.c
index f035b05af2..15123f347b 100644
--- a/usr/src/uts/common/fs/zfs/dmu.c
+++ b/usr/src/uts/common/fs/zfs/dmu.c
@@ -443,7 +443,7 @@ dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
* and can induce severe lock contention when writing to several files
* whose dnodes are in the same block.
*/
-static int
+int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
@@ -1302,7 +1302,7 @@ xuio_stat_wbuf_nocopy(void)
}
#ifdef _KERNEL
-static int
+int
dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
{
dmu_buf_t **dbp;
@@ -1411,7 +1411,7 @@ dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
return (err);
}
-static int
+int
dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
@@ -1600,22 +1600,17 @@ dmu_return_arcbuf(arc_buf_t *buf)
* dmu_write().
*/
void
-dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
+dmu_assign_arcbuf_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
- dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
- dnode_t *dn;
dmu_buf_impl_t *db;
uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
uint64_t blkid;
- DB_DNODE_ENTER(dbuf);
- dn = DB_DNODE(dbuf);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
rw_exit(&dn->dn_struct_rwlock);
- DB_DNODE_EXIT(dbuf);
/*
* We can only assign if the offset is aligned, the arc buf is the
@@ -1632,11 +1627,8 @@ dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
- DB_DNODE_ENTER(dbuf);
- dn = DB_DNODE(dbuf);
os = dn->dn_objset;
object = dn->dn_object;
- DB_DNODE_EXIT(dbuf);
dbuf_rele(db, FTAG);
dmu_write(os, object, offset, blksz, buf->b_data, tx);
@@ -1645,6 +1637,17 @@ dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
}
}
+void
+dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
+ dmu_tx_t *tx)
+{
+ dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
+
+ DB_DNODE_ENTER(dbuf);
+ dmu_assign_arcbuf_dnode(DB_DNODE(dbuf), offset, buf, tx);
+ DB_DNODE_EXIT(dbuf);
+}
+
typedef struct {
dbuf_dirty_record_t *dsa_dr;
dmu_sync_cb_t *dsa_done;
diff --git a/usr/src/uts/common/fs/zfs/sys/dmu.h b/usr/src/uts/common/fs/zfs/sys/dmu.h
index 6a87abfe34..52238bc735 100644
--- a/usr/src/uts/common/fs/zfs/sys/dmu.h
+++ b/usr/src/uts/common/fs/zfs/sys/dmu.h
@@ -517,6 +517,9 @@ uint64_t dmu_buf_refcount(dmu_buf_t *db);
int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
uint64_t length, boolean_t read, void *tag,
int *numbufsp, dmu_buf_t ***dbpp);
+int dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
+ boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp,
+ uint32_t flags);
void dmu_buf_rele_array(dmu_buf_t **, int numbufs, void *tag);
typedef void dmu_buf_evict_func_t(void *user_ptr);
@@ -755,14 +758,19 @@ void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx);
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size);
+int dmu_read_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size,
dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size,
dmu_tx_t *tx);
+int dmu_write_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size,
+ dmu_tx_t *tx);
int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, struct page *pp, dmu_tx_t *tx);
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
void dmu_return_arcbuf(struct arc_buf *buf);
+void dmu_assign_arcbuf_dnode(dnode_t *handle, uint64_t offset,
+ struct arc_buf *buf, dmu_tx_t *tx);
void dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, struct arc_buf *buf,
dmu_tx_t *tx);
int dmu_xuio_init(struct xuio *uio, int niov);
diff --git a/usr/src/uts/common/fs/zfs/zvol.c b/usr/src/uts/common/fs/zfs/zvol.c
index 1e0529964b..9f8700cba8 100644
--- a/usr/src/uts/common/fs/zfs/zvol.c
+++ b/usr/src/uts/common/fs/zfs/zvol.c
@@ -128,7 +128,7 @@ typedef struct zvol_state {
zilog_t *zv_zilog; /* ZIL handle */
list_t zv_extents; /* List of extents for dump */
znode_t zv_znode; /* for range locking */
- dmu_buf_t *zv_dbuf; /* bonus handle */
+ dnode_t *zv_dn; /* dnode hold */
} zvol_state_t;
/*
@@ -646,7 +646,7 @@ zvol_first_open(zvol_state_t *zv)
return (error);
}
- error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
+ error = dnode_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dn);
if (error) {
dmu_objset_disown(os, zvol_tag);
return (error);
@@ -671,8 +671,8 @@ zvol_last_close(zvol_state_t *zv)
zil_close(zv->zv_zilog);
zv->zv_zilog = NULL;
- dmu_buf_rele(zv->zv_dbuf, zvol_tag);
- zv->zv_dbuf = NULL;
+ dnode_rele(zv->zv_dn, zvol_tag);
+ zv->zv_dn = NULL;
/*
* Evict cached data
@@ -993,8 +993,6 @@ static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
{
zvol_state_t *zv = arg;
- objset_t *os = zv->zv_objset;
- uint64_t object = ZVOL_OBJ;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length; /* length of user data */
dmu_buf_t *db;
@@ -1018,7 +1016,7 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
if (buf != NULL) { /* immediate write */
zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size,
RL_READER);
- error = dmu_read(os, object, offset, size, buf,
+ error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
DMU_READ_NO_PREFETCH);
} else { /* indirect write */
/*
@@ -1031,7 +1029,7 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
offset = P2ALIGN(offset, size);
zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size,
RL_READER);
- error = dmu_buf_hold(os, object, offset, zgd, &db,
+ error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
DMU_READ_NO_PREFETCH);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
@@ -1098,8 +1096,8 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
(wr_state == WR_COPIED ? len : 0));
lr = (lr_write_t *)&itx->itx_lr;
- if (wr_state == WR_COPIED && dmu_read(zv->zv_objset,
- ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
+ if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
+ off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
@@ -1468,7 +1466,7 @@ zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
dmu_tx_abort(tx);
break;
}
- error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
+ error = dmu_write_uio_dnode(zv->zv_dn, uio, bytes, tx);
if (error == 0)
zvol_log_write(zv, tx, off, bytes, sync);
dmu_tx_commit(tx);
@@ -1549,7 +1547,7 @@ zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
- void **rl_hdl, void **bonus_hdl)
+ void **rl_hdl, void **dnode_hdl)
{
zvol_state_t *zv;
@@ -1560,7 +1558,7 @@ zvol_get_volume_params(minor_t minor, uint64_t *blksize,
return (SET_ERROR(ENXIO));
ASSERT(blksize && max_xfer_len && minor_hdl &&
- objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
+ objset_hdl && zil_hdl && rl_hdl && dnode_hdl);
*blksize = zv->zv_volblocksize;
*max_xfer_len = (uint64_t)zvol_maxphys;
@@ -1568,7 +1566,7 @@ zvol_get_volume_params(minor_t minor, uint64_t *blksize,
*objset_hdl = zv->zv_objset;
*zil_hdl = zv->zv_zilog;
*rl_hdl = &zv->zv_znode;
- *bonus_hdl = zv->zv_dbuf;
+ *dnode_hdl = zv->zv_dn;
return (0);
}
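
For reference, the zvol.c hunks above amount to a simple lifecycle: take one dnode
hold when the volume is first opened, route subsequent I/O through the dnode-based
entry points, and drop the hold on last close. A condensed sketch of that
lifecycle, assuming the zvol_state_t fields and zvol_tag hold tag shown in the
diff; the helper names are illustrative, and locking and most error handling are
omitted (the real logic lives in zvol_first_open(), zvol_write() and
zvol_last_close() above):

/* Illustrative helpers only, not part of the patch. */
static int
zv_hold_dnode(zvol_state_t *zv, objset_t *os)
{
	/* One hold for the lifetime of the open volume. */
	return (dnode_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dn));
}

static int
zv_write_chunk(zvol_state_t *zv, uio_t *uio, uint64_t bytes, dmu_tx_t *tx)
{
	/* Per-I/O path: the held dnode is reused, no dbuf lookup needed. */
	return (dmu_write_uio_dnode(zv->zv_dn, uio, bytes, tx));
}

static void
zv_rele_dnode(zvol_state_t *zv)
{
	/* Drop the hold taken at first open. */
	dnode_rele(zv->zv_dn, zvol_tag);
	zv->zv_dn = NULL;
}
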
diff --git a/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd_zvol.c b/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd_zvol.c
index 0e96e2ec96..bf9a369506 100644
--- a/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd_zvol.c
+++ b/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd_zvol.c
@@ -59,12 +59,12 @@
* zfs internal interfaces referenced here:
*
* FUNCTIONS
- * dmu_buf_hold_array_by_bonus()
+ * dmu_buf_hold_array_by_dnode()
* dmu_buf_rele_array()
*
- * dmu_request_arc_buf()
+ * arc_loan_buf()
* dmu_assign_arcbuf()
- * dmu_return_arc()
+ * dmu_return_arcbuf()
* arc_buf_size()
*
* dmu_tx_create()
@@ -88,7 +88,7 @@
* zv_objset - dmu_tx_create
* zv_zilog - zil_commit
* zv_znode - zfs_range_lock
- * zv_dbuf - dmu_buf_hold_array_by_bonus, dmu_request_arcbuf
+ * zv_dn - dmu_buf_hold_array_by_bonus, dmu_request_arcbuf
* GLOBAL DATA
* zvol_maxphys
*/
@@ -114,7 +114,7 @@ sbd_zvol_get_volume_params(sbd_lu_t *sl)
&sl->sl_zvol_objset_hdl, /* dmu_tx_create */
&sl->sl_zvol_zil_hdl, /* zil_commit */
&sl->sl_zvol_rl_hdl, /* zfs_range_lock */
- &sl->sl_zvol_bonus_hdl); /* dmu_buf_hold_array_by_bonus, */
+ &sl->sl_zvol_dn_hdl); /* dmu_buf_hold_array_by_dnode, */
/* dmu_request_arcbuf, */
/* dmu_assign_arcbuf */
@@ -153,10 +153,10 @@ int
sbd_zvol_alloc_read_bufs(sbd_lu_t *sl, stmf_data_buf_t *dbuf)
{
sbd_zvol_io_t *zvio = dbuf->db_lu_private;
- rl_t *rl;
- int numbufs, error;
- uint64_t len = dbuf->db_data_size;
- uint64_t offset = zvio->zvio_offset;
+ rl_t *rl;
+ int numbufs, error;
+ uint64_t len = dbuf->db_data_size;
+ uint64_t offset = zvio->zvio_offset;
dmu_buf_t **dbpp, *dbp;
/* Make sure request is reasonable */
@@ -171,8 +171,9 @@ sbd_zvol_alloc_read_bufs(sbd_lu_t *sl, stmf_data_buf_t *dbuf)
*/
rl = zfs_range_lock(sl->sl_zvol_rl_hdl, offset, len, RL_READER);
- error = dmu_buf_hold_array_by_bonus(sl->sl_zvol_bonus_hdl, offset,
- len, TRUE, RDTAG, &numbufs, &dbpp);
+ error = dmu_buf_hold_array_by_dnode(sl->sl_zvol_dn_hdl,
+ offset, len, TRUE, RDTAG, &numbufs, &dbpp,
+ DMU_READ_PREFETCH);
zfs_range_unlock(rl);
@@ -242,8 +243,8 @@ sbd_zvol_alloc_write_bufs(sbd_lu_t *sl, stmf_data_buf_t *dbuf)
uint64_t blksize;
arc_buf_t **abp;
stmf_sglist_ent_t *sgl;
- uint64_t len = dbuf->db_data_size;
- uint64_t offset = zvio->zvio_offset;
+ uint64_t len = dbuf->db_data_size;
+ uint64_t offset = zvio->zvio_offset;
/* Make sure request is reasonable */
if (len > sl->sl_max_xfer_len)
@@ -293,7 +294,8 @@ sbd_zvol_alloc_write_bufs(sbd_lu_t *sl, stmf_data_buf_t *dbuf)
if (seglen == 0)
seglen = blksize;
seglen = MIN(seglen, len);
- abp[i] = dmu_request_arcbuf(sl->sl_zvol_bonus_hdl, (int)seglen);
+ abp[i] = arc_loan_buf(dmu_objset_spa(sl->sl_zvol_objset_hdl),
+ B_FALSE, (int)seglen);
ASSERT(arc_buf_size(abp[i]) == (int)seglen);
sgl->seg_addr = abp[i]->b_data;
sgl->seg_length = (uint32_t)seglen;
@@ -335,7 +337,7 @@ sbd_zvol_rele_write_bufs(sbd_lu_t *sl, stmf_data_buf_t *dbuf)
sbd_zvol_io_t *zvio = dbuf->db_lu_private;
dmu_tx_t *tx;
int sync, i, error;
- rl_t *rl;
+ rl_t *rl;
arc_buf_t **abp = zvio->zvio_abp;
int flags = zvio->zvio_flags;
uint64_t toffset, offset = zvio->zvio_offset;
@@ -364,7 +366,8 @@ sbd_zvol_rele_write_bufs(sbd_lu_t *sl, stmf_data_buf_t *dbuf)
abuf = abp[i];
size = arc_buf_size(abuf);
- dmu_assign_arcbuf(sl->sl_zvol_bonus_hdl, toffset, abuf, tx);
+ dmu_assign_arcbuf_dnode(sl->sl_zvol_dn_hdl, toffset, abuf,
+ tx);
toffset += size;
resid -= size;
}
@@ -391,7 +394,7 @@ int
sbd_zvol_copy_read(sbd_lu_t *sl, uio_t *uio)
{
int error;
- rl_t *rl;
+ rl_t *rl;
uint64_t len = (uint64_t)uio->uio_resid;
uint64_t offset = (uint64_t)uio->uio_loffset;
@@ -403,7 +406,7 @@ sbd_zvol_copy_read(sbd_lu_t *sl, uio_t *uio)
rl = zfs_range_lock(sl->sl_zvol_rl_hdl, offset, len, RL_READER);
- error = dmu_read_uio_dbuf(sl->sl_zvol_bonus_hdl, uio, len);
+ error = dmu_read_uio_dnode(sl->sl_zvol_dn_hdl, uio, len);
zfs_range_unlock(rl);
if (error == ECKSUM)
@@ -418,8 +421,8 @@ sbd_zvol_copy_read(sbd_lu_t *sl, uio_t *uio)
int
sbd_zvol_copy_write(sbd_lu_t *sl, uio_t *uio, int flags)
{
- rl_t *rl;
- dmu_tx_t *tx;
+ rl_t *rl;
+ dmu_tx_t *tx;
int error, sync;
uint64_t len = (uint64_t)uio->uio_resid;
uint64_t offset = (uint64_t)uio->uio_loffset;
@@ -442,7 +445,7 @@ sbd_zvol_copy_write(sbd_lu_t *sl, uio_t *uio, int flags)
if (error) {
dmu_tx_abort(tx);
} else {
- error = dmu_write_uio_dbuf(sl->sl_zvol_bonus_hdl, uio, len, tx);
+ error = dmu_write_uio_dnode(sl->sl_zvol_dn_hdl, uio, len, tx);
if (error == 0) {
zvol_log_write_minor(sl->sl_zvol_minor_hdl, tx, offset,
(ssize_t)len, sync);
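
The sbd_zvol.c write path above now loans ARC buffers straight from the pool with
arc_loan_buf() and commits them through the held dnode with
dmu_assign_arcbuf_dnode(). A hedged sketch of that allocate/assign/return flow,
using a hypothetical helper and the sl_zvol_* handles from the diff; the real code
also builds the STMF scatter/gather list and walks multiple block-sized segments:

/* Illustrative only: one loaned buffer, assigned on success or handed back
 * to the ARC if the write is aborted. */
static void
sbd_assign_one_buf(sbd_lu_t *sl, uint64_t offset, uint64_t seglen,
    dmu_tx_t *tx, boolean_t aborted)
{
	arc_buf_t *abuf;

	/* Loan a buffer of the segment size directly from the pool's ARC. */
	abuf = arc_loan_buf(dmu_objset_spa(sl->sl_zvol_objset_hdl),
	    B_FALSE, (int)seglen);

	/* ... the initiator's data would be copied into abuf->b_data ... */

	if (aborted) {
		/* Unused loaned buffers must be returned. */
		dmu_return_arcbuf(abuf);
		return;
	}

	/* Assign the filled buffer into the object via the held dnode. */
	dmu_assign_arcbuf_dnode(sl->sl_zvol_dn_hdl, offset, abuf, tx);
}
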
diff --git a/usr/src/uts/common/io/comstar/lu/stmf_sbd/stmf_sbd.h b/usr/src/uts/common/io/comstar/lu/stmf_sbd/stmf_sbd.h
index efbc7268ea..a402ad0ee3 100644
--- a/usr/src/uts/common/io/comstar/lu/stmf_sbd/stmf_sbd.h
+++ b/usr/src/uts/common/io/comstar/lu/stmf_sbd/stmf_sbd.h
@@ -228,7 +228,7 @@ typedef struct sbd_lu {
void *sl_zvol_objset_hdl;
void *sl_zvol_zil_hdl;
void *sl_zvol_rl_hdl;
- void *sl_zvol_bonus_hdl;
+ void *sl_zvol_dn_hdl;
/* Backing store */
char *sl_data_filename;