author      Patrick Mooney <pmooney@pfmooney.com>        2022-02-21 05:27:23 +0000
committer   Patrick Mooney <pmooney@oxide.computer>      2022-10-03 18:01:55 +0000
commit      00bfaff92dad2fa278f0e40718333cf4864ad7d5 (patch)
tree        4f9949a3bf7f01b424c3c536958c4e4508d4d2f5
parent      b93865c3d90e9b0d73e338c9abb3293c35c571a8 (diff)
download    illumos-joyent-00bfaff92dad2fa278f0e40718333cf4864ad7d5.tar.gz
14519 zfs_root should cache vnode
Reviewed by: Dan McDonald <danmcd@mnx.io>
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Andy Fiddaman <illumos@fiddaman.net>
Approved by: Gordon Ross <gordon.w.ross@gmail.com>
-rw-r--r--   usr/src/uts/common/fs/zfs/sys/zfs_vfsops.h |  1
-rw-r--r--   usr/src/uts/common/fs/zfs/zfs_vfsops.c     | 61
2 files changed, 49 insertions, 13 deletions
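
For reference, a minimal sketch (not part of this commit) of how a VFS consumer exercises the new fast path: VFS_ROOT() dispatches to zfs_root(), which now hands back the z_rootdir vnode cached at mount time with an extra hold, instead of calling zfs_zget() on every lookup, and the caller drops that hold with VN_RELE(). The helper name lookup_fs_root below is illustrative only.

#include <sys/vfs.h>
#include <sys/vnode.h>

/*
 * Illustrative caller only: VFS_ROOT() and VN_RELE() are the standard
 * illumos VFS/vnode interfaces; the hold released here is the one
 * zfs_root() takes on the cached root vnode.
 */
static int
lookup_fs_root(vfs_t *vfsp)
{
	vnode_t *rootvp;
	int error;

	error = VFS_ROOT(vfsp, &rootvp);	/* ends up in zfs_root() */
	if (error != 0)
		return (error);	/* EIO once the filesystem is torn down */

	/* ... operate on the held root vnode ... */

	VN_RELE(rootvp);	/* drop the hold taken by zfs_root() */
	return (0);
}
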
diff --git a/usr/src/uts/common/fs/zfs/sys/zfs_vfsops.h b/usr/src/uts/common/fs/zfs/sys/zfs_vfsops.h
index 0d1611b66c..e1d7139f60 100644
--- a/usr/src/uts/common/fs/zfs/sys/zfs_vfsops.h
+++ b/usr/src/uts/common/fs/zfs/sys/zfs_vfsops.h
@@ -68,6 +68,7 @@ struct zfsvfs {
krwlock_t z_teardown_inactive_lock;
list_t z_all_znodes; /* all vnodes in the fs */
kmutex_t z_znodes_lock; /* lock for z_all_znodes */
+ vnode_t *z_rootdir; /* mount root directory pointer */
vnode_t *z_ctldir; /* .zfs directory pointer */
boolean_t z_show_ctldir; /* expose .zfs in the root dir */
boolean_t z_issnap; /* true if this is a snapshot */
diff --git a/usr/src/uts/common/fs/zfs/zfs_vfsops.c b/usr/src/uts/common/fs/zfs/zfs_vfsops.c
index e7e00e58f6..87b4b36a7d 100644
--- a/usr/src/uts/common/fs/zfs/zfs_vfsops.c
+++ b/usr/src/uts/common/fs/zfs/zfs_vfsops.c
@@ -26,6 +26,7 @@
* Copyright 2019 Joyent, Inc.
* Copyright 2020 Joshua M. Clulow <josh@sysmgr.org>
* Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
+ * Copyright 2022 Oxide Computer Company
*/
/* Portions Copyright 2010 Robert Milkowski */
@@ -1435,6 +1436,13 @@ zfs_domount(vfs_t *vfsp, char *osname)
error = zfsvfs_setup(zfsvfs, B_TRUE);
}
+ /* cache the root vnode for this mount */
+ znode_t *rootzp;
+ if (error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp)) {
+ goto out;
+ }
+ zfsvfs->z_rootdir = ZTOV(rootzp);
+
if (!zfsvfs->z_issnap)
zfsctl_create(zfsvfs);
out:
@@ -1837,22 +1845,21 @@ zfs_mountroot(vfs_t *vfsp, enum whymountroot why)
goto out;
}
+ /* zfs_domount has already cached the root vnode for us */
zfsvfs = (zfsvfs_t *)vfsp->vfs_data;
ASSERT(zfsvfs);
- if (error = zfs_zget(zfsvfs, zfsvfs->z_root, &zp)) {
- cmn_err(CE_NOTE, "zfs_zget: error %d", error);
- goto out;
- }
+ ASSERT(zfsvfs->z_rootdir);
- vp = ZTOV(zp);
+ vp = zfsvfs->z_rootdir;
mutex_enter(&vp->v_lock);
vp->v_flag |= VROOT;
mutex_exit(&vp->v_lock);
- rootvp = vp;
/*
* Leave rootvp held. The root file system is never unmounted.
*/
+ VN_HOLD(vp);
+ rootvp = vp;
vfs_add((struct vnode *)0, vfsp,
(vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
@@ -2085,17 +2092,24 @@ static int
zfs_root(vfs_t *vfsp, vnode_t **vpp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
- znode_t *rootzp;
+ struct vnode *vp;
int error;
ZFS_ENTER(zfsvfs);
- error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
- if (error == 0)
- *vpp = ZTOV(rootzp);
+ vp = zfsvfs->z_rootdir;
+ if (vp != NULL) {
+ VN_HOLD(vp);
+ error = 0;
+ } else {
+ /* forced unmount */
+ error = EIO;
+ }
+ *vpp = vp;
ZFS_EXIT(zfsvfs);
return (error);
+
}
/*
@@ -2167,9 +2181,20 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
* other vops will fail with EIO.
*/
if (unmounting) {
+ /*
+ * Clear the cached root vnode now that we are unmounted.
+ * Its release must be performed outside the teardown locks to
+ * avoid recursive lock entry via zfs_inactive().
+ */
+ vnode_t *vp = zfsvfs->z_rootdir;
+ zfsvfs->z_rootdir = NULL;
+
zfsvfs->z_unmounted = B_TRUE;
rw_exit(&zfsvfs->z_teardown_inactive_lock);
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
+
+ /* Drop the cached root vp now that it is safe */
+ VN_RELE(vp);
}
/*
@@ -2237,14 +2262,24 @@ zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr)
*/
boolean_t draining;
uint_t thresh = 1;
+ vnode_t *ctlvp, *rvp;
+
+ /*
+ * The cached vnode for the root directory of the mount also
+ * maintains a hold on the vfs structure.
+ */
+ rvp = zfsvfs->z_rootdir;
+ thresh++;
/*
* The '.zfs' directory maintains a reference of its own, and
* any active references underneath are reflected in the vnode
* count. Allow one additional reference for it.
*/
- if (zfsvfs->z_ctldir != NULL)
+ ctlvp = zfsvfs->z_ctldir;
+ if (ctlvp != NULL) {
thresh++;
+ }
/*
* If it's running, the asynchronous unlinked drain task needs
@@ -2255,8 +2290,8 @@ zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr)
if (draining)
zfs_unlinked_drain_stop_wait(zfsvfs);
- if (vfsp->vfs_count > thresh || (zfsvfs->z_ctldir != NULL &&
- zfsvfs->z_ctldir->v_count > 1)) {
+ if (vfsp->vfs_count > thresh || rvp->v_count > 1 ||
+ (ctlvp != NULL && ctlvp->v_count > 1)) {
if (draining) {
/* If it was draining, restart the task */
zfs_unlinked_drain(zfsvfs);