Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/Makefile.master                                |  12
-rw-r--r--  usr/src/tools/env/illumos.sh                           |   8
-rw-r--r--  usr/src/tools/find_elf/find_elf.1onbld                 | 318
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb2_cancel.c             |  10
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb2_fsctl_odx.c          |   2
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb2_fsctl_sparse.c       |   5
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb2_lease.c              |   4
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb2_oplock.c             |   6
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_cmn_rename.c          |  11
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_cmn_setfile.c         |   8
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_common_open.c         |   9
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_delete.c              |   4
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_fem.c                 |   2
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_locking_andx.c        |   4
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_ofile.c               |  20
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_oplock.c              |   4
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_server.c              |  77
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_session.c             |   3
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_srv_oplock.c          | 117
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_tree.c                |   2
-rw-r--r--  usr/src/uts/common/fs/smbsrv/smb_user.c                |  49
-rw-r--r--  usr/src/uts/common/io/overlay/plugins/overlay_vxlan.c  |   3
-rw-r--r--  usr/src/uts/common/io/usb/hcd/xhci/xhci.c              |  25
-rw-r--r--  usr/src/uts/common/io/usb/hcd/xhci/xhci_endpoint.c     | 186
-rw-r--r--  usr/src/uts/common/io/usb/hcd/xhci/xhci_usba.c         | 184
-rw-r--r--  usr/src/uts/common/smbsrv/smb_kproto.h                 |   7
-rw-r--r--  usr/src/uts/common/smbsrv/smb_ktypes.h                 |   5
-rw-r--r--  usr/src/uts/common/sys/usb/hcd/xhci/xhci.h             |  52

28 files changed, 817 insertions(+), 320 deletions(-)
diff --git a/usr/src/Makefile.master b/usr/src/Makefile.master
index 3ed49bc303..071e5dcab1 100644
--- a/usr/src/Makefile.master
+++ b/usr/src/Makefile.master
@@ -210,16 +210,16 @@ $(SPARC_BLD)PERL_MACH= sun4
PERL_VARIANT=
PERL_ARCH= $(PERL_MACH)-solaris$(PERL_VARIANT)-64int
PERL_ARCH64= $(PERL_MACH)-solaris$(PERL_VARIANT)-64
-PYTHON3_VERSION= 3.5
-PYTHON3_PKGVERS= -35
-PYTHON3_SUFFIX= m
+PYTHON3_VERSION= 3.9
+PYTHON3_PKGVERS= -39
+PYTHON3_SUFFIX=
PYTHON3= /usr/bin/python$(PYTHON3_VERSION)
# BUILDPY3b should be overridden in the env file in order to build python
# modules with a secondary python to aid migration between versions.
BUILDPY3b= $(POUND_SIGN)
-PYTHON3b_VERSION= 3.9
-PYTHON3b_PKGVERS= -39
-PYTHON3b_SUFFIX=
+PYTHON3b_VERSION= 3.5
+PYTHON3b_PKGVERS= -35
+PYTHON3b_SUFFIX= m
#
$(BUILDPY3b)PYTHON3b= /usr/bin/python$(PYTHON3b_VERSION)
TOOLS_PYTHON= $(PYTHON3)
diff --git a/usr/src/tools/env/illumos.sh b/usr/src/tools/env/illumos.sh
index 30148be6ef..25e897154f 100644
--- a/usr/src/tools/env/illumos.sh
+++ b/usr/src/tools/env/illumos.sh
@@ -19,8 +19,8 @@
# CDDL HEADER END
#
# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
-# Copyright 2015 Nexenta Systems, Inc. All rights reserved.
# Copyright 2012 Joshua M. Clulow <josh@sysmgr.org>
+# Copyright 2015 Nexenta Systems, Inc. All rights reserved.
# Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
# Copyright 2016 RackTop Systems.
# Copyright 2019 OmniOS Community Edition (OmniOSce) Association.
@@ -106,9 +106,9 @@ export ENABLE_SMB_PRINTING=
# If your distro uses certain versions of Python, make sure either
# Makefile.master contains your new defaults OR your .env file sets them.
-#export PYTHON3_VERSION=3.5
-#export PYTHON3_PKGVERS=-35
-#export PYTHON3_SUFFIX=m
+#export PYTHON3_VERSION=3.9
+#export PYTHON3_PKGVERS=-39
+#export PYTHON3_SUFFIX=
# Set console color scheme either by build type:
#
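
As the comment in illumos.sh above notes, a distribution can either carry new Python defaults in Makefile.master or set the variables in its local env file. A minimal env-file sketch follows (illustrative only; the values simply mirror the new Makefile.master defaults in the hunk above, and the BUILDPY3b/PYTHON3b lines assume the secondary interpreter is enabled with the same empty-vs-pound-sign convention used for other env knobs such as ENABLE_SMB_PRINTING):

    # Primary build python, matching the new Makefile.master defaults
    export PYTHON3_VERSION=3.9
    export PYTHON3_PKGVERS=-39
    export PYTHON3_SUFFIX=

    # Optional secondary python to aid module migration (assumed convention:
    # an empty value enables the $(BUILDPY3b) lines, '#' leaves them disabled)
    export BUILDPY3b=
    export PYTHON3b_VERSION=3.5
    export PYTHON3b_PKGVERS=-35
    export PYTHON3b_SUFFIX=m
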
diff --git a/usr/src/tools/find_elf/find_elf.1onbld b/usr/src/tools/find_elf/find_elf.1onbld
index 8fe9b7713b..0f9eacad5e 100644
--- a/usr/src/tools/find_elf/find_elf.1onbld
+++ b/usr/src/tools/find_elf/find_elf.1onbld
@@ -21,124 +21,159 @@
.\"
.\" Copyright 2022 Jason King
.\"
-.TH FIND_ELF 1ONBLD "May 29, 2022"
-.SH NAME
-find_elf \- Locate ELF objects
-.SH SYNOPSIS
-\fBfind_elf [-afhnrs] path\fP
-.SH DESCRIPTION
+.Dd September 6, 2022
+.Dt FIND_ELF 1ONBLD
+.Os
+.Sh NAME
+.Nm find_elf
+.Nd Locate ELF objects
+.Sh SYNOPSIS
+.Nm
+.Op Fl afhnrs
+.Ar path
+.Sh DESCRIPTION
The
-.I find_elf
+.Nm
command descends a directory hierarchy and produces one line
of output on stdout for each ELF object found.
-.SH OPTIONS
+.Sh OPTIONS
The following options are supported:
-.TP 4
-.B \-a
-Disable alias processing. Symbolic links are treated as independent
-files, each such link results in a separate OBJECT output line,
-and ALIAS lines are not issued.
-.TP 4
-.B \-f
-Fast Mode. When reading directories, the file name and modes are
+.Bl -tag -width Fl
+.It Fl a
+Disable alias processing.
+Symbolic links are treated as independent
+files, each such link results in a separate
+.Sy OBJECT
+output line, and
+.Sy ALIAS
+lines are not issued.
+.It Fl f
+Fast Mode.
+When reading directories, the file name and modes are
used to eliminate files from consideration and speed up the search:
-Directories with names that start with a '.' character are skipped.
+Directories with names that start with a
+.Ql \&.
+character are skipped.
Executables must have the execute bit set, and
-shared objects must end with a .so extension. Files that do not
-meet these requirements are silently eliminated from consideration without
-further analysis.
-.TP 4
-.B \-h
+shared objects must end with a
+.Ql .so
+extension.
+Files that do not meet these requirements are silently eliminated from
+consideration without further analysis.
+.It Fl h
Show usage message.
-.TP 4
-.B \-n
-Do not treat well known hard-linked binaries as special. Certain well known
-binaries (currently \fBalias\fP and \fBisaexec\fP) are hard linked to many
-other names in a proto directory tree.
-.P
+.It Fl n
+Do not treat well known hard-linked binaries as special.
+Certain well known binaries
+.Po
+currently
+.Pa alias
+and
+.Pa isaexec
+.Pc
+are hard linked to many other names in a proto directory tree.
+.Pp
By default,
-.I find_elf
+.Nm
will use these well known names as the initial name and all other hard links
-to those binaries are treated as aliases. Disabling this behavior with the
-\fB-n\fR option will choose the first name encountered during directory
-traversal as the name, and all other hard links to the binary as aliases.
-.TP 4
-.B \-r
+to those binaries are treated as aliases.
+Disabling this behavior with the
+.Fl n
+option will choose the first name encountered during directory traversal as
+the name, and all other hard links to the binary as aliases.
+.It Fl r
Report file names as relative paths, relative to the given file or directory,
instead of fully qualified.
-.TP 4
-.B \-s
+.It Fl s
Only report shared objects.
-.SH OUTPUT
-.I find_elf
-produces a series of PREFIX, OBJECT, and ALIAS lines, which collectively
-describe the ELF objects located. Whitespace is used within each
+.El
+.Sh OUTPUT
+.Nm
+produces a series of
+.Sy PREFIX ,
+.Sy OBJECT ,
+and
+.Sy ALIAS
+lines, which collectively describe the ELF objects located.
+Whitespace is used within each
line to delimit the various fields of information provided.
-.P
-If the \fB-r\fP option is used to specify that file names be reported
-as relative paths, a PREFIX line is output to provide the base path from
+.Pp
+If the
+.Fl r
+option is used to specify that file names be reported
+as relative paths, a
+.Sy PREFIX
+line is output to provide the base path from
which the relative names should be interpreted.
-There can only be one PREFIX line, and it is output first, before any
-OBJECT or ALIAS lines.
-.sp
-.in +4
-.nf
+There can only be one
+.Sy PREFIX
+line, and it is output first, before any
+.Sy OBJECT
+or
+.Sy ALIAS
+lines.
+.Bd -literal -offset indent
PREFIX path
-.fi
-.in -4
-.sp
-For each object found, an OBJECT line is produced to describe it:
-.sp
-.in +4
-.nf
+.Ed
+.Pp
+For each object found, an
+.Sy OBJECT
+line is produced to describe it:
+.Bd -literal -offset indent
OBJECT [32 | 64] [DYN | EXEC | REL] [VERDEF | NOVERDEF] object-path
-.fi
-.in -4
-.sp
+.Ed
+.Pp
The first field provides the ELF class of the object, and will be
either 32 or 64.
The second field provides the type of object, either
-a shared object (DYN), an executable (EXEC), or a relocatable object (REL).
-The third field will be VERDEF if the object contains ELF
-version definitions, and NOVERDEF if the object is not versioned.
+a shared object
+.Ql DYN ,
+an executable
+.Ql EXEC ,
+or a relocatable object
+.Ql REL .
+The third field will be
+.Ql VERDEF
+if the object contains ELF version definitions, and
+.Ql NOVERDEF
+if the object is not versioned.
The final field gives the path to the object.
-.P
-Under Unix, a file can have multiple names. In the context of ELF
-objects, this often happens for one of two reasons:
-.RS +4
-.TP
-.ie t \(bu
-.el o
+.Pp
+Under Unix, a file can have multiple names.
+In the context of ELF objects, this often happens for one of two reasons:
+.Bl -bullet -offset indent
+.It
Compilation symlinks, used to provide a non-versioned name for a shared object.
-.RE
-.RS +4
-.TP
-.ie t \(bu
-.el o
-Symlinks such as '32' and '64' used to provide alternative
-non-machine specific paths to objects.
-.RE
-.sp
+.It
+Symlinks such as
+.Ql 32
+and
+.Ql 64
+used to provide alternative non-machine specific paths to objects.
+.El
+.Pp
When
-.I find_elf
-identifies an object via such an aliased name, it issues an ALIAS line
-mapping it to the main name for the object:
-.sp
-.in +4
-.nf
+.Nm
+identifies an object via such an aliased name, it issues an
+.Sy ALIAS
+line mapping it to the main name for the object:
+.Bd -literal -offset indent
ALIAS object-path alias-path
-.fi
-.in -4
-.sp
-The \fB-a\fP option alters the handling of aliased names. When \fB-a\fP is
-specified, each file results in a separate OBJECT line, as if they were
+.Ed
+.Pp
+The
+.Fl a
+option alters the handling of aliased names.
+When
+.Fl a
+is specified, each file results in a separate
+.Sy OBJECT
+line, as if they were
independent files rather than the same file with different names.
-.sp
-.SH EXAMPLES
-Assume the following hierarchy of files exist under /usr/lib/foo:
-.sp
-.in +4
-.nf
+.Sh EXAMPLES
+Assume the following hierarchy of files exist under
+.Pa /usr/lib/foo :
+.Bd -literal -offset indent
% /bin/ls -alRF /usr/lib/foo
/usr/lib/foo:
total 111
@@ -156,17 +191,22 @@ drwxr-xr-x 2 root root 4 Jul 16 17:35 ./
drwxr-xr-x 3 root root 7 Jul 16 17:35 ../
lrwxrwxrwx 1 root bin 11 Jul 16 17:35 libfoo.so -> libfoo.so.1*
-rwxr-xr-x 1 root bin 72536 Jul 16 17:35 libfoo.so.1*
-.fi
-.in -4
-.sp
-This hierarchy contains compilation symlinks (libfoo.so) and
-path alias symlinks (32, 64), as discussed in OUTPUT.
-.P
-.I find_elf
+.Ed
+.Pp
+This hierarchy contains compilation symlinks
+.Po
+.Pa libfoo.so
+.Pc
+and path alias symlinks
+.Po
+32, 64
+.Pc ,
+as discussed in
+.Sx OUTPUT .
+.Pp
+.Nm
produces the following output for the above hierarchy:
-.sp
-.in +4
-.nf
+.Bd -literal -offset indent
% find_elf -r /usr/lib/foo
PREFIX /usr/lib/foo
OBJECT 64 DYN VERDEF amd64/libfoo.so.1
@@ -177,14 +217,12 @@ OBJECT 32 DYN VERDEF libfoo.so.1
ALIAS libfoo.so.1 32/libfoo.so
ALIAS libfoo.so.1 32/libfoo.so.1
ALIAS libfoo.so.1 libfoo.so
-.fi
-.in -4
-.sp
-Contrast this with the output when \fB-a\fP is used to treat
-each name as an independent file:
-.sp
-.in +4
-.nf
+.Ed
+.Pp
+Contrast this with the output when
+.Fl a
+is used to treat each name as an independent file:
+.Bd -literal -offset indent
% find_elf -ar /usr/lib/foo
PREFIX /usr/lib/foo
OBJECT 32 DYN VERDEF 32/libfoo.so
@@ -195,40 +233,32 @@ OBJECT 64 DYN VERDEF amd64/libfoo.so.1
OBJECT 64 DYN VERDEF amd64/libfoo.so
OBJECT 32 DYN VERDEF libfoo.so.1
OBJECT 32 DYN VERDEF libfoo.so
-.fi
-.in -4
-.sp
+.Ed
+.Pp
When
-.I find_elf
+.Nm
is used to process an alias for which no target object is given,
-there will be no output. For example, using /lib/libc.so, which
-is a compilation symlink for /lib/libc.so.1:
-.sp
-.in +4
-.nf
+there will be no output.
+For example, using
+.Pa /lib/libc.so ,
+which is a compilation symlink for
+.Pa /lib/libc.so.1 :
+.Bd -literal -offset indent
% find_elf /lib/libc.so
-.fi
-.in -4
-.sp
-In such situations, the \fB-a\fP option can be used to produce
-the desired output:
-.sp
-.in +4
-.nf
+.Ed
+.Pp
+In such situations, the
+.Fl a
+option can be used to produce the desired output:
+.Bd -literal -offset indent
% find_elf -a /lib/libc.so
OBJECT 32 DYN VERDEF /lib/libc.so
-.fi
-.in -4
-.sp
-.PP
-.RS
-.nf
-.SH SEE ALSO
-.BR check_rtime (1ONBLD),
-.BR interface_check (1ONBLD),
-.BR interface_cmp (1ONBLD),
-.BR ld (1),
-.BR ldd (1),
-.BR elfdump (1),
-.BR pvs (1).
-
+.Ed
+.Sh SEE ALSO
+.Xr elfdump 1 ,
+.Xr ld 1 ,
+.Xr ldd 1 ,
+.Xr pvs 1 ,
+.Xr check_rtime 1ONBLD ,
+.Xr interface_check 1ONBLD ,
+.Xr interface_cmp 1ONBLD
diff --git a/usr/src/uts/common/fs/smbsrv/smb2_cancel.c b/usr/src/uts/common/fs/smbsrv/smb2_cancel.c
index 1116a432f7..df97015aac 100644
--- a/usr/src/uts/common/fs/smbsrv/smb2_cancel.c
+++ b/usr/src/uts/common/fs/smbsrv/smb2_cancel.c
@@ -10,7 +10,7 @@
*/
/*
- * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
*/
/*
@@ -96,8 +96,8 @@ smb2_cancel_sync(smb_request_t *sr)
struct smb_session *session = sr->session;
int cnt = 0;
- if (sr->smb2_messageid == 0)
- goto failure;
+ if (sr->smb2_messageid == 0 || sr->smb2_messageid == UINT64_MAX)
+ return;
smb_slist_enter(&session->s_req_list);
for (req = smb_slist_head(&session->s_req_list); req != NULL;
@@ -117,7 +117,6 @@ smb2_cancel_sync(smb_request_t *sr)
smb_slist_exit(&session->s_req_list);
if (cnt != 1) {
- failure:
DTRACE_PROBE2(smb2__cancel__error,
uint64_t, sr->smb2_messageid, int, cnt);
#ifdef DEBUG
@@ -148,6 +147,9 @@ smb2_cancel_async(smb_request_t *sr)
struct smb_session *session = sr->session;
int cnt = 0;
+ if (sr->smb2_async_id == 0)
+ return;
+
smb_slist_enter(&session->s_req_list);
req = smb_slist_head(&session->s_req_list);
while (req) {
diff --git a/usr/src/uts/common/fs/smbsrv/smb2_fsctl_odx.c b/usr/src/uts/common/fs/smbsrv/smb2_fsctl_odx.c
index fe748bbd62..6a9040a5db 100644
--- a/usr/src/uts/common/fs/smbsrv/smb2_fsctl_odx.c
+++ b/usr/src/uts/common/fs/smbsrv/smb2_fsctl_odx.c
@@ -308,6 +308,8 @@ smb2_fsctl_odx_read(smb_request_t *sr, smb_fsctl_t *fsctl)
*/
data = in_file_off;
tok_type = STORAGE_OFFLOAD_TOKEN_TYPE_NATIVE1;
+ if (sr->sr_state != SMB_REQ_STATE_ACTIVE)
+ return (NT_STATUS_SUCCESS);
rc = smb_fsop_next_alloc_range(ofile->f_cr, ofile->f_node,
&data, &hole);
switch (rc) {
diff --git a/usr/src/uts/common/fs/smbsrv/smb2_fsctl_sparse.c b/usr/src/uts/common/fs/smbsrv/smb2_fsctl_sparse.c
index 90bb254670..91f13a150e 100644
--- a/usr/src/uts/common/fs/smbsrv/smb2_fsctl_sparse.c
+++ b/usr/src/uts/common/fs/smbsrv/smb2_fsctl_sparse.c
@@ -10,7 +10,7 @@
*/
/*
- * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
*/
/*
@@ -306,6 +306,9 @@ smb2_sparse_copy(
while (*residp > 0) {
+ if (sr->sr_state != SMB_REQ_STATE_ACTIVE)
+ break;
+
data = src_off;
rc = smb_fsop_next_alloc_range(src_ofile->f_cr,
src_ofile->f_node, &data, &hole);
diff --git a/usr/src/uts/common/fs/smbsrv/smb2_lease.c b/usr/src/uts/common/fs/smbsrv/smb2_lease.c
index 7b7247fad8..b8bed20f91 100644
--- a/usr/src/uts/common/fs/smbsrv/smb2_lease.c
+++ b/usr/src/uts/common/fs/smbsrv/smb2_lease.c
@@ -365,7 +365,7 @@ smb2_lease_break_ack(smb_request_t *sr)
status = smb_oplock_ack_break(sr, ofile, &LeaseState);
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(ofile->f_node, 0);
+ (void) smb_oplock_wait_break(sr, ofile->f_node, 0);
status = NT_STATUS_SUCCESS;
}
@@ -641,7 +641,7 @@ done:
*/
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(ofile->f_node, 0);
+ (void) smb_oplock_wait_break(sr, ofile->f_node, 0);
status = NT_STATUS_SUCCESS;
}
ASSERT(status == NT_STATUS_SUCCESS);
diff --git a/usr/src/uts/common/fs/smbsrv/smb2_oplock.c b/usr/src/uts/common/fs/smbsrv/smb2_oplock.c
index f3f96c2b21..f8317b2e81 100644
--- a/usr/src/uts/common/fs/smbsrv/smb2_oplock.c
+++ b/usr/src/uts/common/fs/smbsrv/smb2_oplock.c
@@ -10,7 +10,7 @@
*/
/*
- * Copyright 2019 Nexenta by DDN, Inc. All rights reserved.
+ * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
* Copyright 2019 RackTop Systems.
*/
@@ -131,7 +131,7 @@ smb2_oplock_break_ack(smb_request_t *sr)
status = smb2sr_go_async(sr);
if (status != 0)
goto errout;
- (void) smb_oplock_wait_break(ofile->f_node, 0);
+ (void) smb_oplock_wait_break(sr, ofile->f_node, 0);
status = 0;
}
if (status != 0) {
@@ -320,7 +320,7 @@ smb2_oplock_acquire(smb_request_t *sr)
*/
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(ofile->f_node, 0);
+ (void) smb_oplock_wait_break(sr, ofile->f_node, 0);
status = 0;
}
diff --git a/usr/src/uts/common/fs/smbsrv/smb_cmn_rename.c b/usr/src/uts/common/fs/smbsrv/smb_cmn_rename.c
index 098e203fe0..578d01aa07 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_cmn_rename.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_cmn_rename.c
@@ -20,7 +20,8 @@
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2013-2020 Tintri by DDN, Inc. All rights reserved.
+ * Copyright 2019 RackTop Systems.
*/
#include <sys/synch.h>
@@ -253,7 +254,7 @@ smb_common_rename(smb_request_t *sr, smb_fqi_t *src_fqi, smb_fqi_t *dst_fqi)
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
if (sr->session->dialect >= SMB_VERS_2_BASE)
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(dst_fnode, 0);
+ (void) smb_oplock_wait_break(sr, dst_fnode, 0);
status = 0;
}
if (status != 0) {
@@ -610,7 +611,7 @@ smb_rename_check_src(smb_request_t *sr, smb_fqi_t *src_fqi)
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
if (sr->session->dialect >= SMB_VERS_2_BASE)
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(src_node, 0);
+ (void) smb_oplock_wait_break(sr, src_node, 0);
status = 0;
}
@@ -634,7 +635,7 @@ smb_rename_check_src(smb_request_t *sr, smb_fqi_t *src_fqi)
ASSERT(sr->session->dialect < SMB_VERS_2_BASE);
status = smb_oplock_break_DELETE(src_node, NULL);
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
- (void) smb_oplock_wait_break(src_node, 0);
+ (void) smb_oplock_wait_break(sr, src_node, 0);
}
/*
@@ -651,7 +652,7 @@ smb_rename_check_src(smb_request_t *sr, smb_fqi_t *src_fqi)
status = smb_oplock_break_SETINFO(src_node, NULL,
FileRenameInformation);
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
- (void) smb_oplock_wait_break(src_node, 0);
+ (void) smb_oplock_wait_break(sr, src_node, 0);
}
/*
diff --git a/usr/src/uts/common/fs/smbsrv/smb_cmn_setfile.c b/usr/src/uts/common/fs/smbsrv/smb_cmn_setfile.c
index 53ff9b3cf6..304eb93a39 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_cmn_setfile.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_cmn_setfile.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
*/
/*
@@ -122,7 +122,7 @@ smb_set_eof_info(smb_request_t *sr, smb_setinfo_t *si)
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
if (sr->session->dialect >= SMB_VERS_2_BASE)
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(node, 0);
+ (void) smb_oplock_wait_break(sr, node, 0);
status = 0;
}
if (status != 0)
@@ -164,7 +164,7 @@ smb_set_alloc_info(smb_request_t *sr, smb_setinfo_t *si)
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
if (sr->session->dialect >= SMB_VERS_2_BASE)
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(node, 0);
+ (void) smb_oplock_wait_break(sr, node, 0);
status = 0;
}
if (status != 0)
@@ -245,7 +245,7 @@ smb_set_disposition_info(smb_request_t *sr, smb_setinfo_t *si)
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
if (sr->session->dialect >= SMB_VERS_2_BASE)
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(node, 0);
+ (void) smb_oplock_wait_break(sr, node, 0);
status = 0;
}
if (status != 0)
diff --git a/usr/src/uts/common/fs/smbsrv/smb_common_open.c b/usr/src/uts/common/fs/smbsrv/smb_common_open.c
index fb4d46f599..59c892ee88 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_common_open.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_common_open.c
@@ -715,8 +715,7 @@ smb_common_open(smb_request_t *sr)
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
if (sr->session->dialect >= SMB_VERS_2_BASE)
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(fnode, 0);
- status = 0;
+ status = smb_oplock_wait_break(sr, fnode, 0);
}
if (status != NT_STATUS_SUCCESS)
goto errout;
@@ -751,8 +750,7 @@ smb_common_open(smb_request_t *sr)
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
if (sr->session->dialect >= SMB_VERS_2_BASE)
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(fnode, 0);
- status = 0;
+ status = smb_oplock_wait_break(sr, fnode, 0);
} else {
/*
* Even when the oplock layer does NOT
@@ -820,8 +818,7 @@ smb_common_open(smb_request_t *sr)
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
if (sr->session->dialect >= SMB_VERS_2_BASE)
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(fnode, 0);
- status = 0;
+ status = smb_oplock_wait_break(sr, fnode, 0);
}
if (status != NT_STATUS_SUCCESS)
goto errout;
diff --git a/usr/src/uts/common/fs/smbsrv/smb_delete.c b/usr/src/uts/common/fs/smbsrv/smb_delete.c
index f0159fc971..53ff4600c2 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_delete.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_delete.c
@@ -21,7 +21,7 @@
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
*/
#include <sys/sunddi.h>
@@ -486,7 +486,7 @@ smb_delete_remove_file(smb_request_t *sr, smb_error_t *err)
*/
status = smb_oplock_break_DELETE(node, NULL);
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
- (void) smb_oplock_wait_break(node, 0);
+ (void) smb_oplock_wait_break(sr, node, 0);
status = 0;
}
if (status != 0) {
diff --git a/usr/src/uts/common/fs/smbsrv/smb_fem.c b/usr/src/uts/common/fs/smbsrv/smb_fem.c
index b68466edaa..4204830afd 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_fem.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_fem.c
@@ -659,7 +659,7 @@ smb_fem_oplock_wait(smb_node_t *node, caller_context_t *ct)
ct->cc_flags |= CC_WOULDBLOCK;
rc = EAGAIN;
} else {
- (void) smb_oplock_wait_break(node,
+ (void) smb_oplock_wait_break_fem(node,
smb_fem_oplock_timeout);
rc = 0;
}
diff --git a/usr/src/uts/common/fs/smbsrv/smb_locking_andx.c b/usr/src/uts/common/fs/smbsrv/smb_locking_andx.c
index 8028cfe8c9..b58d3f9f0f 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_locking_andx.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_locking_andx.c
@@ -21,7 +21,7 @@
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
*/
/*
@@ -295,7 +295,7 @@ smb_com_locking_andx(smb_request_t *sr)
NewLevel = OPLOCK_LEVEL_TWO;
status = smb_oplock_ack_break(sr, ofile, &NewLevel);
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
- (void) smb_oplock_wait_break(ofile->f_node, 0);
+ (void) smb_oplock_wait_break(sr, ofile->f_node, 0);
status = 0;
}
if (unlock_num == 0 && lock_num == 0)
diff --git a/usr/src/uts/common/fs/smbsrv/smb_ofile.c b/usr/src/uts/common/fs/smbsrv/smb_ofile.c
index 70c546228c..d3f2bad47c 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_ofile.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_ofile.c
@@ -20,9 +20,9 @@
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011-2020 Tintri by DDN, Inc. All rights reserved.
* Copyright 2016 Syneto S.R.L. All rights reserved.
* Copyright (c) 2016 by Delphix. All rights reserved.
- * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
* Copyright 2021 RackTop Systems, Inc.
*/
@@ -798,6 +798,22 @@ smb_ofile_hold(smb_ofile_t *of)
}
/*
+ * Void arg variant of smb_ofile_release for use with smb_llist_post.
+ * This is needed because smb_ofile_release may need to enter the
+ * smb_llist as writer when it drops the last reference, so when
+ * we're in the llist as reader, use smb_llist_post with this
+ * function to arrange for the release call at llist_exit.
+ */
+void
+smb_ofile_release_LL(void *arg)
+{
+ smb_ofile_t *of = arg;
+
+ SMB_OFILE_VALID(of);
+ smb_ofile_release(of);
+}
+
+/*
* Release a reference on a file. If the reference count falls to
* zero and the file has been closed, post the object for deletion.
* Object deletion is deferred to avoid modifying a list while an
@@ -1772,7 +1788,7 @@ smb_ofile_set_delete_on_close(smb_request_t *sr, smb_ofile_t *of)
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
if (sr->session->dialect >= SMB_VERS_2_BASE)
(void) smb2sr_go_async(sr);
- (void) smb_oplock_wait_break(of->f_node, 0);
+ (void) smb_oplock_wait_break(sr, of->f_node, 0);
}
mutex_enter(&of->f_mutex);
diff --git a/usr/src/uts/common/fs/smbsrv/smb_oplock.c b/usr/src/uts/common/fs/smbsrv/smb_oplock.c
index 7be36ebf42..5215da8693 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_oplock.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_oplock.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
*/
/*
@@ -110,7 +110,7 @@ smb1_oplock_acquire(smb_request_t *sr, boolean_t level2ok)
* status code that says we should wait.
*/
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
- (void) smb_oplock_wait_break(ofile->f_node, 0);
+ (void) smb_oplock_wait_break(sr, ofile->f_node, 0);
status = 0;
}
diff --git a/usr/src/uts/common/fs/smbsrv/smb_server.c b/usr/src/uts/common/fs/smbsrv/smb_server.c
index 5988f121eb..af568986c9 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_server.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_server.c
@@ -1532,6 +1532,7 @@ smb_server_shutdown(smb_server_t *sv)
sv->sv_rootuser = NULL;
}
if (sv->sv_session != NULL) {
+ smb_session_cancel_requests(sv->sv_session, NULL, NULL);
smb_slist_wait_for_empty(&sv->sv_session->s_req_list);
/* Just in case import left users and trees */
@@ -2013,6 +2014,10 @@ smb_server_fclose(smb_server_t *sv, uint32_t uniqid)
* so it can force a logoff that we haven't noticed yet.
* This is not called frequently, so we just walk the list of
* connections searching for the user.
+ *
+ * Note that this must wait for any durable handles (ofiles)
+ * owned by this user to become "orphaned", so that a reconnect
+ * that may immediately follow can find and use such ofiles.
*/
void
smb_server_logoff_ssnid(smb_request_t *sr, uint64_t ssnid)
@@ -2020,6 +2025,9 @@ smb_server_logoff_ssnid(smb_request_t *sr, uint64_t ssnid)
smb_server_t *sv = sr->sr_server;
smb_llist_t *sess_list;
smb_session_t *sess;
+ smb_user_t *user = NULL;
+
+ SMB_SERVER_VALID(sv);
if (sv->sv_state != SMB_SERVER_STATE_RUNNING)
return;
@@ -2031,38 +2039,77 @@ smb_server_logoff_ssnid(smb_request_t *sr, uint64_t ssnid)
sess != NULL;
sess = smb_llist_next(sess_list, sess)) {
- smb_user_t *user;
-
SMB_SESSION_VALID(sess);
if (sess->dialect < SMB_VERS_2_BASE)
continue;
- if (sess->s_state != SMB_SESSION_STATE_NEGOTIATED)
+ switch (sess->s_state) {
+ case SMB_SESSION_STATE_NEGOTIATED:
+ case SMB_SESSION_STATE_TERMINATED:
+ case SMB_SESSION_STATE_DISCONNECTED:
+ break;
+ default:
continue;
+ }
- user = smb_session_lookup_ssnid(sess, ssnid);
- if (user == NULL)
- continue;
+ /*
+ * Normal situation is to find a LOGGED_ON user.
+ */
+ user = smb_session_lookup_uid_st(sess, ssnid, 0,
+ SMB_USER_STATE_LOGGED_ON);
+ if (user != NULL) {
+
+ if (smb_is_same_user(user->u_cred, sr->user_cr)) {
+ /* Treat this as if we lost the connection */
+ user->preserve_opens = SMB2_DH_PRESERVE_SOME;
+ smb_user_logoff(user);
+ break;
+ }
+ smb_user_release(user);
+ user = NULL;
+ }
- if (!smb_is_same_user(user->u_cred, sr->user_cr)) {
+ /*
+ * If we raced with disconnect, may find LOGGING_OFF,
+ * in which case we want to just wait for it.
+ */
+ user = smb_session_lookup_uid_st(sess, ssnid, 0,
+ SMB_USER_STATE_LOGGING_OFF);
+ if (user != NULL) {
+ if (smb_is_same_user(user->u_cred, sr->user_cr))
+ break;
smb_user_release(user);
- continue;
+ user = NULL;
}
+ }
- /* Treat this as if we lost the connection */
- user->preserve_opens = SMB2_DH_PRESERVE_SOME;
- smb_user_logoff(user);
- smb_user_release(user);
+ smb_llist_exit(sess_list);
+ if (user != NULL) {
/*
- * The above may have left work on the delete queues
+ * Wait for durable handles to be orphaned.
+ * Note: not holding the sess list rwlock.
*/
+ smb_user_wait_trees(user);
+
+ /*
+ * Could be doing the last release on a user below,
+ * which can leave work on the delete queues for
+ * s_user_list or s_tree_list so flush those.
+ * Must hold the session list after the user release
+ * so that the session can't go away while we flush.
+ */
+ smb_llist_enter(sess_list, RW_READER);
+
+ sess = user->u_session;
+ smb_user_release(user);
+
smb_llist_flush(&sess->s_tree_list);
smb_llist_flush(&sess->s_user_list);
- }
- smb_llist_exit(sess_list);
+ smb_llist_exit(sess_list);
+ }
}
/* See also: libsmb smb_kmod_setcfg */
diff --git a/usr/src/uts/common/fs/smbsrv/smb_session.c b/usr/src/uts/common/fs/smbsrv/smb_session.c
index 6739fee326..89ab25ae02 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_session.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_session.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2019 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
* Copyright 2020 RackTop Systems, Inc.
*/
@@ -444,6 +444,7 @@ smb_request_cancel(smb_request_t *sr)
case SMB_REQ_STATE_WAITING_FCN1:
case SMB_REQ_STATE_WAITING_LOCK:
case SMB_REQ_STATE_WAITING_PIPE:
+ case SMB_REQ_STATE_WAITING_OLBRK:
/*
* These are states that have a cancel_method.
* Make the state change now, to ensure that
diff --git a/usr/src/uts/common/fs/smbsrv/smb_srv_oplock.c b/usr/src/uts/common/fs/smbsrv/smb_srv_oplock.c
index 538b57b7ba..e7bf15ff56 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_srv_oplock.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_srv_oplock.c
@@ -74,6 +74,7 @@ int smb_oplock_timeout_def = 45000; /* mSec. */
static void smb_oplock_async_break(void *);
static void smb_oplock_hdl_clear(smb_ofile_t *);
+static void smb_oplock_wait_break_cancel(smb_request_t *sr);
/*
@@ -237,6 +238,7 @@ smb_oplock_ind_break(smb_ofile_t *ofile, uint32_t NewLevel,
boolean_t AckRequired, uint32_t CompletionStatus)
{
smb_server_t *sv = ofile->f_server;
+ smb_node_t *node = ofile->f_node;
smb_request_t *sr = NULL;
/*
@@ -293,6 +295,18 @@ smb_oplock_ind_break(smb_ofile_t *ofile, uint32_t NewLevel,
sr = smb_request_alloc(ofile->f_session, 0);
if (sr == NULL)
sr = smb_request_alloc(sv->sv_session, 0);
+ if (sr == NULL) {
+ /*
+ * Server must be shutting down. We took a
+ * hold on the ofile that must be released,
+ * but we can't release here because we're
+ * called with the node ofile list entered.
+ * See smb_ofile_release_LL.
+ */
+ smb_llist_post(&node->n_ofile_list, ofile,
+ smb_ofile_release_LL);
+ return;
+ }
sr->sr_state = SMB_REQ_STATE_SUBMITTED;
sr->smb2_async = B_TRUE;
@@ -512,7 +526,7 @@ smb_oplock_send_brk(smb_request_t *sr)
*/
#ifdef DEBUG
if (smb_oplock_debug_wait > 0) {
- status = smb_oplock_wait_break(ofile->f_node,
+ status = smb_oplock_wait_break(sr, ofile->f_node,
smb_oplock_debug_wait);
if (status == 0)
return;
@@ -521,7 +535,7 @@ smb_oplock_send_brk(smb_request_t *sr)
debug_enter("oplock_wait");
}
#endif
- status = smb_oplock_wait_break(ofile->f_node,
+ status = smb_oplock_wait_break(sr, ofile->f_node,
smb_oplock_timeout_ack);
if (status == 0)
return;
@@ -608,7 +622,7 @@ smb_oplock_send_brk(smb_request_t *sr)
if (status == NT_STATUS_OPLOCK_BREAK_IN_PROGRESS) {
/* Not expecting this status return. */
cmn_err(CE_NOTE, "clnt local oplock ack wait?");
- (void) smb_oplock_wait_break(ofile->f_node,
+ (void) smb_oplock_wait_break(sr, ofile->f_node,
smb_oplock_timeout_ack);
status = 0;
}
@@ -648,6 +662,24 @@ smb_oplock_hdl_clear(smb_ofile_t *ofile)
}
/*
+ * Called by smb_request_cancel() via sr->cancel_method
+ * Arg is the smb_node_t with the breaking oplock.
+ */
+static void
+smb_oplock_wait_break_cancel(smb_request_t *sr)
+{
+ smb_node_t *node = sr->cancel_arg2;
+ smb_oplock_t *ol;
+
+ SMB_NODE_VALID(node);
+ ol = &node->n_oplock;
+
+ mutex_enter(&ol->ol_mutex);
+ cv_broadcast(&ol->WaitingOpenCV);
+ mutex_exit(&ol->ol_mutex);
+}
+
+/*
* Wait up to "timeout" mSec. for the current oplock "breaking" flags
* to be cleared (by smb_oplock_ack_break or smb_oplock_break_CLOSE).
*
@@ -663,7 +695,84 @@ smb_oplock_hdl_clear(smb_ofile_t *ofile)
* we're about to do something that invalidates some cache.
*/
uint32_t
-smb_oplock_wait_break(smb_node_t *node, int timeout) /* mSec. */
+smb_oplock_wait_break(smb_request_t *sr, smb_node_t *node, int timeout)
+{
+ smb_oplock_t *ol;
+ clock_t time, rv;
+ uint32_t status = 0;
+ smb_req_state_t srstate;
+
+ SMB_NODE_VALID(node);
+ ol = &node->n_oplock;
+
+ if (timeout == 0)
+ timeout = smb_oplock_timeout_def;
+ time = MSEC_TO_TICK(timeout) + ddi_get_lbolt();
+
+ mutex_enter(&sr->sr_mutex);
+ if (sr->sr_state != SMB_REQ_STATE_ACTIVE) {
+ mutex_exit(&sr->sr_mutex);
+ return (NT_STATUS_CANCELLED);
+ }
+ sr->sr_state = SMB_REQ_STATE_WAITING_OLBRK;
+ sr->cancel_method = smb_oplock_wait_break_cancel;
+ sr->cancel_arg2 = node;
+ mutex_exit(&sr->sr_mutex);
+
+ mutex_enter(&ol->ol_mutex);
+ while ((ol->ol_state & BREAK_ANY) != 0) {
+ ol->waiters++;
+ rv = cv_timedwait(&ol->WaitingOpenCV,
+ &ol->ol_mutex, time);
+ ol->waiters--;
+ if (rv < 0) {
+ /* cv_timewait timeout */
+ status = NT_STATUS_CANNOT_BREAK_OPLOCK;
+ break;
+ }
+
+ /*
+ * Check if we were woken by smb_request_cancel,
+ * which sets state SMB_REQ_STATE_CANCEL_PENDING
+ * and signals WaitingOpenCV.
+ */
+ mutex_enter(&sr->sr_mutex);
+ srstate = sr->sr_state;
+ mutex_exit(&sr->sr_mutex);
+ if (srstate != SMB_REQ_STATE_WAITING_OLBRK) {
+ break;
+ }
+ }
+
+ mutex_exit(&ol->ol_mutex);
+
+ mutex_enter(&sr->sr_mutex);
+ sr->cancel_method = NULL;
+ sr->cancel_arg2 = NULL;
+ switch (sr->sr_state) {
+ case SMB_REQ_STATE_WAITING_OLBRK:
+ sr->sr_state = SMB_REQ_STATE_ACTIVE;
+ /* status from above */
+ break;
+ case SMB_REQ_STATE_CANCEL_PENDING:
+ sr->sr_state = SMB_REQ_STATE_CANCELLED;
+ status = NT_STATUS_CANCELLED;
+ break;
+ default:
+ status = NT_STATUS_INTERNAL_ERROR;
+ break;
+ }
+ mutex_exit(&sr->sr_mutex);
+
+ return (status);
+}
+
+/*
+ * Simplified version used in smb_fem.c, like above,
+ * but no smb_request_cancel stuff.
+ */
+uint32_t
+smb_oplock_wait_break_fem(smb_node_t *node, int timeout) /* mSec. */
{
smb_oplock_t *ol;
clock_t time, rv;
diff --git a/usr/src/uts/common/fs/smbsrv/smb_tree.c b/usr/src/uts/common/fs/smbsrv/smb_tree.c
index 45f381ffb1..2aadc3bf38 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_tree.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_tree.c
@@ -963,6 +963,7 @@ smb_tree_alloc(smb_request_t *sr, const smb_kshare_t *si,
/* grab a ref for tree->t_owner */
smb_user_hold_internal(sr->uid_user);
+ smb_user_inc_trees(sr->uid_user);
tree->t_owner = sr->uid_user;
/* if FS is readonly, enforce that here */
@@ -1031,6 +1032,7 @@ smb_tree_dealloc(void *arg)
smb_idpool_destructor(&tree->t_odid_pool);
SMB_USER_VALID(tree->t_owner);
+ smb_user_dec_trees(tree->t_owner);
smb_user_release(tree->t_owner);
kmem_cache_free(smb_cache_tree, tree);
diff --git a/usr/src/uts/common/fs/smbsrv/smb_user.c b/usr/src/uts/common/fs/smbsrv/smb_user.c
index 8934a213eb..a5dc57315f 100644
--- a/usr/src/uts/common/fs/smbsrv/smb_user.c
+++ b/usr/src/uts/common/fs/smbsrv/smb_user.c
@@ -662,6 +662,55 @@ smb_user_enum(smb_user_t *user, smb_svcenum_t *svcenum)
return (rc);
}
+/*
+ * Count references by trees this user owns,
+ * and allow waiting for them to go away.
+ */
+void
+smb_user_inc_trees(smb_user_t *user)
+{
+ mutex_enter(&user->u_mutex);
+ user->u_owned_tree_cnt++;
+ mutex_exit(&user->u_mutex);
+}
+
+void
+smb_user_dec_trees(smb_user_t *user)
+{
+ mutex_enter(&user->u_mutex);
+ user->u_owned_tree_cnt--;
+ if (user->u_owned_tree_cnt == 0)
+ cv_broadcast(&user->u_owned_tree_cv);
+ mutex_exit(&user->u_mutex);
+}
+
+int smb_user_wait_tree_tmo = 30;
+
+/*
+ * Wait (up to 30 sec.) for trees to go away.
+ * Should happen in less than a second.
+ */
+void
+smb_user_wait_trees(smb_user_t *user)
+{
+ clock_t time;
+
+ time = SEC_TO_TICK(smb_user_wait_tree_tmo) + ddi_get_lbolt();
+ mutex_enter(&user->u_mutex);
+ while (user->u_owned_tree_cnt != 0) {
+ if (cv_timedwait(&user->u_owned_tree_cv,
+ &user->u_mutex, time) < 0)
+ break;
+ }
+ mutex_exit(&user->u_mutex);
+#ifdef DEBUG
+ if (user->u_owned_tree_cnt != 0) {
+ cmn_err(CE_NOTE, "smb_user_wait_trees failed");
+ debug_enter("smb_user_wait_trees debug");
+ }
+#endif
+}
+
/* *************************** Static Functions ***************************** */
/*
diff --git a/usr/src/uts/common/io/overlay/plugins/overlay_vxlan.c b/usr/src/uts/common/io/overlay/plugins/overlay_vxlan.c
index 92144b3985..60ee264050 100644
--- a/usr/src/uts/common/io/overlay/plugins/overlay_vxlan.c
+++ b/usr/src/uts/common/io/overlay/plugins/overlay_vxlan.c
@@ -11,6 +11,7 @@
/*
* Copyright 2018 Joyent, Inc.
+ * Copyright 2022 MNX Cloud, Inc.
*/
/*
@@ -273,7 +274,7 @@ vxlan_o_setprop(void *arg, const char *pr_name, const void *buf,
if (IN6_IS_ADDR_V4MAPPED(ipv6)) {
ipaddr_t v4;
IN6_V4MAPPED_TO_IPADDR(ipv6, v4);
- if (IN_MULTICAST(v4))
+ if (IN_MULTICAST(ntohl(v4)))
return (EINVAL);
}
diff --git a/usr/src/uts/common/io/usb/hcd/xhci/xhci.c b/usr/src/uts/common/io/usb/hcd/xhci/xhci.c
index a28b1fa0d3..45401aaa1d 100644
--- a/usr/src/uts/common/io/usb/hcd/xhci/xhci.c
+++ b/usr/src/uts/common/io/usb/hcd/xhci/xhci.c
@@ -11,6 +11,7 @@
/*
* Copyright (c) 2019, Joyent, Inc.
+ * Copyright 2022 Oxide Computer Company
*/
/*
@@ -387,11 +388,23 @@
*
* Endpoint management is one of the key parts to the xhci driver as every
* endpoint is a pipe that a device driver uses, so they are our primary
- * currency. Endpoints are enabled and disabled when the client device drivers
- * open and close a pipe. When an endpoint is enabled, we have to fill in an
- * endpoint's context structure with information about the endpoint. These
- * basically tell the controller important properties which it uses to ensure
- * that there is adequate bandwidth for the device.
+ * currency. An endpoint is enabled when the client device driver opens the
+ * associated pipe for the first time. When an endpoint is enabled, we have to
+ * fill in an endpoint's context structure with information about the endpoint.
+ * These basically tell the controller important properties which it uses to
+ * ensure that there is adequate bandwidth for the device.
+ *
+ * If the client device closes the pipe again we explicitly stop the endpoint,
+ * moving it to the Halted state, and take ownership of any transfers
+ * previously submitted to the ring but which have not yet completed. A client
+ * may open and close a pipe several times -- ugen(4D) in particular is known
+ * for this -- and we will stop and start the ring accordingly.
+ *
+ * It is tempting to fully unconfigure an endpoint when a pipe is closed, but
+ * some host controllers appear to exhibit undefined behaviour each time the
+ * endpoint is re-enabled this way; e.g., silently dropped transfers. As such,
+ * we wait until the whole device is being torn down to disable all previously
+ * enabled endpoints at once, as part of disabling the device slot.
*
* Each endpoint has its own ring as described in the previous section. We place
* TRBs (transfer request blocks) onto a given ring to request I/O be performed.
@@ -701,7 +714,7 @@
* disappearing, we generally attempt to load the xHCI controller before the
* EHCI controller. This logic is not done in the driver; however, it is done in
* other parts of the kernel like in uts/common/io/consconfig_dacf.c in the
- * function consconfig_load_drivres().
+ * function consconfig_load_drivers().
*
* -----------
* Future Work
diff --git a/usr/src/uts/common/io/usb/hcd/xhci/xhci_endpoint.c b/usr/src/uts/common/io/usb/hcd/xhci/xhci_endpoint.c
index 560facefcc..fc08238d4b 100644
--- a/usr/src/uts/common/io/usb/hcd/xhci/xhci_endpoint.c
+++ b/usr/src/uts/common/io/usb/hcd/xhci/xhci_endpoint.c
@@ -12,6 +12,7 @@
/*
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2019 by Western Digital Corporation
+ * Copyright 2022 Oxide Computer Company
*/
/*
@@ -63,6 +64,34 @@ xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *ph)
return (ep);
}
+void
+xhci_endpoint_timeout_cancel(xhci_t *xhcip, xhci_endpoint_t *xep)
+{
+ xep->xep_state |= XHCI_ENDPOINT_TEARDOWN;
+ if (xep->xep_timeout != 0) {
+ mutex_exit(&xhcip->xhci_lock);
+ (void) untimeout(xep->xep_timeout);
+ mutex_enter(&xhcip->xhci_lock);
+ xep->xep_timeout = 0;
+ }
+}
+
+void
+xhci_endpoint_release(xhci_t *xhcip, xhci_endpoint_t *xep)
+{
+ VERIFY(MUTEX_HELD(&xhcip->xhci_lock));
+ VERIFY3U(xep->xep_num, !=, XHCI_DEFAULT_ENDPOINT);
+ VERIFY(list_is_empty(&xep->xep_transfers));
+
+ VERIFY(xep->xep_pipe != NULL);
+ xep->xep_pipe = NULL;
+
+ VERIFY(xep->xep_state & XHCI_ENDPOINT_OPEN);
+ xep->xep_state &= ~XHCI_ENDPOINT_OPEN;
+
+ xhci_endpoint_timeout_cancel(xhcip, xep);
+}
+
/*
* The assumption is that someone calling this owns this endpoint / device and
* that it's in a state where it's safe to zero out that information.
@@ -75,6 +104,10 @@ xhci_endpoint_fini(xhci_device_t *xd, int endpoint)
VERIFY(xep != NULL);
xd->xd_endpoints[endpoint] = NULL;
+ if (endpoint != XHCI_DEFAULT_ENDPOINT) {
+ VERIFY(!(xep->xep_state & XHCI_ENDPOINT_OPEN));
+ }
+
xhci_ring_free(&xep->xep_ring);
cv_destroy(&xep->xep_state_cv);
list_destroy(&xep->xep_transfers);
@@ -429,15 +462,27 @@ xhci_endpoint_avg_trb(xhci_t *xhcip, usb_ep_descr_t *ep, int mps)
/* LINTED: E_FUNC_NO_RET_VAL */
}
+/*
+ * Set up the input context for this endpoint. If this endpoint is already
+ * open, just confirm that the current parameters and the originally programmed
+ * parameters match.
+ */
int
xhci_endpoint_setup_context(xhci_t *xhcip, xhci_device_t *xd,
xhci_endpoint_t *xep)
{
- uint_t eptype, burst, ival, max_esit, avgtrb, mps, mult, cerr;
+ xhci_endpoint_params_t new_xepp;
xhci_endpoint_context_t *ectx;
uint64_t deq;
/*
+ * Explicitly zero this entire struct to start so that we can compare
+ * it with bcmp().
+ */
+ bzero(&new_xepp, sizeof (new_xepp));
+ new_xepp.xepp_configured = B_TRUE;
+
+ /*
* For a USB >=3.0 device we should always have its companion descriptor
* provided for us by USBA. If it's not here, complain loudly and fail.
*/
@@ -464,9 +509,10 @@ xhci_endpoint_setup_context(xhci_t *xhcip, xhci_device_t *xd,
VERIFY(xd->xd_usbdev->usb_dev_descr != NULL);
VERIFY(xep->xep_pipe != NULL);
- mps = xep->xep_pipe->p_ep.wMaxPacketSize & XHCI_CONTEXT_MPS_MASK;
- mult = XHCI_CONTEXT_DEF_MULT;
- cerr = XHCI_CONTEXT_DEF_CERR;
+ new_xepp.xepp_mps =
+ xep->xep_pipe->p_ep.wMaxPacketSize & XHCI_CONTEXT_MPS_MASK;
+ new_xepp.xepp_mult = XHCI_CONTEXT_DEF_MULT;
+ new_xepp.xepp_cerr = XHCI_CONTEXT_DEF_CERR;
switch (xep->xep_type) {
case USB_EP_ATTR_ISOCH:
@@ -484,12 +530,13 @@ xhci_endpoint_setup_context(xhci_t *xhcip, xhci_device_t *xd,
if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
ASSERT(xep->xep_pipe->p_xep.uex_flags &
USB_EP_XFLAGS_SS_COMP);
- mult = xep->xep_pipe->p_xep.uex_ep_ss.bmAttributes &
+ new_xepp.xepp_mult =
+ xep->xep_pipe->p_xep.uex_ep_ss.bmAttributes &
USB_EP_SS_COMP_ISOC_MULT_MASK;
}
- mps &= XHCI_CONTEXT_MPS_MASK;
- cerr = XHCI_CONTEXT_ISOCH_CERR;
+ new_xepp.xepp_mps &= XHCI_CONTEXT_MPS_MASK;
+ new_xepp.xepp_cerr = XHCI_CONTEXT_ISOCH_CERR;
break;
default:
/*
@@ -500,37 +547,69 @@ xhci_endpoint_setup_context(xhci_t *xhcip, xhci_device_t *xd,
break;
}
- eptype = xhci_endpoint_epdesc_to_type(&xep->xep_pipe->p_xep.uex_ep);
- burst = xhci_endpoint_determine_burst(xd, xep);
- ival = xhci_endpoint_interval(xd, &xep->xep_pipe->p_xep.uex_ep);
- max_esit = xhci_endpoint_max_esit(xd, xep, mps, burst);
- avgtrb = xhci_endpoint_avg_trb(xhcip, &xep->xep_pipe->p_xep.uex_ep,
- mps);
+ new_xepp.xepp_eptype = xhci_endpoint_epdesc_to_type(
+ &xep->xep_pipe->p_xep.uex_ep);
+ new_xepp.xepp_burst = xhci_endpoint_determine_burst(xd, xep);
+ new_xepp.xepp_ival = xhci_endpoint_interval(xd,
+ &xep->xep_pipe->p_xep.uex_ep);
+ new_xepp.xepp_max_esit = xhci_endpoint_max_esit(xd, xep,
+ new_xepp.xepp_mps, new_xepp.xepp_burst);
+ new_xepp.xepp_avgtrb = xhci_endpoint_avg_trb(xhcip,
+ &xep->xep_pipe->p_xep.uex_ep, new_xepp.xepp_mps);
/*
* The multi field may be reserved as zero if the LEC feature flag is
* set. See the description of mult in xHCI 1.1 / 6.2.3.
*/
if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC)
- mult = 0;
+ new_xepp.xepp_mult = 0;
+
+ if (xep->xep_params.xepp_configured) {
+ /*
+ * The endpoint context has been configured already. We are
+ * reopening the pipe, so just confirm that the parameters are
+ * the same.
+ */
+ if (bcmp(&xep->xep_params, &new_xepp, sizeof (new_xepp)) == 0) {
+ /*
+ * Everything matches up.
+ */
+ return (0);
+ }
+
+ DTRACE_PROBE3(xhci__context__mismatch,
+ xhci_t *, xhcip,
+ xhci_endpoint_t *, xep,
+ xhci_endpoint_params_t *, &new_xepp);
+
+ xhci_error(xhcip, "device input context on slot %d and "
+ "port %d for endpoint %u was already initialized but "
+ "with incompatible parameters",
+ xd->xd_slot, xd->xd_port, xep->xep_num);
+ return (EINVAL);
+ }
bzero(ectx, sizeof (xhci_endpoint_context_t));
- ectx->xec_info = LE_32(XHCI_EPCTX_SET_MULT(mult) |
- XHCI_EPCTX_SET_IVAL(ival));
- if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC)
- ectx->xec_info |= LE_32(XHCI_EPCTX_SET_MAX_ESIT_HI(max_esit));
+ ectx->xec_info = LE_32(XHCI_EPCTX_SET_MULT(new_xepp.xepp_mult) |
+ XHCI_EPCTX_SET_IVAL(new_xepp.xepp_ival));
+ if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC) {
+ ectx->xec_info |=
+ LE_32(XHCI_EPCTX_SET_MAX_ESIT_HI(new_xepp.xepp_max_esit));
+ }
- ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(cerr) |
- XHCI_EPCTX_SET_EPTYPE(eptype) | XHCI_EPCTX_SET_MAXB(burst) |
- XHCI_EPCTX_SET_MPS(mps));
+ ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(new_xepp.xepp_cerr) |
+ XHCI_EPCTX_SET_EPTYPE(new_xepp.xepp_eptype) |
+ XHCI_EPCTX_SET_MAXB(new_xepp.xepp_burst) |
+ XHCI_EPCTX_SET_MPS(new_xepp.xepp_mps));
deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
xep->xep_ring.xr_tail;
ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);
- ectx->xec_txinfo = LE_32(XHCI_EPCTX_MAX_ESIT_PAYLOAD(max_esit) |
- XHCI_EPCTX_AVG_TRB_LEN(avgtrb));
+ ectx->xec_txinfo = LE_32(
+ XHCI_EPCTX_MAX_ESIT_PAYLOAD(new_xepp.xepp_max_esit) |
+ XHCI_EPCTX_AVG_TRB_LEN(new_xepp.xepp_avgtrb));
XHCI_DMA_SYNC(xd->xd_ictx, DDI_DMA_SYNC_FORDEV);
if (xhci_check_dma_handle(xhcip, &xd->xd_ictx) != DDI_FM_OK) {
@@ -542,6 +621,8 @@ xhci_endpoint_setup_context(xhci_t *xhcip, xhci_device_t *xd,
return (EIO);
}
+ bcopy(&new_xepp, &xep->xep_params, sizeof (new_xepp));
+ VERIFY(xep->xep_params.xepp_configured);
return (0);
}
@@ -613,10 +694,69 @@ xhci_endpoint_init(xhci_t *xhcip, xhci_device_t *xd,
return (ret);
}
+ xep->xep_state |= XHCI_ENDPOINT_OPEN;
+ return (0);
+}
+
+int
+xhci_endpoint_reinit(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep,
+ usba_pipe_handle_data_t *ph)
+{
+ VERIFY(MUTEX_HELD(&xhcip->xhci_lock));
+ VERIFY(ph != NULL);
+ VERIFY3U(xhci_endpoint_pipe_to_epid(ph), ==, xep->xep_num);
+ VERIFY3U(xep->xep_num, !=, XHCI_DEFAULT_ENDPOINT);
+
+ if (xep->xep_type != (ph->p_ep.bmAttributes & USB_EP_ATTR_MASK)) {
+ /*
+ * The endpoint type should not change unless the device has
+ * been torn down and recreated by the framework.
+ */
+ return (EINVAL);
+ }
+
+ if (xep->xep_state & XHCI_ENDPOINT_OPEN) {
+ return (EBUSY);
+ }
+
+ VERIFY(xep->xep_state & XHCI_ENDPOINT_TEARDOWN);
+ xep->xep_state &= ~XHCI_ENDPOINT_TEARDOWN;
+
+ VERIFY3U(xep->xep_timeout, ==, 0);
+ VERIFY(list_is_empty(&xep->xep_transfers));
+
+ VERIFY3P(xep->xep_pipe, ==, NULL);
+ xep->xep_pipe = ph;
+
+ /*
+ * Verify that the endpoint context parameters have not changed in a
+ * way that requires us to tell the controller about it.
+ */
+ int ret;
+ if ((ret = xhci_endpoint_setup_context(xhcip, xd, xep)) != 0) {
+ xep->xep_pipe = NULL;
+ xhci_endpoint_timeout_cancel(xhcip, xep);
+ return (ret);
+ }
+
+ xep->xep_state |= XHCI_ENDPOINT_OPEN;
return (0);
}
/*
+ * Wait until any ongoing resets or time outs are completed.
+ */
+void
+xhci_endpoint_serialize(xhci_t *xhcip, xhci_endpoint_t *xep)
+{
+ VERIFY(MUTEX_HELD(&xhcip->xhci_lock));
+
+ while ((xep->xep_state & XHCI_ENDPOINT_SERIALIZE) != 0) {
+ cv_wait(&xep->xep_state_cv, &xhcip->xhci_lock);
+ }
+}
+
+/*
* Attempt to quiesce an endpoint. Depending on the state of the endpoint, we
* may need to simply stop it. Alternatively, we may need to explicitly reset
* the endpoint. Once done, this endpoint should be stopped and can be
diff --git a/usr/src/uts/common/io/usb/hcd/xhci/xhci_usba.c b/usr/src/uts/common/io/usb/hcd/xhci/xhci_usba.c
index cd03bf2d32..114bd716c0 100644
--- a/usr/src/uts/common/io/usb/hcd/xhci/xhci_usba.c
+++ b/usr/src/uts/common/io/usb/hcd/xhci/xhci_usba.c
@@ -12,6 +12,7 @@
/*
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2019 by Western Digital Corporation
+ * Copyright 2022 Oxide Computer Company
*/
/*
@@ -58,7 +59,7 @@ xhci_hcdi_pipe_open(usba_pipe_handle_data_t *ph, usb_flags_t usb_flags)
{
xhci_t *xhcip = xhci_hcdi_get_xhcip(ph);
xhci_pipe_t *pipe;
- xhci_endpoint_t *xep;
+ xhci_endpoint_t *xep = NULL;
xhci_device_t *xd;
int kmflags = usb_flags & USB_FLAGS_SLEEP ? KM_SLEEP : KM_NOSLEEP;
int ret;
@@ -131,8 +132,21 @@ xhci_hcdi_pipe_open(usba_pipe_handle_data_t *ph, usb_flags_t usb_flags)
goto add;
}
- if (xd->xd_endpoints[epid] != NULL) {
+ /*
+ * If we're opening an endpoint other than the default control endpoint,
+ * then the device should have had a USB address assigned by the
+ * controller. Sanity check that before continuing.
+ */
+ VERIFY(xd->xd_addressed == B_TRUE);
+
+ /*
+ * We may have already initialized the endpoint with a previous pipe
+ * open.
+ */
+ if ((xep = xd->xd_endpoints[epid]) != NULL &&
+ (xep->xep_state & XHCI_ENDPOINT_OPEN)) {
mutex_exit(&xhcip->xhci_lock);
+
kmem_free(pipe, sizeof (xhci_pipe_t));
xhci_log(xhcip, "!asked to open endpoint %d on slot %d and "
"port %d, but endpoint already exists", epid, xd->xd_slot,
@@ -140,19 +154,75 @@ xhci_hcdi_pipe_open(usba_pipe_handle_data_t *ph, usb_flags_t usb_flags)
return (USB_FAILURE);
}
- /*
- * If we're opening an endpoint other than the default control endpoint,
- * then the device should have had a USB address assigned by the
- * controller. Sanity check that before continuing.
- */
- if (epid != XHCI_DEFAULT_ENDPOINT) {
- VERIFY(xd->xd_addressed == B_TRUE);
+ if (xep != NULL) {
+ /*
+ * The endpoint is already initialized but is not presently
+ * open so we can take it over here.
+ */
+ if ((ret = xhci_endpoint_reinit(xhcip, xd, xep, ph) != 0)) {
+ mutex_exit(&xhcip->xhci_lock);
+
+ kmem_free(pipe, sizeof (xhci_pipe_t));
+ xhci_log(xhcip, "!asked to reopen endpoint %d on "
+ "slot %d and port %d, but reinit failed (%d)",
+ epid, xd->xd_slot, xd->xd_port, ret);
+ return (ret);
+ }
+
+ /*
+ * We need to ensure the endpoint is stopped before we try to
+ * reset the transfer ring.
+ */
+ xep->xep_state |= XHCI_ENDPOINT_QUIESCE;
+ if ((ret = xhci_endpoint_quiesce(xhcip, xd, xep)) !=
+ USB_SUCCESS) {
+ /*
+ * If we could not quiesce the endpoint, release it so
+ * that another open can try again.
+ */
+ xep->xep_state &= ~XHCI_ENDPOINT_QUIESCE;
+ xhci_endpoint_release(xhcip, xep);
+ mutex_exit(&xhcip->xhci_lock);
+
+ kmem_free(pipe, sizeof (xhci_pipe_t));
+ xhci_log(xhcip, "!asked to reopen endpoint %d on "
+ "slot %d and port %d, but quiesce failed (%d)",
+ epid, xd->xd_slot, xd->xd_port, ret);
+ return (ret);
+ }
+
+ /*
+ * Reset the transfer ring dequeue pointer. The initial
+ * Configure Endpoint command leaves the endpoint in the
+ * Running state (xHCI 1.2 / 4.6.6), so even though the ring is
+ * still empty we ring the doorbell to end up in the same state
+ * (Running but Inactive).
+ */
+ mutex_exit(&xhcip->xhci_lock);
+ if ((ret = xhci_command_set_tr_dequeue(xhcip, xd, xep)) != 0 ||
+ (ret = xhci_endpoint_ring(xhcip, xd, xep)) != 0) {
+ mutex_enter(&xhcip->xhci_lock);
+ xep->xep_state &= ~XHCI_ENDPOINT_QUIESCE;
+ xhci_endpoint_release(xhcip, xep);
+ mutex_exit(&xhcip->xhci_lock);
+
+ kmem_free(pipe, sizeof (xhci_pipe_t));
+ xhci_log(xhcip, "!asked to open endpoint %d on "
+ "slot %d and port %d, but restart failed (%d)",
+ epid, xd->xd_slot, xd->xd_port, ret);
+ return (USB_FAILURE);
+ }
+ mutex_enter(&xhcip->xhci_lock);
+ xep->xep_state &= ~XHCI_ENDPOINT_QUIESCE;
+ mutex_exit(&xhcip->xhci_lock);
+
+ goto add;
}
/*
- * Okay, at this point we need to go create and set up an endpoint.
- * Once we're done, we'll try to install it and make sure that it
- * doesn't conflict with something else going on.
+ * Okay, at this point we need to go create and set up an endpoint from
+ * scratch. Once we're done, we'll try to install it and make sure
+ * that it doesn't conflict with something else going on.
*/
ret = xhci_endpoint_init(xhcip, xd, ph);
if (ret != 0) {
@@ -337,9 +407,7 @@ xhci_hcdi_pipe_poll_fini(usba_pipe_handle_data_t *ph, boolean_t is_close)
/*
* Ensure that no other resets or time outs are going on right now.
*/
- while ((xep->xep_state & (XHCI_ENDPOINT_SERIALIZE)) != 0) {
- cv_wait(&xep->xep_state_cv, &xhcip->xhci_lock);
- }
+ xhci_endpoint_serialize(xhcip, xep);
if (xpp->xpp_poll_state == XHCI_PERIODIC_POLL_IDLE) {
mutex_exit(&xhcip->xhci_lock);
@@ -417,7 +485,6 @@ xhci_hcdi_pipe_poll_fini(usba_pipe_handle_data_t *ph, boolean_t is_close)
* Tear down everything that we did in open. After this, the consumer of this
* USB device is done.
*/
-/* ARGSUSED */
static int
xhci_hcdi_pipe_close(usba_pipe_handle_data_t *ph, usb_flags_t usb_flags)
{
@@ -425,8 +492,7 @@ xhci_hcdi_pipe_close(usba_pipe_handle_data_t *ph, usb_flags_t usb_flags)
xhci_pipe_t *xp;
xhci_device_t *xd;
xhci_endpoint_t *xep;
- uint32_t info;
- int ret, i;
+ int ret;
uint_t epid;
if ((ph->p_ep.bmAttributes & USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR &&
@@ -467,53 +533,29 @@ xhci_hcdi_pipe_close(usba_pipe_handle_data_t *ph, usb_flags_t usb_flags)
}
/*
- * We need to clean up the endpoint. So the first thing we need to do is
- * stop it with a configure endpoint command. Once it's stopped, we can
- * free all associated resources.
- */
- mutex_enter(&xd->xd_imtx);
-
- /*
- * Potentially update the slot input context about the current max
- * endpoint. Make sure to set that the slot context is being updated
- * here as it may be changing and some hardware requires it.
+ * We clean up the endpoint by stopping it and cancelling any transfers
+ * that were in flight at the time. The endpoint is not unconfigured
+ * until the device is torn down later.
*/
- xd->xd_input->xic_drop_flags = LE_32(XHCI_INCTX_MASK_DCI(epid + 1));
- xd->xd_input->xic_add_flags = LE_32(XHCI_INCTX_MASK_DCI(0));
- for (i = XHCI_NUM_ENDPOINTS - 1; i >= 0; i--) {
- if (xd->xd_endpoints[i] != NULL &&
- xd->xd_endpoints[i] != xep)
- break;
+ xhci_endpoint_timeout_cancel(xhcip, xep);
+ xep->xep_state |= XHCI_ENDPOINT_QUIESCE;
+ if ((ret = xhci_endpoint_quiesce(xhcip, xd, xep)) != USB_SUCCESS) {
+ /*
+ * If we cannot stop the ring, it is not safe to proceed and we
+ * must keep the pipe open.
+ */
+ xep->xep_state &=
+ ~(XHCI_ENDPOINT_TEARDOWN | XHCI_ENDPOINT_QUIESCE);
+ cv_broadcast(&xep->xep_state_cv);
+ mutex_exit(&xhcip->xhci_lock);
+ xhci_error(xhcip, "asked to do close pipe on slot %d, "
+ "port %d, endpoint: %d, but quiesce failed %d",
+ xd->xd_slot, xd->xd_port, epid, ret);
+ return (USB_FAILURE);
}
- info = xd->xd_slotin->xsc_info;
- info &= ~XHCI_SCTX_DCI_MASK;
- info |= XHCI_SCTX_SET_DCI(i + 1);
- xd->xd_slotin->xsc_info = info;
-
- /*
- * Also zero out our context for this endpoint. Note that we don't
- * bother with syncing DMA memory here as it's not required to be synced
- * for this operation.
- */
- bzero(xd->xd_endin[xep->xep_num], sizeof (xhci_endpoint_context_t));
-
- /*
- * Stop the device and kill our timeout. Note, it is safe to hold the
- * device's input mutex across the untimeout, this lock should never be
- * referenced by the timeout code.
- */
- xep->xep_state |= XHCI_ENDPOINT_TEARDOWN;
- mutex_exit(&xhcip->xhci_lock);
- (void) untimeout(xep->xep_timeout);
-
- ret = xhci_command_configure_endpoint(xhcip, xd);
- mutex_exit(&xd->xd_imtx);
- if (ret != USB_SUCCESS)
- return (ret);
- mutex_enter(&xhcip->xhci_lock);
/*
- * Now that we've unconfigured the endpoint. See if we need to flush any
+ * Now that we've stopped the endpoint, see if we need to flush any
* transfers.
*/
xhci_hcdi_pipe_flush(xhcip, xep, USB_CR_PIPE_CLOSING);
@@ -521,7 +563,7 @@ xhci_hcdi_pipe_close(usba_pipe_handle_data_t *ph, usb_flags_t usb_flags)
xhci_hcdi_periodic_free(xhcip, xp);
}
- xhci_endpoint_fini(xd, epid);
+ xhci_endpoint_release(xhcip, xep);
remove:
ph->p_hcd_private = NULL;
@@ -579,9 +621,7 @@ xhci_hcdi_pipe_reset(usba_pipe_handle_data_t *ph, usb_flags_t usb_flags)
/*
* Ensure that no other resets or time outs are going on right now.
*/
- while ((xep->xep_state & (XHCI_ENDPOINT_SERIALIZE)) != 0) {
- cv_wait(&xep->xep_state_cv, &xhcip->xhci_lock);
- }
+ xhci_endpoint_serialize(xhcip, xep);
xep->xep_state |= XHCI_ENDPOINT_QUIESCE;
ret = xhci_endpoint_quiesce(xhcip, xd, xep);
@@ -1679,8 +1719,7 @@ xhci_hcdi_device_init(usba_device_t *ud, usb_port_t port, void **hcdpp)
}
/*
- * We're tearing down a device now. That means that the only endpoint context
- * that's still valid would be endpoint zero.
+ * We're tearing down a device now.
*/
static void
xhci_hcdi_device_fini(usba_device_t *ud, void *hcdp)
@@ -1726,7 +1765,16 @@ xhci_hcdi_device_fini(usba_device_t *ud, void *hcdp)
}
xhci_context_slot_output_fini(xhcip, xd);
- xhci_endpoint_fini(xd, XHCI_DEFAULT_ENDPOINT);
+
+ /*
+ * Once the slot is disabled, we can free any endpoints that were
+ * opened.
+ */
+ for (uint_t n = 0; n < XHCI_NUM_ENDPOINTS; n++) {
+ if (xd->xd_endpoints[n] != NULL) {
+ xhci_endpoint_fini(xd, n);
+ }
+ }
mutex_enter(&xhcip->xhci_lock);
list_remove(&xhcip->xhci_usba.xa_devices, xd);
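For reference, the new xhci_endpoint_serialize() helper itself is not part of these hunks; only its callers and its declaration appear here. Based on the open-coded wait loops it replaces in xhci_hcdi_pipe_poll_fini() and xhci_hcdi_pipe_reset(), a minimal sketch would look like the following; the lock-held assertion is my assumption, everything else mirrors the removed lines.

/*
 * Sketch only: wait for any quiesce or timeout activity on this endpoint to
 * drain.  Callers already hold xhci_lock, as the removed loops did.
 */
void
xhci_endpoint_serialize(xhci_t *xhcip, xhci_endpoint_t *xep)
{
	ASSERT(MUTEX_HELD(&xhcip->xhci_lock));

	while ((xep->xep_state & XHCI_ENDPOINT_SERIALIZE) != 0) {
		cv_wait(&xep->xep_state_cv, &xhcip->xhci_lock);
	}
}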
diff --git a/usr/src/uts/common/smbsrv/smb_kproto.h b/usr/src/uts/common/smbsrv/smb_kproto.h
index a77a697f85..f61b9aa9a2 100644
--- a/usr/src/uts/common/smbsrv/smb_kproto.h
+++ b/usr/src/uts/common/smbsrv/smb_kproto.h
@@ -294,7 +294,8 @@ void smb_oplock_ind_break(smb_ofile_t *, uint32_t, boolean_t, uint32_t);
void smb_oplock_ind_break_in_ack(smb_request_t *, smb_ofile_t *,
uint32_t, boolean_t);
void smb_oplock_send_brk(smb_request_t *);
-uint32_t smb_oplock_wait_break(smb_node_t *, int);
+uint32_t smb_oplock_wait_break(smb_request_t *, smb_node_t *, int);
+uint32_t smb_oplock_wait_break_fem(smb_node_t *, int);
/*
* range lock functions - node operations
@@ -664,6 +665,7 @@ void smb_ofile_flush(smb_request_t *, smb_ofile_t *);
boolean_t smb_ofile_hold(smb_ofile_t *);
boolean_t smb_ofile_hold_olbrk(smb_ofile_t *);
void smb_ofile_release(smb_ofile_t *);
+void smb_ofile_release_LL(void *);
void smb_ofile_close_all(smb_tree_t *, uint32_t);
void smb_ofile_set_flags(smb_ofile_t *, uint32_t);
boolean_t smb_ofile_is_open(smb_ofile_t *);
@@ -740,6 +742,9 @@ cred_t *smb_kcred_create(void);
void smb_user_setcred(smb_user_t *, cred_t *, uint32_t);
boolean_t smb_is_same_user(cred_t *, cred_t *);
boolean_t smb_user_has_security_priv(smb_user_t *, cred_t *);
+void smb_user_inc_trees(smb_user_t *);
+void smb_user_dec_trees(smb_user_t *);
+void smb_user_wait_trees(smb_user_t *);
/*
* SMB tree functions (file smb_tree.c)
diff --git a/usr/src/uts/common/smbsrv/smb_ktypes.h b/usr/src/uts/common/smbsrv/smb_ktypes.h
index 5c083b6873..89b057ff91 100644
--- a/usr/src/uts/common/smbsrv/smb_ktypes.h
+++ b/usr/src/uts/common/smbsrv/smb_ktypes.h
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2020 Tintri by DDN, Inc. All rights reserved.
+ * Copyright 2011-2020 Tintri by DDN, Inc. All rights reserved.
* Copyright 2022 RackTop Systems, Inc.
*/
@@ -1077,6 +1077,8 @@ typedef struct smb_user {
uint32_t u_privileges;
uint16_t u_uid; /* unique per-session */
uint32_t u_audit_sid;
+ uint32_t u_owned_tree_cnt;
+ kcondvar_t u_owned_tree_cv;
uint32_t u_sign_flags;
struct smb_key u_sign_key; /* SMB2 signing */
@@ -1799,6 +1801,7 @@ typedef enum smb_req_state {
SMB_REQ_STATE_WAITING_FCN2,
SMB_REQ_STATE_WAITING_LOCK,
SMB_REQ_STATE_WAITING_PIPE,
+ SMB_REQ_STATE_WAITING_OLBRK,
SMB_REQ_STATE_COMPLETED,
SMB_REQ_STATE_CANCEL_PENDING,
SMB_REQ_STATE_CANCELLED,
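The bodies of smb_user_inc_trees(), smb_user_dec_trees() and smb_user_wait_trees() are not visible in this excerpt; only their declarations and the new u_owned_tree_cnt / u_owned_tree_cv fields are. A minimal sketch consistent with those pieces is below. It assumes the counters are protected by the user's existing u_mutex; that lock choice, and the ASSERT, are assumptions on my part.

void
smb_user_inc_trees(smb_user_t *user)
{
	mutex_enter(&user->u_mutex);
	user->u_owned_tree_cnt++;
	mutex_exit(&user->u_mutex);
}

void
smb_user_dec_trees(smb_user_t *user)
{
	mutex_enter(&user->u_mutex);
	ASSERT(user->u_owned_tree_cnt > 0);
	if (--user->u_owned_tree_cnt == 0)
		cv_broadcast(&user->u_owned_tree_cv);
	mutex_exit(&user->u_mutex);
}

/* Block until every tree owned by this user has been released. */
void
smb_user_wait_trees(smb_user_t *user)
{
	mutex_enter(&user->u_mutex);
	while (user->u_owned_tree_cnt != 0)
		cv_wait(&user->u_owned_tree_cv, &user->u_mutex);
	mutex_exit(&user->u_mutex);
}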
diff --git a/usr/src/uts/common/sys/usb/hcd/xhci/xhci.h b/usr/src/uts/common/sys/usb/hcd/xhci/xhci.h
index c226019105..ddc1e09e1a 100644
--- a/usr/src/uts/common/sys/usb/hcd/xhci/xhci.h
+++ b/usr/src/uts/common/sys/usb/hcd/xhci/xhci.h
@@ -12,6 +12,7 @@
/*
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2019 by Western Digital Corporation
+ * Copyright 2022 Oxide Computer Company
*/
#ifndef _SYS_USB_XHCI_XHCI_H
@@ -497,17 +498,6 @@ typedef enum xhci_endpoint_state {
XHCI_ENDPOINT_QUIESCE = 0x04,
XHCI_ENDPOINT_TIMED_OUT = 0x08,
/*
- * This is a composite of states that we need to watch for. We don't
- * want to allow ourselves to set one of these flags while one of them
- * is currently active.
- */
- XHCI_ENDPOINT_SERIALIZE = 0x0c,
- /*
- * This is a composite of states that we need to make sure that if set,
- * we do not schedule activity on the ring.
- */
- XHCI_ENDPOINT_DONT_SCHEDULE = 0x0e,
- /*
* This endpoint is being torn down and should make sure it de-schedules
* itself.
*/
@@ -516,15 +506,47 @@ typedef enum xhci_endpoint_state {
* This endpoint is currently used in polled I/O mode by the
* kernel debugger.
*/
- XHCI_ENDPOINT_POLLED = 0x20
+ XHCI_ENDPOINT_POLLED = 0x20,
+ /*
+ * This endpoint is open and in use by a pipe.
+ */
+ XHCI_ENDPOINT_OPEN = 0x40,
} xhci_endpoint_state_t;
/*
+ * This is a composite of states that we need to watch for. We don't
+ * want to allow ourselves to set one of these flags while one of them
+ * is currently active.
+ */
+#define XHCI_ENDPOINT_SERIALIZE (XHCI_ENDPOINT_QUIESCE | \
+ XHCI_ENDPOINT_TIMED_OUT)
+
+/*
+ * This is a composite of states that, if any are set, means we must not
+ * schedule activity on the ring.
+ */
+#define XHCI_ENDPOINT_DONT_SCHEDULE (XHCI_ENDPOINT_HALTED | \
+ XHCI_ENDPOINT_QUIESCE | \
+ XHCI_ENDPOINT_TIMED_OUT)
+
+/*
* Forwards required for the endpoint
*/
struct xhci_device;
struct xhci;
+typedef struct xhci_endpoint_params {
+ boolean_t xepp_configured;
+ uint_t xepp_eptype;
+ uint_t xepp_burst;
+ uint_t xepp_ival;
+ uint_t xepp_max_esit;
+ uint_t xepp_avgtrb;
+ uint_t xepp_mps;
+ uint_t xepp_mult;
+ uint_t xepp_cerr;
+} xhci_endpoint_params_t;
+
typedef struct xhci_endpoint {
struct xhci *xep_xhci;
struct xhci_device *xep_xd;
@@ -536,6 +558,7 @@ typedef struct xhci_endpoint {
list_t xep_transfers;
usba_pipe_handle_data_t *xep_pipe;
xhci_ring_t xep_ring;
+ xhci_endpoint_params_t xep_params;
} xhci_endpoint_t;
typedef struct xhci_device {
@@ -828,9 +851,13 @@ extern void xhci_fm_runtime_reset(xhci_t *);
*/
extern int xhci_endpoint_init(xhci_t *, xhci_device_t *,
usba_pipe_handle_data_t *);
+extern int xhci_endpoint_reinit(xhci_t *, xhci_device_t *,
+ xhci_endpoint_t *, usba_pipe_handle_data_t *);
+extern void xhci_endpoint_release(xhci_t *, xhci_endpoint_t *);
extern void xhci_endpoint_fini(xhci_device_t *, int);
extern int xhci_endpoint_update_default(xhci_t *, xhci_device_t *,
xhci_endpoint_t *);
+extern void xhci_endpoint_timeout_cancel(xhci_t *, xhci_endpoint_t *);
extern int xhci_endpoint_setup_default_context(xhci_t *, xhci_device_t *,
xhci_endpoint_t *);
@@ -838,6 +865,7 @@ extern int xhci_endpoint_setup_default_context(xhci_t *, xhci_device_t *,
extern uint_t xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *);
extern boolean_t xhci_endpoint_is_periodic_in(xhci_endpoint_t *);
+extern void xhci_endpoint_serialize(xhci_t *, xhci_endpoint_t *);
extern int xhci_endpoint_quiesce(xhci_t *, xhci_device_t *, xhci_endpoint_t *);
extern int xhci_endpoint_schedule(xhci_t *, xhci_device_t *, xhci_endpoint_t *,
xhci_transfer_t *, boolean_t);
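Lastly, the XHCI_ENDPOINT_DONT_SCHEDULE composite above is documented as gating new work on the ring. The hypothetical helper below is not part of the driver; it only illustrates that intent, with the real checks living in the transfer-scheduling paths under xhci_lock.

/*
 * Sketch only: callers must not queue work on the ring while the endpoint is
 * halted, quiescing, or timed out.
 */
static boolean_t
xhci_endpoint_can_schedule(const xhci_endpoint_t *xep)
{
	return ((xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) == 0);
}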