author		Dan McDonald <danmcd@mnx.io>	2022-08-09 10:33:20 -0400
committer	Dan McDonald <danmcd@mnx.io>	2022-08-09 13:18:12 -0400
commit		345881c57eafd2f92f73791646936638889af149 (patch)
tree		77858afbe8419583e7eb1db48e8c758bbab11122 /usr
parent		6f8336c5540e2283d7e24887cc87118206eb3e99 (diff)
download	illumos-joyent-345881c57eafd2f92f73791646936638889af149.tar.gz
14902 Have Intel vm_exit paths guard against Post-Barrier RSB Predictions
Reviewed by: Robert Mustacchi <rm@fingolfin.org>
Reviewed by: Patrick Mooney <pmooney@pfmooney.com>
Approved by: Joshua M. Clulow <josh@sysmgr.org>
Diffstat (limited to 'usr')
-rw-r--r--	usr/src/uts/i86pc/os/cpuid.c			32
-rw-r--r--	usr/src/uts/intel/io/vmm/intel/vmx.c		11
-rw-r--r--	usr/src/uts/intel/io/vmm/intel/vmx_support.s	50
3 files changed, 26 insertions, 67 deletions
diff --git a/usr/src/uts/i86pc/os/cpuid.c b/usr/src/uts/i86pc/os/cpuid.c
index 00a1c0004f..eda3030357 100644
--- a/usr/src/uts/i86pc/os/cpuid.c
+++ b/usr/src/uts/i86pc/os/cpuid.c
@@ -25,6 +25,7 @@
* Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
* Copyright 2020 Joyent, Inc.
* Copyright 2022 Oxide Computer Company
+ * Copyright 2022 MNX Cloud, Inc.
*/
/*
* Copyright (c) 2010, Intel Corporation.
@@ -1059,7 +1060,7 @@
*
* 1. Using Indirect Branch Restricted Speculation (IBRS).
* 2. Using Retpolines and RSB Stuffing
- * 3. Using Enhanced Indirect Branch Restricted Speculation (EIBRS)
+ * 3. Using Enhanced Indirect Branch Restricted Speculation (eIBRS)
*
* IBRS uses a feature added to microcode to restrict speculation, among other
* things. This form of mitigation has not been used as it has been generally
@@ -1094,11 +1095,11 @@
* process (which is partly why we need to have return stack buffer stuffing,
* but more on that in a bit) and in processors starting with Cascade Lake
* on the server side, it's dangerous to rely on retpolines. Instead, a new
- * mechanism has been introduced called Enhanced IBRS (EIBRS).
+ * mechanism has been introduced called Enhanced IBRS (eIBRS).
*
- * Unlike IBRS, EIBRS is designed to be enabled once at boot and left on each
+ * Unlike IBRS, eIBRS is designed to be enabled once at boot and left on each
* physical core. However, if this is the case, we don't want to use retpolines
- * any more. Therefore if EIBRS is present, we end up turning each retpoline
+ * any more. Therefore if eIBRS is present, we end up turning each retpoline
* function (called a thunk) into a jmp instruction. This means that we're still
* paying the cost of an extra jump to the external thunk, but it gives us
* flexibility and the ability to have a single kernel image that works across a
@@ -1125,16 +1126,14 @@
* worried about such as when we enter the kernel from user land.
*
* To prevent against additional manipulation of the RSB from other contexts
- * such as a non-root VMX context attacking the kernel we first look to enhanced
- * IBRS. When EIBRS is present and enabled, then there is nothing else that we
- * need to do to protect the kernel at this time.
+ * such as a non-root VMX context attacking the kernel we first look to
+ * enhanced IBRS. When eIBRS is present and enabled, then there should be
+ * nothing else that we need to do to protect the kernel at this time.
*
- * On CPUs without EIBRS we need to manually overwrite the contents of the
- * return stack buffer. We do this through the x86_rsb_stuff() function.
- * Currently this is employed on context switch. The x86_rsb_stuff() function is
- * disabled when enhanced IBRS is present because Intel claims on such systems
- * it will be ineffective. Stuffing the RSB in context switch helps prevent user
- * to user attacks via the RSB.
+ * Unfortunately, eIBRS or not, we need to manually overwrite the contents of
+ * the return stack buffer. We do this through the x86_rsb_stuff() function.
+ * Currently this is employed on context switch and vmx_exit. The
+ * x86_rsb_stuff() function is disabled only when mitigations in general are.
*
* If SMEP is not present, then we would have to stuff the RSB every time we
* transitioned from user mode to the kernel, which isn't very practical right
@@ -1365,7 +1364,7 @@
*
* - Spectre v1: Not currently mitigated
* - swapgs: lfences after swapgs paths
- * - Spectre v2: Retpolines/RSB Stuffing or EIBRS if HW support
+ * - Spectre v2: Retpolines/RSB Stuffing or eIBRS if HW support
* - Meltdown: Kernel Page Table Isolation
* - Spectre v3a: Updated CPU microcode
* - Spectre v4: Not currently mitigated
@@ -2781,6 +2780,10 @@ cpuid_update_l1d_flush(cpu_t *cpu, uchar_t *featureset)
/*
* We default to enabling RSB mitigations.
+ *
+ * NOTE: We used to skip RSB mitigations with eIBRS, but developments around
+ * post-barrier RSB predictions suggest we should always enable RSB
+ * mitigations unless specifically instructed not to.
*/
static void
cpuid_patch_rsb(x86_spectrev2_mitigation_t mit)
@@ -2789,7 +2792,6 @@ cpuid_patch_rsb(x86_spectrev2_mitigation_t mit)
uint8_t *stuff = (uint8_t *)x86_rsb_stuff;
switch (mit) {
- case X86_SPECTREV2_ENHANCED_IBRS:
case X86_SPECTREV2_DISABLED:
*stuff = ret;
break;
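
For readers unfamiliar with the mechanism: cpuid_patch_rsb() now leaves
x86_rsb_stuff() live for every mitigation strategy except
X86_SPECTREV2_DISABLED, where it hot-patches the routine's first byte with a
ret opcode so every call site becomes a near no-op. A minimal sketch of the
routine's shape follows, assuming it mirrors the open-coded stuffing loop
removed from vmx_support.s later in this diff; the x86_rsb_stuff_sketch name
and label layout are illustrative only, and ENTRY_NP/SET_SIZE are assumed to
come from <sys/asm_linkage.h> as in the files above.

	.align	ASM_ENTRY_ALIGN;
	ENTRY_NP(x86_rsb_stuff_sketch)
	movl	$16, %ecx		/* 16 iterations, two calls each: 32 RSB entries */
	movq	%rsp, %rax		/* remember the real stack pointer */
1:
	call	3f			/* push an RSB entry pointing at the pad below */
2:
	pause				/* rogue speculation is captured here */
	call	2b
3:
	call	5f			/* push a second RSB entry */
4:
	pause
	call	4b
5:
	subl	$1, %ecx
	jnz	1b
	movq	%rax, %rsp		/* drop the 32 bogus return addresses */
	ret
	SET_SIZE(x86_rsb_stuff_sketch)

When mitigations are disabled, the *stuff = ret store in the DISABLED case
above turns the real routine's first instruction into a ret (0xc3), so the
call made on context switch and in vmx_exit_guest returns immediately.
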
diff --git a/usr/src/uts/intel/io/vmm/intel/vmx.c b/usr/src/uts/intel/io/vmm/intel/vmx.c
index 4ef51259ab..5d9d920ca6 100644
--- a/usr/src/uts/intel/io/vmm/intel/vmx.c
+++ b/usr/src/uts/intel/io/vmm/intel/vmx.c
@@ -41,6 +41,7 @@
* Copyright 2015 Pluribus Networks Inc.
* Copyright 2018 Joyent, Inc.
* Copyright 2022 Oxide Computer Company
+ * Copyright 2022 MNX Cloud, Inc.
*/
#include <sys/cdefs.h>
@@ -174,9 +175,6 @@ static uint64_t cr4_ones_mask, cr4_zeros_mask;
static int vmx_initialized;
-/* Do not flush RSB upon vmexit */
-static int no_flush_rsb;
-
/*
* Optional capabilities
*/
@@ -797,12 +795,7 @@ vmx_vminit(struct vm *vm)
rdmsr(MSR_SYSENTER_EIP_MSR));
/* instruction pointer */
- if (no_flush_rsb) {
- vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);
- } else {
- vmcs_write(VMCS_HOST_RIP,
- (uint64_t)vmx_exit_guest_flush_rsb);
- }
+ vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);
/* link pointer */
vmcs_write(VMCS_LINK_POINTER, ~0);
diff --git a/usr/src/uts/intel/io/vmm/intel/vmx_support.s b/usr/src/uts/intel/io/vmm/intel/vmx_support.s
index 60f761d652..5ee7efc6b5 100644
--- a/usr/src/uts/intel/io/vmm/intel/vmx_support.s
+++ b/usr/src/uts/intel/io/vmm/intel/vmx_support.s
@@ -38,6 +38,7 @@
*
* Copyright 2013 Pluribus Networks Inc.
* Copyright 2018 Joyent, Inc.
+ * Copyright 2022 MNX Cloud, Inc.
*/
#include <sys/asm_linkage.h>
@@ -214,32 +215,7 @@ inst_error:
* The VMCS-restored %rsp points to the struct vmxctx
*/
.align ASM_ENTRY_ALIGN;
-ALTENTRY(vmx_exit_guest)
- /* Save guest state that is not automatically saved in the vmcs. */
- VMX_GUEST_SAVE
-
- /*
- * This will return to the caller of 'vmx_enter_guest()' with a return
- * value of VMX_GUEST_VMEXIT.
- */
- movl $VMX_GUEST_VMEXIT, %eax
- movq VMXSTK_RBX(%rsp), %rbx
- movq VMXSTK_R12(%rsp), %r12
- movq VMXSTK_R13(%rsp), %r13
- movq VMXSTK_R14(%rsp), %r14
- movq VMXSTK_R15(%rsp), %r15
-
- VMX_GUEST_FLUSH_SCRATCH
-
- addq $VMXSTKSIZE, %rsp
- popq %rbp
- ret
-SET_SIZE(vmx_enter_guest)
-
-
-
-.align ASM_ENTRY_ALIGN;
-ALTENTRY(vmx_exit_guest_flush_rsb)
+ENTRY_NP(vmx_exit_guest)
/* Save guest state that is not automatically saved in the vmcs. */
VMX_GUEST_SAVE
@@ -248,23 +224,11 @@ ALTENTRY(vmx_exit_guest_flush_rsb)
/*
* To prevent malicious branch target predictions from affecting the
* host, overwrite all entries in the RSB upon exiting a guest.
+ *
+ * NOTE: If RSB mitigations are disabled (see cpuid.c), this call is
+ * entirely a NOP.
*/
- movl $16, %ecx /* 16 iterations, two calls per loop */
- movq %rsp, %rax
-loop:
- call 2f /* create an RSB entry. */
-1:
- pause
- call 1b /* capture rogue speculation. */
-2:
- call 2f /* create an RSB entry. */
-1:
- pause
- call 1b /* capture rogue speculation. */
-2:
- subl $1, %ecx
- jnz loop
- movq %rax, %rsp
+ call x86_rsb_stuff
/*
* This will return to the caller of 'vmx_enter_guest()' with a return
@@ -280,7 +244,7 @@ loop:
addq $VMXSTKSIZE, %rsp
popq %rbp
ret
-SET_SIZE(vmx_exit_guest_flush_rsb)
+SET_SIZE(vmx_exit_guest)
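
Putting the two vmx_support.s hunks together, the post-change exit path reads
roughly as follows. This is a hedged reconstruction from the context shown
above, not the verbatim file: the register restores between the hunks are
assumed to match the body removed from the old vmx_exit_guest.

	.align	ASM_ENTRY_ALIGN;
	ENTRY_NP(vmx_exit_guest)
	/* Save guest state that is not automatically saved in the vmcs. */
	VMX_GUEST_SAVE

	/*
	 * Overwrite all RSB entries; patched down to an immediate ret when
	 * RSB mitigations are disabled (see cpuid_patch_rsb() above).
	 */
	call	x86_rsb_stuff

	/*
	 * Return to the caller of 'vmx_enter_guest()' with a return value
	 * of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	movq	VMXSTK_RBX(%rsp), %rbx
	movq	VMXSTK_R12(%rsp), %r12
	movq	VMXSTK_R13(%rsp), %r13
	movq	VMXSTK_R14(%rsp), %r14
	movq	VMXSTK_R15(%rsp), %r15

	VMX_GUEST_FLUSH_SCRATCH

	addq	$VMXSTKSIZE, %rsp
	popq	%rbp
	ret
	SET_SIZE(vmx_exit_guest)
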
/*
* %rdi = trapno