Diffstat (limited to 'usr/src/uts/intel/ia32/ml')
-rw-r--r--	usr/src/uts/intel/ia32/ml/exception.s	67
-rw-r--r--	usr/src/uts/intel/ia32/ml/swtch.s	61
2 files changed, 83 insertions, 45 deletions
diff --git a/usr/src/uts/intel/ia32/ml/exception.s b/usr/src/uts/intel/ia32/ml/exception.s
index 8b538910e2..82d449f31c 100644
--- a/usr/src/uts/intel/ia32/ml/exception.s
+++ b/usr/src/uts/intel/ia32/ml/exception.s
@@ -1,7 +1,7 @@
 /*
  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
- * Copyright (c) 2017 Joyent, Inc.
+ * Copyright (c) 2018 Joyent, Inc.
  */
 
 /*
@@ -81,7 +81,7 @@ ndptrap_frstor(void)
 
 #define	NPTRAP_NOERR(trapno)	\
 	pushq	$0;		\
-	pushq	$trapno	
+	pushq	$trapno
 
 #define	TRAP_NOERR(trapno)	\
 	XPV_TRAP_POP;		\
@@ -93,13 +93,13 @@ ndptrap_frstor(void)
  */
 #define	TRAP_ERR(trapno)	\
 	XPV_TRAP_POP;		\
-	pushq	$trapno	
+	pushq	$trapno
 
 #else /* __xpv && __amd64 */
 
 #define	TRAP_NOERR(trapno)	\
 	push	$0;		\
-	push	$trapno	
+	push	$trapno
 
 #define	NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
 
@@ -108,10 +108,24 @@ ndptrap_frstor(void)
  * onto stack.
  */
 #define	TRAP_ERR(trapno)	\
-	push	$trapno	
+	push	$trapno
 
 #endif	/* __xpv && __amd64 */
 
+	/*
+	 * These are the stacks used on cpu0 for taking double faults,
+	 * NMIs and MCEs (the latter two only on amd64 where we have IST).
+	 *
+	 * We define them here instead of in a C file so that we can page-align
+	 * them (gcc won't do that in a .c file).
+	 */
+	.data
+	DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
+	.fill	DEFAULTSTKSZ, 1, 0
+	DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
+	.fill	DEFAULTSTKSZ, 1, 0
+	DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
+	.fill	DEFAULTSTKSZ, 1, 0
 
 /*
  * #DE
@@ -163,6 +177,12 @@ ndptrap_frstor(void)
 	je	1f
 	leaq	brand_sys_sysenter(%rip), %r11
 	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
+	je	1f
+	leaq	tr_sys_sysenter(%rip), %r11
+	cmpq	%r11, 24(%rsp)
+	je	1f
+	leaq	tr_brand_sys_sysenter(%rip), %r11
+	cmpq	%r11, 24(%rsp)
 	jne	2f
 1:	SWAPGS
 2:	popq	%r11
@@ -214,6 +234,10 @@ ndptrap_frstor(void)
  * the cpu structs for all processors till we find a match for the gdt
  * of the trapping processor. The stack is expected to be pointing at
  * the standard regs pushed by hardware on a trap (plus error code and trapno).
+ *
+ * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
+ * and kgsbase set to the same value) because we're not going back the normal
+ * way out of here (via IRET). Where we're going, we don't need no user %gs.
  */
 #define	SET_CPU_GSBASE \
 	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */		\
@@ -294,7 +318,7 @@ ndptrap_frstor(void)
 	call	av_dispatch_nmivect
 
 	INTR_POP
-	IRET
+	jmp	tr_iret_auto
 	/*NOTREACHED*/
 
 	SET_SIZE(nmiint)
@@ -319,8 +343,8 @@ ndptrap_frstor(void)
 
 	movl	%esp, %ebp
 
-	pushl	%ebp	
-	call	av_dispatch_nmivect	
+	pushl	%ebp
+	call	av_dispatch_nmivect
 	addl	$4, %esp
 
 	INTR_POP_USER
@@ -433,7 +457,7 @@ ud_push:
 	movq	32(%rsp), %rax		/* reload calling RSP */
 	movq	%rbp, (%rax)		/* store %rbp there */
 	popq	%rax			/* pop off temp */
-	IRET				/* return from interrupt */
+	jmp	tr_iret_kernel		/* return from interrupt */
 	/*NOTREACHED*/
 
 ud_leave:
@@ -454,7 +478,7 @@ ud_leave:
 	movq	%rbp, 32(%rsp)		/* store new %rsp */
 	movq	%rax, %rbp		/* set new %rbp */
 	popq	%rax			/* pop off temp */
-	IRET				/* return from interrupt */
+	jmp	tr_iret_kernel		/* return from interrupt */
 	/*NOTREACHED*/
 
 ud_nop:
@@ -464,7 +488,7 @@ ud_nop:
 	 */
 	INTR_POP
 	incq	(%rsp)
-	IRET
+	jmp	tr_iret_kernel
 	/*NOTREACHED*/
 
 ud_ret:
@@ -475,7 +499,7 @@ ud_ret:
 	movq	%rax, 8(%rsp)		/* store calling RIP */
 	addq	$8, 32(%rsp)		/* adjust new %rsp */
 	popq	%rax			/* pop off temp */
-	IRET				/* return from interrupt */
+	jmp	tr_iret_kernel		/* return from interrupt */
 	/*NOTREACHED*/
 
 ud_trap:
@@ -633,7 +657,7 @@ _emul_done:
 	 */
 	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
 	INTR_PUSH
-	
+
 	/*
 	 * We want to do this quickly as every lwp using fp will take this
 	 * after a context switch -- we do the frequent path in ndptrap_frstor
@@ -709,7 +733,7 @@ _patch_xrstorq_rbx:
 	SWAPGS				/* if from user, need swapgs */
 	LOADCPU(%rax)
 	SWAPGS
-2:	
+2:
 	/*
 	 * Xrstor needs to use edx as part of its flag.
 	 * NOTE: have to push rdx after "cmpw ...24(%rsp)", otherwise rsp+$24
@@ -749,7 +773,7 @@ _patch_xrstorq_rbx:
 	popq	%rdx
 	popq	%rbx
 	popq	%rax
-	IRET
+	jmp	tr_iret_auto
 	/*NOTREACHED*/
 
 .handle_in_trap:
@@ -867,7 +891,7 @@ _patch_xrstor_ebx:
 1:	addq	$DESCTBR_SIZE, %rsp
 	popq	%rax
-	
+
 	DFTRAP_PUSH
 
 	/*
@@ -1127,7 +1151,7 @@ check_for_user_address:
 #endif	/* !__amd64 */
 
 	ENTRY_NP(resvtrap)
-	TRAP_NOERR(15)		/* (reserved) */
+	TRAP_NOERR(T_RESVTRAP)	/* (reserved) */
 	jmp	cmntrap
 	SET_SIZE(resvtrap)
 
@@ -1207,15 +1231,10 @@ check_for_user_address:
 	SET_SIZE(xmtrap)
 
 	ENTRY_NP(invaltrap)
-	TRAP_NOERR(30)		/* very invalid */
+	TRAP_NOERR(T_INVALTRAP)	/* very invalid */
 	jmp	cmntrap
 	SET_SIZE(invaltrap)
 
-	ENTRY_NP(invalint)
-	TRAP_NOERR(31)		/* even more so */
-	jmp	cmnint
-	SET_SIZE(invalint)
-
 	.globl	fasttable
 
 #if defined(__amd64)
@@ -1286,7 +1305,7 @@ check_for_user_address:
 	ENTRY_NP(fast_null)
 	XPV_TRAP_POP
 	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
-	IRET
+	jmp	tr_iret_auto
 	/*NOTREACHED*/
 	SET_SIZE(fast_null)
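A note on the hunk at @@ -163,6 +177,12 @@ above: before deciding whether to SWAPGS, the handler compares the saved r_rip against each known sysenter entry point, and with KPTI that list now also includes the trampoline variants tr_sys_sysenter and tr_brand_sys_sysenter. The C sketch below restates that comparison; the extern declarations stand in for the assembly entry labels, and everything else is illustrative rather than taken from the patch.

	#include <stdbool.h>
	#include <stdint.h>

	/* Stand-ins for the assembly entry labels the hunk references. */
	extern char sys_sysenter, brand_sys_sysenter;
	extern char tr_sys_sysenter, tr_brand_sys_sysenter;

	/*
	 * True if the interrupted r_rip (saved at 24(%rsp) in the hunk) is one
	 * of the sysenter entry points -- the case where the handler still
	 * needs to SWAPGS before it can use the kernel %gs base.
	 */
	static bool
	rip_is_sysenter_entry(uint64_t r_rip)
	{
		return (r_rip == (uint64_t)&sys_sysenter ||
		    r_rip == (uint64_t)&brand_sys_sysenter ||
		    r_rip == (uint64_t)&tr_sys_sysenter ||
		    r_rip == (uint64_t)&tr_brand_sys_sysenter);
	}
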
diff --git a/usr/src/uts/intel/ia32/ml/swtch.s b/usr/src/uts/intel/ia32/ml/swtch.s
index 0948fa7c93..6fc38cfbe8 100644
--- a/usr/src/uts/intel/ia32/ml/swtch.s
+++ b/usr/src/uts/intel/ia32/ml/swtch.s
@@ -24,7 +24,7 @@
  */
 
 /*
- * Copyright (c) 2015, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2018 Joyent, Inc.
  */
 
 /*
@@ -64,7 +64,7 @@
  * The MMU context, therefore, only changes when resuming a thread in
  * a process different from curproc.
  *
- * resume_from_intr() is called when the thread being resumed was not 
+ * resume_from_intr() is called when the thread being resumed was not
  * passivated by resume (e.g. was interrupted). This means that the
  * resume lock is already held and that a restore context is not needed.
  * Also, the MMU context is not changed on the resume in this case.
@@ -235,6 +235,8 @@ resume(kthread_t *t)
 
 #if defined(__amd64)
 
+	.global	kpti_enable
+
 	ENTRY(resume)
 	movq	%gs:CPU_THREAD, %rax
 	leaq	resume_return(%rip), %r11
@@ -305,7 +307,7 @@ resume(kthread_t *t)
 	 */
 	movq	CPU_IDLE_THREAD(%r15), %rax	/* idle thread pointer */
 
-	/* 
+	/*
 	 * Set the idle thread as the current thread
 	 */
 	movq	T_SP(%rax), %rsp	/* It is safe to set rsp */
@@ -318,7 +320,7 @@ resume(kthread_t *t)
 	GET_THREAD_HATP(%rdi, %r12, %r11)
 	call	hat_switch
 
-	/* 
+	/*
 	 * Clear and unlock previous thread's t_lock
 	 * to allow it to be dispatched by another processor.
 	 */
@@ -368,13 +370,24 @@ resume(kthread_t *t)
 	 * thread -- this will set rsp0 to the wrong value, but it's harmless
 	 * as it's a kernel thread, and it won't actually attempt to implicitly
 	 * use the rsp0 via a privilege change.
+	 *
+	 * Note that when we have KPTI enabled on amd64, we never use this
+	 * value at all (since all the interrupts have an IST set).
 	 */
 	movq	CPU_TSS(%r13), %r14
+#if !defined(__xpv)
+	cmpq	$1, kpti_enable
+	jne	1f
+	leaq	CPU_KPTI_TR_RSP(%r13), %rax
+	jmp	2f
+1:
 	movq	T_STACK(%r12), %rax
 	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
-#if !defined(__xpv)
+2:
 	movq	%rax, TSS_RSP0(%r14)
 #else
+	movq	T_STACK(%r12), %rax
+	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
 	movl	$KDS_SEL, %edi
 	movq	%rax, %rsi
 	call	HYPERVISOR_stack_switch
@@ -407,7 +420,7 @@ resume(kthread_t *t)
 	movq	%rcx, %rdi
 	call	restorepctx
 .norestorepctx:
-	
+
 	STORE_INTR_START(%r12)
 
 	/*
@@ -428,7 +441,7 @@ resume(kthread_t *t)
 	 * resuming thread's PC after first setting the priority as low as
 	 * possible and blocking all interrupt threads that may be active.
 	 */
-	movq	%r13, %rax	/* save return address */	
+	movq	%r13, %rax	/* save return address */
 	RESTORE_REGS(%r11)
 	pushq	%rax		/* push return address for spl0() */
 	call	__dtrace_probe___sched_on__cpu
@@ -490,12 +503,12 @@ resume_return:
 	addl	$4, %esp
 .nosavepctx:
 
-	/* 
+	/*
 	 * Temporarily switch to the idle thread's stack
 	 */
 	movl	CPU_IDLE_THREAD(%ebx), %eax	/* idle thread pointer */
 
-	/* 
+	/*
 	 * Set the idle thread as the current thread
 	 */
 	movl	T_SP(%eax), %esp	/* It is safe to set esp */
@@ -506,8 +519,8 @@ resume_return:
 	pushl	%ecx
 	call	hat_switch
 	addl	$4, %esp
-	
-	/* 
+
+	/*
 	 * Clear and unlock previous thread's t_lock
 	 * to allow it to be dispatched by another processor.
 	 */
@@ -673,7 +686,7 @@ resume_from_zombie(kthread_t *t)
 
 #endif	/* __xpv */
 
-	/* 
+	/*
 	 * Temporarily switch to the idle thread's stack so that the zombie
 	 * thread's stack can be reclaimed by the reaper.
 	 */
@@ -686,7 +699,7 @@ resume_from_zombie(kthread_t *t)
 	 */
 	andq	$_BITNOT(STACK_ALIGN-1), %rsp
 
-	/* 
+	/*
 	 * Set the idle thread as the current thread.
 	 */
 	movq	%rax, %gs:CPU_THREAD
@@ -695,7 +708,7 @@ resume_from_zombie(kthread_t *t)
 	GET_THREAD_HATP(%rdi, %r12, %r11)
 	call	hat_switch
 
-	/* 
+	/*
 	 * Put the zombie on death-row.
 	 */
 	movq	%r13, %rdi
@@ -743,14 +756,14 @@ resume_from_zombie_return:
 	movl	%eax, %cr0
 .zfpu_disabled:
 
-	/* 
+	/*
 	 * Temporarily switch to the idle thread's stack so that the zombie
 	 * thread's stack can be reclaimed by the reaper.
 	 */
 	movl	%gs:CPU_IDLE_THREAD, %eax	/* idle thread pointer */
 	movl	T_SP(%eax), %esp	/* get onto idle thread stack */
 
-	/* 
+	/*
 	 * Set the idle thread as the current thread.
 	 */
 	movl	%eax, %gs:CPU_THREAD
@@ -763,7 +776,7 @@ resume_from_zombie_return:
 	call	hat_switch
 	addl	$4, %esp
 
-	/* 
+	/*
 	 * Put the zombie on death-row.
 	 */
 	pushl	%esi
@@ -814,7 +827,7 @@ resume_from_intr(kthread_t *t)
 	movq	T_SP(%r12), %rsp	/* restore resuming thread's sp */
 	xorl	%ebp, %ebp		/* make $<threadlist behave better */
 
-	/* 
+	/*
 	 * Unlock outgoing thread's mutex dispatched by another processor.
 	 */
 	xorl	%eax, %eax
@@ -864,7 +877,7 @@ resume_from_intr_return:
 	movl	T_SP(%edi), %esp	/* restore resuming thread's sp */
 	xorl	%ebp, %ebp		/* make $<threadlist behave better */
 
-	/* 
+	/*
 	 * Unlock outgoing thread's mutex dispatched by another processor.
 	 */
 	xorl	%eax,%eax
@@ -969,9 +982,15 @@ thread_splitstack_cleanup(void)
 	ENTRY(thread_splitstack_cleanup)
 	LOADCPU(%r8)
 	movq	CPU_TSS(%r8), %r9
-	movq	CPU_THREAD(%r8), %r10
+	cmpq	$1, kpti_enable
+	jne	1f
+	leaq	CPU_KPTI_TR_RSP(%r8), %rax
+	jmp	2f
+1:
+	movq	CPU_THREAD(%r8), %r10
 	movq	T_STACK(%r10), %rax
-	addq	$REGSIZE+MINFRAME, %rax	
+	addq	$REGSIZE+MINFRAME, %rax
+2:
 	movq	%rax, TSS_RSP0(%r9)
 	ret
 	SET_SIZE(thread_splitstack_cleanup)
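
Both swtch.s hunks that test kpti_enable (in resume() at @@ -368,13 +370,24 @@ and in thread_splitstack_cleanup() at @@ -969,9 +982,15 @@) make the same decision: when KPTI is on, TSS_RSP0 is pointed at the per-cpu trampoline stack slot (CPU_KPTI_TR_RSP) rather than at the bottom of the resuming thread's kernel stack. A hedged C sketch of that selection follows; the struct layouts, field names, and the REGSIZE/MINFRAME values are simplified stand-ins, not the real illumos definitions.

	#include <stdint.h>

	#define REGSIZE		0xc8	/* illustrative value only */
	#define MINFRAME	0x20	/* illustrative value only */

	struct fake_cpu {
		uint64_t kpti_tr_rsp;	/* stand-in for the CPU_KPTI_TR_RSP slot */
	};

	struct fake_thread {
		char *t_stk;		/* stand-in for T_STACK */
	};

	int kpti_enable = 1;

	/* Value the hunks store into TSS_RSP0 for the resuming thread. */
	uint64_t
	pick_tss_rsp0(struct fake_cpu *cp, struct fake_thread *t)
	{
		if (kpti_enable == 1) {
			/* KPTI: take interrupts on the per-cpu trampoline stack. */
			return ((uint64_t)&cp->kpti_tr_rsp);
		}
		/* No KPTI: bottom of the thread's kernel stack. */
		return ((uint64_t)(t->t_stk + REGSIZE + MINFRAME));
	}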