diff options
author | John Levon <john.levon@joyent.com> | 2018-06-13 22:54:04 +0000 |
---|---|---|
committer | Robert Mustacchi <rm@joyent.com> | 2018-06-22 17:18:57 +0000 |
commit | 52b3cf5cd58128b1ae465be28a32b1cc5c617a3f (patch) | |
tree | 67e73a2e833c0817992a17118e9640a64a497ff5 /usr/src/uts/intel/ia32 | |
parent | bce37cdf6c9e94a5df1aec9fec97bb9c50139717 (diff) | |
download | illumos-joyent-release-20180315.tar.gz |
OS-6967 LDT still not happy under KPTI (tag: release-20180315)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Alex Wilson <alex.wilson@joyent.com>
Approved by: Jerry Jelinek <jerry.jelinek@joyent.com>
Approved by: Alex Wilson <alex.wilson@joyent.com>
Diffstat (limited to 'usr/src/uts/intel/ia32')
-rw-r--r-- | usr/src/uts/intel/ia32/os/sysi86.c | 121 |
1 file changed, 50 insertions, 71 deletions
diff --git a/usr/src/uts/intel/ia32/os/sysi86.c b/usr/src/uts/intel/ia32/os/sysi86.c index e3f4e2608c..bdb66e3e1f 100644 --- a/usr/src/uts/intel/ia32/os/sysi86.c +++ b/usr/src/uts/intel/ia32/os/sysi86.c @@ -348,8 +348,7 @@ static void ldt_load(void) { #if defined(__xpv) - xen_set_ldt(get_ssd_base(&curproc->p_ldt_desc), - curproc->p_ldtlimit + 1); + xen_set_ldt(curproc->p_ldt, curproc->p_ldtlimit + 1); #else size_t len; system_desc_t desc; @@ -415,7 +414,7 @@ ldt_savectx(proc_t *p) #endif ldt_unload(); - cpu_fast_syscall_enable(NULL); + cpu_fast_syscall_enable(); } static void @@ -425,30 +424,34 @@ ldt_restorectx(proc_t *p) ASSERT(p == curproc); ldt_load(); - cpu_fast_syscall_disable(NULL); + cpu_fast_syscall_disable(); } /* - * When a process with a private LDT execs, fast syscalls must be enabled for - * the new process image. + * At exec time, we need to clear up our LDT context and re-enable fast syscalls + * for the new process image. + * + * The same is true for the other case, where we have: + * + * proc_exit() + * ->exitpctx()->ldt_savectx() + * ->freepctx()->ldt_freectx() + * + * Because pre-emption is not prevented between the two callbacks, we could have + * come off CPU, and brought back LDT context when coming back on CPU via + * ldt_restorectx(). */ /* ARGSUSED */ static void ldt_freectx(proc_t *p, int isexec) { - ASSERT(p->p_ldt); - - if (isexec) { - kpreempt_disable(); - cpu_fast_syscall_enable(NULL); - kpreempt_enable(); - } + ASSERT(p->p_ldt != NULL); + ASSERT(p == curproc); - /* - * ldt_free() will free the memory used by the private LDT, reset the - * process's descriptor, and re-program the LDTR. 
- */ + kpreempt_disable(); ldt_free(p); + cpu_fast_syscall_enable(); + kpreempt_enable(); } /* @@ -500,10 +503,10 @@ ldt_installctx(proc_t *p, proc_t *cp) int setdscr(struct ssd *ssd) { - ushort_t seli; /* selector index */ + ushort_t seli; /* selector index */ user_desc_t *ldp; /* descriptor pointer */ user_desc_t ndesc; /* new descriptor */ - proc_t *pp = ttoproc(curthread); + proc_t *pp = curproc; int rc = 0; /* @@ -544,11 +547,12 @@ setdscr(struct ssd *ssd) */ kpreempt_disable(); ldt_installctx(pp, NULL); - cpu_fast_syscall_disable(NULL); + cpu_fast_syscall_disable(); ASSERT(curthread->t_post_sys != 0); kpreempt_enable(); } else if (seli > pp->p_ldtlimit) { + ASSERT(pp->p_pctx != NULL); /* * Increase size of ldt to include seli. @@ -650,10 +654,14 @@ setdscr(struct ssd *ssd) } /* - * If acc1 is zero, clear the descriptor (including the 'present' bit) + * If acc1 is zero, clear the descriptor (including the 'present' bit). + * Make sure we update the CPU-private copy of the LDT. */ if (ssd->acc1 == 0) { rc = ldt_update_segd(ldp, &null_udesc); + kpreempt_disable(); + ldt_load(); + kpreempt_enable(); mutex_exit(&pp->p_ldtlock); return (rc); } @@ -667,7 +675,6 @@ setdscr(struct ssd *ssd) return (EINVAL); } -#if defined(__amd64) /* * Do not allow 32-bit applications to create 64-bit mode code * segments. @@ -677,50 +684,34 @@ setdscr(struct ssd *ssd) mutex_exit(&pp->p_ldtlock); return (EINVAL); } -#endif /* __amd64 */ /* - * Set up a code or data user segment descriptor. + * Set up a code or data user segment descriptor, making sure to update + * the CPU-private copy of the LDT. */ if (SI86SSD_ISUSEG(ssd)) { ssd_to_usd(ssd, &ndesc); rc = ldt_update_segd(ldp, &ndesc); + kpreempt_disable(); + ldt_load(); + kpreempt_enable(); mutex_exit(&pp->p_ldtlock); return (rc); } -#if defined(__i386) - /* - * Allow a call gate only if the destination is in the LDT - * and the system is running in 32-bit legacy mode. 
- * - * In long mode 32-bit call gates are redefined as 64-bit call - * gates and the hw enforces that the target code selector - * of the call gate must be 64-bit selector. A #gp fault is - * generated if otherwise. Since we do not allow 32-bit processes - * to switch themselves to 64-bits we never allow call gates - * on 64-bit system system. - */ - if (SI86SSD_TYPE(ssd) == SDT_SYSCGT && SELISLDT(ssd->ls)) { - - - ssd_to_sgd(ssd, (gate_desc_t *)&ndesc); - rc = ldt_update_segd(ldp, &ndesc); - mutex_exit(&pp->p_ldtlock); - return (rc); - } -#endif /* __i386 */ - mutex_exit(&pp->p_ldtlock); return (EINVAL); } /* - * Allocate new LDT for process just large enough to contain seli. - * Note we allocate and grow LDT in PAGESIZE chunks. We do this - * to simplify the implementation and because on the hypervisor it's - * required, since the LDT must live on pages that have PROT_WRITE - * removed and which are given to the hypervisor. + * Allocate new LDT for process just large enough to contain seli. Note we + * allocate and grow LDT in PAGESIZE chunks. We do this to simplify the + * implementation and because on the hypervisor it's required, since the LDT + * must live on pages that have PROT_WRITE removed and which are given to the + * hypervisor. + * + * Note that we don't actually load the LDT into the current CPU here: it's done + * later by our caller. 
*/ static void ldt_alloc(proc_t *pp, uint_t seli) @@ -751,13 +742,6 @@ ldt_alloc(proc_t *pp, uint_t seli) pp->p_ldt = ldt; pp->p_ldtlimit = nsels - 1; - set_syssegd(&pp->p_ldt_desc, ldt, ldtsz - 1, SDT_SYSLDT, SEL_KPL); - - if (pp == curproc) { - kpreempt_disable(); - ldt_load(); - kpreempt_enable(); - } } static void @@ -776,7 +760,6 @@ ldt_free(proc_t *pp) pp->p_ldt = NULL; pp->p_ldtlimit = 0; - pp->p_ldt_desc = null_sdesc; mutex_exit(&pp->p_ldtlock); if (pp == curproc) { @@ -841,6 +824,14 @@ ldt_dup(proc_t *pp, proc_t *cp) } +/* + * Note that we don't actually load the LDT into the current CPU here: it's done + * later by our caller - unless we take an error. This works out because + * ldt_load() does a copy of ->p_ldt instead of directly loading it into the GDT + * (and therefore can't be using the freed old LDT), and by definition if the + * new entry didn't pass validation, then the proc shouldn't be referencing an + * entry in the extended region. + */ static void ldt_grow(proc_t *pp, uint_t seli) { @@ -891,17 +882,5 @@ ldt_grow(proc_t *pp, uint_t seli) pp->p_ldt = nldt; pp->p_ldtlimit = nsels - 1; - /* - * write new ldt segment descriptor. - */ - set_syssegd(&pp->p_ldt_desc, nldt, nldtsz - 1, SDT_SYSLDT, SEL_KPL); - - /* - * load the new ldt. - */ - kpreempt_disable(); - ldt_load(); - kpreempt_enable(); - kmem_free(oldt, oldtsz); } |