Diffstat (limited to 'usr/src/uts/i86pc')
-rw-r--r--  usr/src/uts/i86pc/Makefile.files             11
-rw-r--r--  usr/src/uts/i86pc/Makefile.i86pc             17
-rw-r--r--  usr/src/uts/i86pc/boot/boot_gdt.s            36
-rw-r--r--  usr/src/uts/i86pc/dboot/dboot_asm.s          22
-rw-r--r--  usr/src/uts/i86pc/dboot/dboot_grub.s          7
-rw-r--r--  usr/src/uts/i86pc/dboot/dboot_xen.s           7
-rw-r--r--  usr/src/uts/i86pc/io/pciex/inc.flg            1
-rw-r--r--  usr/src/uts/i86pc/ml/amd64.il               179
-rw-r--r--  usr/src/uts/i86pc/ml/bios_call_src.s        110
-rw-r--r--  usr/src/uts/i86pc/ml/comm_page.s              6
-rw-r--r--  usr/src/uts/i86pc/ml/cpr_wakecode.s         375
-rw-r--r--  usr/src/uts/i86pc/ml/fast_trap_asm.s        135
-rw-r--r--  usr/src/uts/i86pc/ml/fb_swtch_src.s         176
-rw-r--r--  usr/src/uts/i86pc/ml/ia32.il                178
-rw-r--r--  usr/src/uts/i86pc/ml/interrupt.s            110
-rw-r--r--  usr/src/uts/i86pc/ml/kpti_trampolines.s       9
-rw-r--r--  usr/src/uts/i86pc/ml/locore.s              1536
-rw-r--r--  usr/src/uts/i86pc/ml/mach_offsets.in        150
-rw-r--r--  usr/src/uts/i86pc/ml/mpcore.s               282
-rw-r--r--  usr/src/uts/i86pc/ml/notes.s                 18
-rw-r--r--  usr/src/uts/i86pc/ml/offsets.in               2
-rw-r--r--  usr/src/uts/i86pc/ml/syscall_asm.s          744
-rw-r--r--  usr/src/uts/i86pc/ml/syscall_asm_amd64.s    157
23 files changed, 66 insertions, 4202 deletions
diff --git a/usr/src/uts/i86pc/Makefile.files b/usr/src/uts/i86pc/Makefile.files
index a0509bf21d..d9d6605a63 100644
--- a/usr/src/uts/i86pc/Makefile.files
+++ b/usr/src/uts/i86pc/Makefile.files
@@ -155,21 +155,13 @@ CORE_OBJS += $(BOOT_DRIVER_OBJS)
# locore.o is special. It must be the first file relocated so that
# it is relocated just where its name implies.
#
-SPECIAL_OBJS_32 += \
- locore.o \
- fast_trap_asm.o \
- interrupt.o \
- syscall_asm.o
-
-SPECIAL_OBJS_64 += \
+SPECIAL_OBJS += \
locore.o \
fast_trap_asm.o \
interrupt.o \
syscall_asm_amd64.o \
kpti_trampolines.o
-SPECIAL_OBJS += $(SPECIAL_OBJS_$(CLASS))
-
#
# Objects that get compiled into the identity mapped PT_LOAD section of unix
# to handle the earliest part of booting.
@@ -320,7 +312,6 @@ ASSYM_DEPS += \
mpcore.o \
sseblk.o \
swtch.o \
- syscall_asm.o \
syscall_asm_amd64.o \
kpti_trampolines.o \
cpr_wakecode.o
diff --git a/usr/src/uts/i86pc/Makefile.i86pc b/usr/src/uts/i86pc/Makefile.i86pc
index 47ca5bf8e9..cec9d91ac8 100644
--- a/usr/src/uts/i86pc/Makefile.i86pc
+++ b/usr/src/uts/i86pc/Makefile.i86pc
@@ -26,5 +26,6 @@
# Copyright (c) 2013 Andrew Stormont. All rights reserved.
+# Copyright 2019 Joyent, Inc.
# Copyright 2019 OmniOS Community Edition (OmniOSce) Association.
#
#
# This makefile contains the common definitions for the i86pc unix
@@ -99,15 +100,6 @@ DEF_BUILDS = $(DEF_BUILDS64)
ALL_BUILDS = $(ALL_BUILDS64)
#
-# x86 or amd64 inline templates
-#
-INLINES_32 = $(UTSBASE)/intel/ia32/ml/ia32.il \
- $(UTSBASE)/$(PLATFORM)/ml/ia32.il
-INLINES_64 = $(UTSBASE)/intel/amd64/ml/amd64.il \
- $(UTSBASE)/$(PLATFORM)/ml/amd64.il
-INLINES += $(INLINES_$(CLASS))
-
-#
# kernel-specific optimizations; override default in Makefile.master
#
@@ -121,7 +113,7 @@ COPTIMIZE = $(COPTFLAG_$(CLASS))
CFLAGS = $(CFLAGS_XARCH)
CFLAGS += $(COPTIMIZE)
-CFLAGS += $(INLINES) -D_ASM_INLINES
+CFLAGS += -D_ASM_INLINES
CFLAGS += $(CCMODE)
CFLAGS += $(SPACEFLAG)
CFLAGS += $(CCUNBOUND)
@@ -147,9 +139,7 @@ UNIX_MAPFILE = $(UTSBASE)/$(PLATFORM)/conf/Mapfile
MODSTUBS = $(UTSBASE)/intel/ia32/ml/modstubs.s
GENASSYM_SRC = $(UTSBASE)/$(PLATFORM)/ml/genassym.c
OFFSETS_SRC = $(UTSBASE)/$(PLATFORM)/ml/offsets.in
-PLATFORM_OFFSETS_32 = $(UTSBASE)/$(PLATFORM)/ml/mach_offsets.in
-PLATFORM_OFFSETS_64 = $(UTSBASE)/intel/amd64/ml/mach_offsets.in
-PLATFORM_OFFSETS_SRC = $(PLATFORM_OFFSETS_$(CLASS))
+PLATFORM_OFFSETS_SRC = $(UTSBASE)/intel/amd64/ml/mach_offsets.in
KDI_OFFSETS_SRC = $(UTSBASE)/intel/kdi/kdi_offsets.in
#
@@ -186,7 +176,6 @@ DEBUG_COND_DBG64 =
IF_DEBUG_OBJ = $(DEBUG_COND_$(BUILD_TYPE))$(OBJS_DIR)/
$(IF_DEBUG_OBJ)trap.o := DEBUG_DEFS += -DTRAPDEBUG -DTRAPTRACE
-$(IF_DEBUG_OBJ)syscall_asm.o := DEBUG_DEFS += -DSYSCALLTRACE -DTRAPTRACE
$(IF_DEBUG_OBJ)syscall_asm_amd64.o := DEBUG_DEFS += -DSYSCALLTRACE -DTRAPTRACE
$(IF_DEBUG_OBJ)fast_trap_asm.o := DEBUG_DEFS += -DTRAPTRACE
$(IF_DEBUG_OBJ)interrupt.o := DEBUG_DEFS += -DTRAPTRACE
diff --git a/usr/src/uts/i86pc/boot/boot_gdt.s b/usr/src/uts/i86pc/boot/boot_gdt.s
index 58c74c6f41..84bad4b9c7 100644
--- a/usr/src/uts/i86pc/boot/boot_gdt.s
+++ b/usr/src/uts/i86pc/boot/boot_gdt.s
@@ -22,34 +22,21 @@
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright 2020 Joyent, Inc.
*/
-#if defined(__lint)
-#pragma pack(1)
-struct {
- uint16_t limit_low;
- uint16_t base_low;
- uint8_t base_middle;
- uint8_t attr;
- uint8_t attr_and_limit;
- uint8_t base_high;
-} global_descriptor_table[8];
-struct {
- uint16_t limit; /* sizeof (global_descriptor_table) - 1 */
- void *base; /* &global_descriptor_table */
-} gdt_info;
-#pragma pack()
-
-#else /* __lint */
+/*
+ * The boot GDT must remain in sync with the entries in intel/sys/segments.h; in
+ * particular kmdb uses B64CODE_SEL or B32CODE_SEL in perpetuity for its IDT
+ * entries (they're copied to the kernel's GDT in init_idt()).
+ *
+ * The GDT is effectively an array of user_desc_t entries.
+ */
.align 16
.data
- /*
- * This must remain in sync with the entries in intel/sys/gdt.h; in
- * particular kmdb uses B64CODE_SEL or B32CODE_SEL in perpetuity for
- * its IDT entries (they're copied to the kernel's GDT in init_idt()).
- */
global_descriptor_table:
.long 0
@@ -129,6 +116,10 @@ fake_cpu_gdt_base_24_31:
/ .long 0
/ .long 0
+
+/*
+ * This is a desctbr_t.
+ */
gdt_info:
.value gdt_info - global_descriptor_table - 1
.long global_descriptor_table
@@ -143,4 +134,3 @@ fake_cpu_ptr:
.4byte 0
.skip 0x6c0, 0
-#endif /* __lint */
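
For reference, the lint struct deleted above spelled out the classic 8-byte x86
segment descriptor, and gdt_info is the 10-byte {limit, base} operand that lgdt
consumes. A minimal C sketch of both shapes (field names follow the old lint
block and are illustrative; the kernel's real types are user_desc_t and
desctbr_t from intel/sys/segments.h):

        #include <stdint.h>

        #pragma pack(1)
        typedef struct {                 /* one 8-byte GDT entry */
                uint16_t limit_low;      /* limit bits 0..15 */
                uint16_t base_low;       /* base bits 0..15 */
                uint8_t  base_middle;    /* base bits 16..23 */
                uint8_t  attr;           /* type, DPL, present */
                uint8_t  attr_and_limit; /* flags plus limit bits 16..19 */
                uint8_t  base_high;      /* base bits 24..31 */
        } gdt_entry_t;

        typedef struct {                 /* lgdt/lidt operand (a desctbr_t) */
                uint16_t limit;          /* sizeof (gdt) - 1 */
                uint64_t base;           /* address of first entry */
        } gdt_reg_t;
        #pragma pack()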
diff --git a/usr/src/uts/i86pc/dboot/dboot_asm.s b/usr/src/uts/i86pc/dboot/dboot_asm.s
index 47e525708f..ea19df5ca3 100644
--- a/usr/src/uts/i86pc/dboot/dboot_asm.s
+++ b/usr/src/uts/i86pc/dboot/dboot_asm.s
@@ -27,27 +27,6 @@
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
-#if defined(__lint)
-
-#include "dboot_asm.h"
-
-/* ARGSUSED */
-uint32_t
-get_cpuid_edx(uint32_t *eax)
-{ return (0); }
-
-/* ARGSUSED */
-void
-outb(int port, uint8_t value)
-{}
-
-/* ARGSUSED */
-uint8_t
-inb(int port)
-{ return (0); }
-
-#else /* __lint */
-
#if defined(__amd64)
/*
@@ -140,4 +119,3 @@ inb(int port)
#endif /* __i386 */
-#endif /* __lint */
diff --git a/usr/src/uts/i86pc/dboot/dboot_grub.s b/usr/src/uts/i86pc/dboot/dboot_grub.s
index c1485b605f..6841879088 100644
--- a/usr/src/uts/i86pc/dboot/dboot_grub.s
+++ b/usr/src/uts/i86pc/dboot/dboot_grub.s
@@ -24,12 +24,6 @@
* Use is subject to license terms.
*/
-#if defined(__lint)
-
-int silence_lint_warnings = 0;
-
-#else /* __lint */
-
#include <sys/multiboot.h>
#include <sys/multiboot2.h>
#include <sys/asm_linkage.h>
@@ -372,4 +366,3 @@ longmode:
.skip 4096
.long 0
-#endif /* __lint */
diff --git a/usr/src/uts/i86pc/dboot/dboot_xen.s b/usr/src/uts/i86pc/dboot/dboot_xen.s
index dda17358d1..5ed08ee355 100644
--- a/usr/src/uts/i86pc/dboot/dboot_xen.s
+++ b/usr/src/uts/i86pc/dboot/dboot_xen.s
@@ -24,16 +24,10 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include "dboot_xboot.h"
-#if defined(__lint)
-
-#else /* __lint */
-
#if defined(__amd64)
ENTRY_NP(_start)
@@ -125,4 +119,3 @@
#endif /* __i386 */
-#endif /* __lint */
diff --git a/usr/src/uts/i86pc/io/pciex/inc.flg b/usr/src/uts/i86pc/io/pciex/inc.flg
index 370afa6195..a7e0172f57 100644
--- a/usr/src/uts/i86pc/io/pciex/inc.flg
+++ b/usr/src/uts/i86pc/io/pciex/inc.flg
@@ -105,7 +105,6 @@ echo_file usr/src/uts/intel/Makefile.files
echo_file usr/src/uts/intel/Makefile.rules
echo_file usr/src/uts/intel/Makefile.intel
echo_file usr/src/uts/intel/Makefile.targ
-echo_file usr/src/uts/intel/ia32/ml/ia32.il
echo_file usr/src/cmd/Makefile
echo_file usr/src/cmd/Makefile.cmd
echo_file usr/src/cmd/Makefile.targ
diff --git a/usr/src/uts/i86pc/ml/amd64.il b/usr/src/uts/i86pc/ml/amd64.il
deleted file mode 100644
index 9abac56955..0000000000
--- a/usr/src/uts/i86pc/ml/amd64.il
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-
-/
-/ Inline functions specific to the i86pc kernel running on bare metal.
-/
-
-/
-/ return value of cr3 register
-/
- .inline getcr3,0
- movq %cr3, %rax
- .end
-
-/
-/ reload cr3 register with its current value
-/
- .inline reload_cr3,0
- movq %cr3, %rdi
- movq %rdi, %cr3
- .end
-
-/
-/ set cr3 register with new value
-/
- .inline setcr3,0
- movq %rdi, %cr3
- .end
-
-/
-/ return value of cr8 register
-/
- .inline getcr8,0
- movq %cr8, %rax
- .end
-
-/
-/ set cr8 register
-/
- .inline setcr8,0
- movq %rdi, %cr8
- .end
-
-/
-/ enable interrupts
-/
- .inline sti,0
- sti
- .end
-
-/
-/ disable interrupts
-/
- .inline cli,0
- cli
- .end
-
-/
-/ disable interrupts and return value describing if interrupts were enabled
-/
- .inline clear_int_flag,0
- pushfq
- cli
- popq %rax
- .end
-
- .inline intr_clear,0
- pushfq
- cli
- popq %rax
- .end
-
-/
-/ return the value of the flags register
-/
- .inline getflags,0
- pushfq
- popq %rax
- .end
-
-/
-/ restore interrupt enable flag to value returned from 'clear_int_flag' above
-/
- .inline restore_int_flag,4
- testq $0x200, %rdi
- jz 1f
- sti
-1:
- .end
-
- .inline intr_restore,4
- testq $0x200, %rdi
- jz 1f
- sti
-1:
- .end
-
-/
-/ in and out
-/
- .inline inb,4
- movq %rdi, %rdx
- xorq %rax, %rax
- inb (%dx)
- .end
-
- .inline inw,4
- movq %rdi, %rdx
- xorq %rax, %rax
- inw (%dx)
- .end
-
- .inline inl,4
- movq %rdi, %rdx
- xorq %rax, %rax
- inl (%dx)
- .end
-
- .inline outb,8
- movq %rdi, %rdx
- movq %rsi, %rax
- outb (%dx)
- .end
-
- .inline outw,8
- movq %rdi, %rdx
- movq %rsi, %rax
- outw (%dx)
- .end
-
- .inline outl,8
- movq %rdi, %rdx
- movq %rsi, %rax
- outl (%dx)
- .end
-
-/*
- * Call the halt instruction. This will put the CPU to sleep until
- * it is again awoken via an interrupt.
- * This function should be called with interrupts already disabled
- * for the CPU.
- * Note that "sti" will only enable interrupts at the end of the
- * subsequent instruction...in this case: "hlt".
- */
- .inline i86_halt,0
- sti
- hlt
- .end
-/
-/ execute the bsrw instruction
-/
- .inline bsrw_insn,4
- xorl %eax, %eax
- bsrw %di, %ax
- .end
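
The clear_int_flag/intr_clear and restore_int_flag/intr_restore templates
above always travel in pairs: the caller saves %rflags (bit 0x200 is the
interrupt-enable flag), runs a short critical section with interrupts masked,
and re-enables only if they were enabled on entry. A hedged C sketch of the
calling pattern (the helper signatures mirror the inlines; the surrounding
function is illustrative):

        typedef unsigned long ulong_t;

        extern ulong_t intr_clear(void);     /* pushfq; cli; return old flags */
        extern void intr_restore(ulong_t);   /* sti iff bit 0x200 was set */

        static void
        protected_update(volatile int *counter)
        {
                ulong_t flags = intr_clear(); /* mask interrupts, save state */
                (*counter)++;                 /* short critical section */
                intr_restore(flags);          /* sti only if enabled before */
        }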
diff --git a/usr/src/uts/i86pc/ml/bios_call_src.s b/usr/src/uts/i86pc/ml/bios_call_src.s
index a587929066..d29d7f49cc 100644
--- a/usr/src/uts/i86pc/ml/bios_call_src.s
+++ b/usr/src/uts/i86pc/ml/bios_call_src.s
@@ -24,13 +24,9 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-#if defined(__lint)
-
-int silence_lint = 0;
-
-#else
+/*
+ * Copyright 2019 Joyent, Inc.
+ */
#include <sys/segments.h>
#include <sys/controlregs.h>
@@ -44,43 +40,9 @@ int silence_lint = 0;
*/
#define DATASZ .byte 0x66;
-#if defined(__amd64)
-#define MOVCR(x, y) movq x,%rax; movq %rax, y
-#define LOAD_XAX(sym) leaq sym, %rax
-#elif defined(__i386)
-#define MOVCR(x, y) movl x,%eax; movl %eax, y
-#define LOAD_XAX(sym) leal sym, %eax
-#endif
-
.globl _start
_start:
-#if defined(__i386)
-
- /*
- * Save caller registers
- */
- movl %ebp, save_ebp
- movl %esp, save_esp
- movl %ebx, save_ebx
- movl %esi, save_esi
- movl %edi, save_edi
-
- /* get registers argument into esi */
- movl 8(%esp), %esi
-
- /* put interrupt number in %bl */
- movl 4(%esp), %ebx
-
- /* Switch to a low memory stack */
- movl $_start, %esp
-
- /* allocate space for args on stack */
- subl $18, %esp
- movl %esp, %edi
-
-#elif defined(__amd64)
-
/*
* Save caller registers
*/
@@ -103,8 +65,6 @@ _start:
subq $18, %rsp
movq %rsp, %rdi
-#endif
-
/* copy args from high memory to stack in low memory */
cld
movl $18, %ecx
@@ -123,11 +83,13 @@ _start:
movw %es, save_es
movw %fs, save_fs
movw %gs, save_gs
- MOVCR( %cr4, save_cr4)
- MOVCR( %cr3, save_cr3)
- MOVCR( %cr0, save_cr0)
+ movq %cr4, %rax
+ movq %rax, save_cr4
+ movq %cr3, %rax
+ movq %rax, save_cr3
+ movq %cr0, %rax
+ movq %rax, save_cr0
-#if defined(__amd64)
/*
* save/clear the extension parts of the fs/gs base registers and cr8
*/
@@ -157,18 +119,17 @@ _start:
movq %cr8, %rax
movq %rax, save_cr8
-#endif
/*
* set offsets in 16 bit ljmp instructions below
*/
- LOAD_XAX(enter_real)
+ leaq enter_real, %rax
movw %ax, enter_real_ljmp
- LOAD_XAX(enter_protected)
+ leaq enter_protected, %rax
movw %ax, enter_protected_ljmp
- LOAD_XAX(gdt_info)
+ leaq gdt_info, %rax
movw %ax, gdt_info_load
/*
@@ -181,7 +142,6 @@ _start:
/*
* zero out all the registers to make sure they're 16 bit clean
*/
-#if defined(__amd64)
xorq %r8, %r8
xorq %r9, %r9
xorq %r10, %r10
@@ -190,7 +150,6 @@ _start:
xorq %r13, %r13
xorq %r14, %r14
xorq %r15, %r15
-#endif
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
@@ -205,9 +164,8 @@ _start:
lgdt gdt_info
lidt idt_info
-#if defined(__amd64)
/*
- * Shut down 64 bit mode. First get into compatiblity mode.
+ * Shut down 64 bit mode. First get into compatibility mode.
*/
movq %rsp, %rax
pushq $B32DATA_SEL
@@ -238,7 +196,6 @@ _start:
rdmsr
btcl $8, %eax /* bit 8 Long Mode Enable bit */
wrmsr
-#endif
/*
* ok.. now enter 16 bit mode, so we can shut down protected mode
@@ -351,7 +308,6 @@ enter_protected:
movl save_cr3, %eax
movl %eax, %cr3
-#if defined(__amd64)
/*
* re-enable long mode
*/
@@ -359,7 +315,6 @@ enter_protected:
rdmsr
btsl $8, %eax
wrmsr
-#endif
movl save_cr0, %eax
movl %eax, %cr0
@@ -367,7 +322,6 @@ enter_protected:
enter_paging:
-#if defined(__amd64)
/*
* transition back to 64 bit mode
*/
@@ -376,7 +330,6 @@ enter_paging:
lret
longmode:
.code64
-#endif
/*
* restore caller frame pointer and segment registers
*/
@@ -388,15 +341,9 @@ longmode:
* in its corresponding GDT selector. The busy bit is the 2nd bit in
* the 5th byte of the selector.
*/
-#if defined(__i386)
- movzwl save_tr, %eax
- addl save_gdt+2, %eax
- btcl $1, 5(%eax)
-#elif defined(__amd64)
movzwq save_tr, %rax
addq save_gdt+2, %rax
btcl $1, 5(%rax)
-#endif
ltr save_tr
movw save_ds, %ds
movw save_ss, %ss
@@ -404,18 +351,11 @@ longmode:
movw save_fs, %fs
movw save_gs, %gs
-#if defined(__i386)
- pushl save_cs
- pushl $.newcs
- lret
-#elif defined(__amd64)
pushq save_cs
pushq $.newcs
lretq
-#endif
.newcs:
-#if defined(__amd64)
/*
* restore the hidden kernel segment base register values
*/
@@ -439,29 +379,10 @@ longmode:
je 1f
movq %rax, %cr8
1:
-#endif
/*
* copy results to caller's location, then restore remaining registers
*/
-#if defined(__i386)
- movl save_esp, %edi
- movl 8(%edi), %edi
- movl %esp, %esi
- movl $18, %ecx
- rep
- movsb
- movw 18(%esp), %ax
- andl $0xffff, %eax
- movl save_ebx, %ebx
- movl save_esi, %esi
- movl save_edi, %edi
- movl save_esp, %esp
- movl save_ebp, %ebp
- movl save_esp, %esp
- ret
-
-#elif defined(__amd64)
movq save_rsi, %rdi
movq %rsp, %rsi
movq $18, %rcx
@@ -478,8 +399,6 @@ longmode:
movq save_rsp, %rsp
ret
-#endif
-
/*
* Caller's registers to restore
@@ -497,7 +416,6 @@ save_esp:
.long 0
.align 8
-#if defined(__amd64)
save_rsi:
.quad 0
save_rbx:
@@ -522,7 +440,6 @@ save_fsbase:
.quad 0
save_cr8:
.quad 0
-#endif /* __amd64 */
save_idt:
.quad 0
@@ -562,4 +479,3 @@ idt_info:
* We need to trampoline thru a gdt we have in low memory.
*/
#include "../boot/boot_gdt.s"
-#endif /* __lint */
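
The busy-bit sequence retained above (movzwq save_tr; addq save_gdt+2;
btcl $1, 5(%rax)) exists because ltr faults if the TSS descriptor is already
marked busy. The same computation in C, as a hedged sketch (the helper and its
names are illustrative, not kernel API):

        #include <stdint.h>

        /*
         * Clear the TSS busy bit before reloading %tr with ltr. gdt_base is
         * the saved GDT base (save_gdt+2) and tr_sel the saved selector; the
         * busy bit is bit 1 of byte 5 of the 8-byte descriptor. The assembly
         * adds the raw selector, assuming its RPL/TI bits are zero.
         */
        static void
        tss_clear_busy(uint8_t *gdt_base, uint16_t tr_sel)
        {
                uint8_t *desc = gdt_base + (tr_sel & ~0x7); /* sel -> entry */
                desc[5] &= ~0x02;                           /* busy -> avail */
        }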
diff --git a/usr/src/uts/i86pc/ml/comm_page.s b/usr/src/uts/i86pc/ml/comm_page.s
index 49d39397bf..e03fec4fe7 100644
--- a/usr/src/uts/i86pc/ml/comm_page.s
+++ b/usr/src/uts/i86pc/ml/comm_page.s
@@ -20,7 +20,7 @@
#include <sys/comm_page.h>
#include <sys/tsc.h>
-#if defined(_GENCTF) || defined(__lint)
+#if defined(_GENCTF)
hrtime_t tsc_last;
hrtime_t tsc_resume_cap;
@@ -37,7 +37,7 @@ hrtime_t tsc_sync_tick_delta[NCPU];
comm_page_t comm_page;
-#else /* defined(_GENCTF) || defined(__lint) */
+#else /* defined(_GENCTF) */
#include "assym.h"
@@ -85,4 +85,4 @@ comm_page_t comm_page;
/* pad out the rest of the page from the struct end */
.fill _CONST(COMM_PAGE_SIZE - COMM_PAGE_S_SIZE), 1, 0
-#endif /* defined(_GENCTF) || defined(__lint) */
+#endif /* defined(_GENCTF) */
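
The _GENCTF guard kept here is the pattern that replaces the old __lint one:
the C declarations are compiled only so CTF type data can be generated for the
symbols, while the assembly after the #else lays out the real page.
Schematically:

        #if defined(_GENCTF)
        hrtime_t tsc_last;   /* C view: exists only to harvest CTF types */
        #else
        /* assembly provides the actual, padded comm page layout */
        #endif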
diff --git a/usr/src/uts/i86pc/ml/cpr_wakecode.s b/usr/src/uts/i86pc/ml/cpr_wakecode.s
index 4e4d2225b7..7b0d642884 100644
--- a/usr/src/uts/i86pc/ml/cpr_wakecode.s
+++ b/usr/src/uts/i86pc/ml/cpr_wakecode.s
@@ -30,10 +30,8 @@
#include <sys/x86_archext.h>
#include <sys/cpr_wakecode.h>
-#if !defined(__lint)
#include <sys/segments.h>
#include "assym.h"
-#endif
#ifdef DEBUG
#define LED 1
@@ -76,17 +74,6 @@
*
*/
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-int
-wc_save_context(wc_cpu_t *pcpu)
-{ return 0; }
-
-#else /* lint */
-
-#if defined(__amd64)
-
ENTRY_NP(wc_save_context)
movq (%rsp), %rdx / return address
@@ -174,59 +161,6 @@ wc_save_context(wc_cpu_t *pcpu)
SET_SIZE(wc_save_context)
-#elif defined(__i386)
-
- ENTRY_NP(wc_save_context)
-
- movl 4(%esp), %eax / wc_cpu_t *
- movl %eax, WC_VIRTADDR(%eax)
-
- movl (%esp), %edx / return address
- movl %edx, WC_RETADDR(%eax)
-
- str WC_TR(%eax) / stash everything else we need
- sgdt WC_GDT(%eax)
- sldt WC_LDT(%eax)
- sidt WC_IDT(%eax)
-
- movl %cr0, %edx
- movl %edx, WC_CR0(%eax)
- movl %cr3, %edx
- movl %edx, WC_CR3(%eax)
- movl %cr4, %edx
- movl %edx, WC_CR4(%eax)
-
- movl %ebx, WC_EBX(%eax)
- movl %edi, WC_EDI(%eax)
- movl %esi, WC_ESI(%eax)
- movl %ebp, WC_EBP(%eax)
- movl %esp, WC_ESP(%eax)
-
- movw %ss, WC_SS(%eax)
- movw %cs, WC_CS(%eax)
- movw %ds, WC_DS(%eax)
- movw %es, WC_ES(%eax)
- movw %fs, WC_FS(%eax)
- movw %gs, WC_GS(%eax)
-
- pushfl
- popl WC_EFLAGS(%eax)
-
- pushl %gs:CPU_ID / save current cpu id
- popl WC_CPU_ID(%eax)
-
- wbinvd / flush the cache
- mfence
-
- movl $1, %eax / at suspend return 1
- ret
-
- SET_SIZE(wc_save_context)
-
-#endif /* __amd64 */
-
-#endif /* lint */
-
/*
* Our assumptions:
@@ -244,20 +178,6 @@ wc_save_context(wc_cpu_t *pcpu)
* - We return to original caller (a la setjmp)
*/
-#if defined(lint) || defined(__lint)
-
-void
-wc_rm_start(void)
-{}
-
-void
-wc_rm_end(void)
-{}
-
-#else /* lint */
-
-#if defined(__amd64)
-
ENTRY_NP(wc_rm_start)
/*
@@ -872,298 +792,3 @@ A1:
wc_rm_end:
nop
-#elif defined(__i386)
-
- ENTRY_NP(wc_rm_start)
-
-/entry: jmp entry / stop here for HDT
-
- cli
- movw %cs, %ax
- movw %ax, %ds / establish ds ...
- movw %ax, %ss / ... and ss:esp
- D16 movl $WC_STKSTART, %esp
-
-#if LED
- D16 movl $WC_LED, %edx
- D16 movb $0xd1, %al
- outb (%dx)
-#endif
-
-#if SERIAL
- D16 movl $WC_COM, %edx
- D16 movb $0x61, %al
- outb (%dx)
-#endif
-
-
- D16 call vgainit
- D16 call kbdinit
- D16 call cominit
-
-#if LED
- D16 movl $WC_LED, %edx
- D16 movb $0xd2, %al
- outb (%dx)
-#endif
-
-#if SERIAL
- D16 movl $WC_COM, %edx
- D16 movb $0x62, %al
- outb (%dx)
-#endif
-
- D16 A16 movl $WC_CPU, %ebx / base add of wc_cpu_t
-
-#if LED
- D16 movb $0xd3, %al
- outb $WC_LED
-#endif
-
-#if SERIAL
- D16 movl $WC_COM, %edx
- D16 movb $0x63, %al
- outb (%dx)
-#endif
-
- D16 A16 movl %cs:WC_DS(%ebx), %edx / %ds post prot/paging transit
-
-#if LED
- D16 movb $0xd4, %al
- outb $WC_LED
-#endif
-
- D16 A16 lgdt %cs:WC_GDT(%ebx) / restore gdt and idtr
- D16 A16 lidt %cs:WC_IDT(%ebx)
-
-#if LED
- D16 movb $0xd5, %al
- outb $WC_LED
-#endif
-
- D16 A16 movl %cs:WC_CR4(%ebx), %eax / restore cr4
- D16 andl $_BITNOT(CR4_PGE), %eax / don't set Global Enable yet
- movl %eax, %cr4
-
-#if LED
- D16 movb $0xd6, %al
- outb $WC_LED
-#endif
-
- D16 A16 movl %cs:WC_CR3(%ebx), %eax / set PDPT
- movl %eax, %cr3
-
-#if LED
- D16 movb $0xd7, %al
- outb $WC_LED
-#endif
-
- D16 A16 movl %cs:WC_CR0(%ebx), %eax / enable prot/paging, etc.
- movl %eax, %cr0
-
-#if LED
- D16 movb $0xd8, %al
- outb $WC_LED
-#endif
-
- D16 A16 movl %cs:WC_VIRTADDR(%ebx), %ebx / virtaddr of wc_cpu_t
-
-#if LED
- D16 movb $0xd9, %al
- outb $WC_LED
-#endif
-
-#if LED
- D16 movb $0xda, %al
- outb $WC_LED
-#endif
-
- jmp flush / flush prefetch queue
-flush:
- D16 pushl $KCS_SEL
- D16 pushl $kernel_wc_code
- D16 lret / re-appear at kernel_wc_code
-
-
-/*
- * Support routine to re-initialize VGA subsystem
- */
-vgainit:
- D16 ret
-
-/*
- * Support routine to re-initialize keyboard (which is USB - help!)
- */
-kbdinit:
- D16 ret
-
-/*
- * Support routine to re-initialize COM ports to something sane for debug output
- */
-cominit:
-#if DEBUG
-/*
- * on debug kernels we need to initialize COM1 & COM2 here, so that
- * we can get debug output before the asy driver has resumed
- */
-
-/ select COM1
- D16 movl $_CONST(COM1+LCR), %edx
- D16 movb $DLAB, %al / divisor latch
- outb (%dx)
-
- D16 movl $_CONST(COM1+DLL), %edx / divisor latch lsb
- D16 movb $B9600L, %al / divisor latch
- outb (%dx)
-
- D16 movl $_CONST(COM1+DLH), %edx / divisor latch hsb
- D16 movb $B9600H, %al / divisor latch
- outb (%dx)
-
- D16 movl $_CONST(COM1+LCR), %edx / select COM1
- D16 movb $_CONST(STOP1|BITS8), %al / 1 stop bit, 8bit word len
- outb (%dx)
-
- D16 movl $_CONST(COM1+MCR), %edx / select COM1
- D16 movb $_CONST(RTS|DTR), %al / set RTS and DTR
- outb (%dx)
-
-/ select COM2
- D16 movl $_CONST(COM2+LCR), %edx
- D16 movb $DLAB, %al / divisor latch
- outb (%dx)
-
- D16 movl $_CONST(COM2+DLL), %edx / divisor latch lsb
- D16 movb $B9600L, %al / divisor latch
- outb (%dx)
-
- D16 movl $_CONST(COM2+DLH), %edx / divisor latch hsb
- D16 movb $B9600H, %al / divisor latch
- outb (%dx)
-
- D16 movl $_CONST(COM2+LCR), %edx / select COM2
- D16 movb $_CONST(STOP1|BITS8), %al / 1 stop bit, 8bit word len
- outb (%dx)
-
- D16 movl $_CONST(COM2+MCR), %edx / select COM2
- D16 movb $_CONST(RTS|DTR), %al / set RTS and DTR
- outb (%dx)
-#endif /* DEBUG */
-
- D16 ret
-
- .globl wc_rm_end
-wc_rm_end:
- nop
-
- .globl kernel_wc_code
-kernel_wc_code:
- / At this point we are with kernel's cs and proper eip.
- / We will be executing not from the copy in real mode platter,
- / but from the original code where boot loaded us.
- / By this time GDT and IDT are loaded as is cr0, cr3 and cr4.
- / %ebx is wc_cpu
- / %dx is our ds
-
-#if LED
- D16 movb $0xdb, %al
- outb $WC_LED
-#endif
-
-/ got here OK
-
- movw %dx, %ds / $KDS_SEL
-
-#if LED
- movb $0xdc, %al
- outb $WC_LED
-#endif
-
- /*
- * Before proceeding, enable usage of the page table NX bit if
- * that's how the page tables are set up.
- */
- bt $X86FSET_NX, x86_featureset
- jnc 1f
- movl $MSR_AMD_EFER, %ecx
- rdmsr
- orl $AMD_EFER_NXE, %eax
- wrmsr
-1:
-
- movl WC_CR4(%ebx), %eax / restore full cr4 (with Global Enable)
- movl %eax, %cr4
-
-
- lldt WC_LDT(%ebx) / $LDT_SEL
-
- movzwl WC_TR(%ebx), %eax / clear TSS busy bit
- addl WC_GDT+2(%ebx), %eax
- andl $_BITNOT(0x200), 4(%eax)
- ltr WC_TR(%ebx) / $UTSS_SEL
-
- movw WC_SS(%ebx), %ss / restore segment registers
- movw WC_ES(%ebx), %es
- movw WC_FS(%ebx), %fs
- movw WC_GS(%ebx), %gs
-
- /*
- * set the stack pointer to point into the identity mapped page
- * temporarily, so we can make function calls
- */
- .globl rm_platter_va
- movl rm_platter_va, %eax
- movl $WC_STKSTART, %esp
- addl %eax, %esp
- movl %esp, %ebp
-
- /*
- * if we are not running on the boot CPU restore stack contents by
- * calling i_cpr_restore_stack(curthread, save_stack);
- */
- call i_cpr_bootcpuid
- cmpl %eax, WC_CPU_ID(%ebx)
- je 2f
-
- pushl WC_SAVED_STACK(%ebx)
- pushl %gs:CPU_THREAD
- call i_cpr_restore_stack
- addl $0x10, %esp
-2:
-
- movl WC_ESP(%ebx), %esp
- movl %esp, %ebp
-
- movl WC_RETADDR(%ebx), %eax / return to caller of wc_save_context
- movl %eax, (%esp)
-
- /*
- * APIC initialization, skip iff function pointer is NULL
- */
- cmpl $0, ap_mlsetup
- je 3f
- call *ap_mlsetup
-3:
-
- call *cpr_start_cpu_func
-
- pushl WC_EFLAGS(%ebx) / restore flags
- popfl
-
- movl WC_EDI(%ebx), %edi / restore general registers
- movl WC_ESI(%ebx), %esi
- movl WC_EBP(%ebx), %ebp
- movl WC_EBX(%ebx), %ebx
-
-/exit: jmp exit / stop here for HDT
-
- xorl %eax, %eax / at wakeup return 0
- ret
-
- SET_SIZE(wc_rm_start)
-
-
-#endif /* defined(__amd64) */
-
-#endif /* lint */
-
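
As the deleted lint stubs and the comments record, wc_save_context() is
setjmp-like: it returns 1 on the initial suspend path, and 0 when wc_rm_start
later restores the saved context and returns through WC_RETADDR. A sketch of
the caller's shape (illustrative; enter_sleep_state() is a hypothetical
stand-in for the actual i_cpr power-down path):

        typedef struct wc_cpu wc_cpu_t;         /* from sys/cpr_wakecode.h */
        extern int wc_save_context(wc_cpu_t *);
        extern void enter_sleep_state(void);    /* hypothetical */

        static void
        suspend_this_cpu(wc_cpu_t *cpu)
        {
                if (wc_save_context(cpu) != 0) {
                        /* first return (1): context saved, power down */
                        enter_sleep_state();
                } else {
                        /* second return (0): resumed via wc_rm_start */
                }
        }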
diff --git a/usr/src/uts/i86pc/ml/fast_trap_asm.s b/usr/src/uts/i86pc/ml/fast_trap_asm.s
index bb3d0b3686..af4c164bdb 100644
--- a/usr/src/uts/i86pc/ml/fast_trap_asm.s
+++ b/usr/src/uts/i86pc/ml/fast_trap_asm.s
@@ -29,15 +29,6 @@
#include <sys/regset.h>
#include <sys/psw.h>
-#if defined(__lint)
-
-#include <sys/types.h>
-#include <sys/thread.h>
-#include <sys/systm.h>
-#include <sys/lgrp.h>
-
-#else /* __lint */
-
#include <sys/pcb.h>
#include <sys/trap.h>
#include <sys/ftrace.h>
@@ -48,41 +39,6 @@
#include "assym.h"
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-hrtime_t
-get_hrtime(void)
-{ return (0); }
-
-hrtime_t
-get_hrestime(void)
-{
- hrtime_t ts;
-
- gethrestime((timespec_t *)&ts);
- return (ts);
-}
-
-hrtime_t
-gethrvtime(void)
-{
- klwp_t *lwp = ttolwp(curthread);
- struct mstate *ms = &lwp->lwp_mstate;
-
- return (gethrtime() - ms->ms_state_start + ms->ms_acct[LMS_USER]);
-}
-
-uint64_t
-getlgrp(void)
-{
- return (((uint64_t)(curthread->t_lpl->lpl_lgrpid) << 32) |
- curthread->t_cpu->cpu_id);
-}
-
-#else /* __lint */
/*
* XX64: We are assuming that libc continues to expect the 64-bit value being
@@ -95,8 +51,6 @@ getlgrp(void)
* assumptions are not true.
*/
-#if defined(__amd64)
-
.globl gethrtimef
ENTRY_NP(get_hrtime)
FAST_INTR_PUSH
@@ -108,20 +62,6 @@ getlgrp(void)
FAST_INTR_RETURN
SET_SIZE(get_hrtime)
-#elif defined(__i386)
-
- .globl gethrtimef
- ENTRY_NP(get_hrtime)
- FAST_INTR_PUSH
- call *gethrtimef
- FAST_INTR_POP
- FAST_INTR_RETURN
- SET_SIZE(get_hrtime)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
-
.globl gethrestimef
ENTRY_NP(get_hrestime)
FAST_INTR_PUSH
@@ -136,25 +76,13 @@ getlgrp(void)
FAST_INTR_RETURN
SET_SIZE(get_hrestime)
-#elif defined(__i386)
-
- .globl gethrestimef
- ENTRY_NP(get_hrestime)
- FAST_INTR_PUSH
- subl $TIMESPEC_SIZE, %esp
- pushl %esp
- call *gethrestimef
- movl _CONST(4 + 0)(%esp), %eax
- movl _CONST(4 + CLONGSIZE)(%esp), %edx
- addl $_CONST(4 + TIMESPEC_SIZE), %esp
- FAST_INTR_POP
- FAST_INTR_RETURN
- SET_SIZE(get_hrestime)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
-
+ /*
+ * In C this is
+ *
+ * klwp_t *lwp = ttolwp(curthread);
+ * struct mstate *ms = &lwp->lwp_mstate;
+ * return (gethrtime() - ms->ms_state_start + ms->ms_acct[LMS_USER]);
+ */
ENTRY_NP(gethrvtime)
FAST_INTR_PUSH
call gethrtime_unscaled /* get time since boot */
@@ -173,34 +101,12 @@ getlgrp(void)
FAST_INTR_RETURN
SET_SIZE(gethrvtime)
-#elif defined(__i386)
-
- ENTRY_NP(gethrvtime)
- FAST_INTR_PUSH
- call gethrtime_unscaled /* get time since boot */
- movl %gs:CPU_LWP, %ecx /* current lwp */
- subl LWP_MS_STATE_START(%ecx), %eax /* - ms->ms_state_start */
- sbbl LWP_MS_STATE_START+4(%ecx), %edx
- addl LWP_ACCT_USER(%ecx), %eax /* add ms->ms_acct[LMS_USER] */
- adcl LWP_ACCT_USER+4(%ecx), %edx
- subl $0x8, %esp
- leal (%esp), %ecx
- movl %eax, (%ecx)
- movl %edx, 4(%ecx)
- pushl %ecx
- call scalehrtime
- popl %ecx
- movl (%ecx), %eax
- movl 4(%ecx), %edx
- addl $0x8, %esp
- FAST_INTR_POP
- FAST_INTR_RETURN
- SET_SIZE(gethrvtime)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
-
+ /*
+ * In C this is:
+ *
+ * return (((uint64_t)(curthread->t_lpl->lpl_lgrpid) << 32) |
+ * curthread->t_cpu->cpu_id);
+ */
ENTRY_NP(getlgrp)
FAST_INTR_PUSH
movq %gs:CPU_THREAD, %rcx
@@ -211,18 +117,3 @@ getlgrp(void)
FAST_INTR_RETURN
SET_SIZE(getlgrp)
-#elif defined(__i386)
-
- ENTRY_NP(getlgrp)
- FAST_INTR_PUSH
- movl %gs:CPU_THREAD, %ecx
- movl T_LPL(%ecx), %ecx
- movl LPL_LGRPID(%ecx), %edx
- movl %gs:CPU_ID, %eax
- FAST_INTR_POP
- FAST_INTR_RETURN
- SET_SIZE(getlgrp)
-
-#endif /* __i386 */
-
-#endif /* __lint */
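
Given the packing described in the "In C this is" comment for getlgrp(), the
lgroup id in the high 32 bits and the CPU id in the low 32, a caller unpacks
the fast-trap result like this (sketch; how libc actually issues the trap is
elided):

        #include <stdint.h>

        extern uint64_t getlgrp(void);           /* the fast trap above */

        static void
        get_lgrp_and_cpu(uint32_t *lgrpid, uint32_t *cpuid)
        {
                uint64_t res = getlgrp();
                *lgrpid = (uint32_t)(res >> 32); /* t_lpl->lpl_lgrpid */
                *cpuid = (uint32_t)res;          /* t_cpu->cpu_id */
        }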
diff --git a/usr/src/uts/i86pc/ml/fb_swtch_src.s b/usr/src/uts/i86pc/ml/fb_swtch_src.s
index 4d1789fc9b..5f614599ba 100644
--- a/usr/src/uts/i86pc/ml/fb_swtch_src.s
+++ b/usr/src/uts/i86pc/ml/fb_swtch_src.s
@@ -26,12 +26,6 @@
*/
-#if defined(__lint)
-
-int fb_swtch_silence_lint = 0;
-
-#else
-
#include <sys/asm_linkage.h>
#include <sys/segments.h>
#include <sys/controlregs.h>
@@ -96,7 +90,6 @@ _start:
/* Disable interrupts */
cli
-#if defined(__amd64)
/* Switch to a low memory stack */
movq $_start, %rsp
addq $FASTBOOT_STACK_OFFSET, %rsp
@@ -112,23 +105,6 @@ _start:
rep
smovb
-#elif defined(__i386)
- movl 0x4(%esp), %esi /* address of fastboot info struct */
-
- /* Switch to a low memory stack */
- movl $_start, %esp
- addl $FASTBOOT_STACK_OFFSET, %esp
-
- /* Copy struct to stack */
- movl %esp, %edi /* destination on the new stack */
- movl $FI_VALID, %ecx /* size to copy */
- rep
- smovb
-
-#endif
-
-#if defined(__amd64)
-
xorl %eax, %eax
xorl %edx, %edx
@@ -141,11 +117,9 @@ _start:
movl $MSR_AMD_KGSBASE, %ecx
wrmsr
-#endif
/*
* zero out all the registers to make sure they're 16 bit clean
*/
-#if defined(__amd64)
xorq %r8, %r8
xorq %r9, %r9
xorq %r10, %r10
@@ -154,25 +128,21 @@ _start:
xorq %r13, %r13
xorq %r14, %r14
xorq %r15, %r15
-#endif
xorl %eax, %eax
xorl %ebx, %ebx
xorl %ecx, %ecx
xorl %edx, %edx
xorl %ebp, %ebp
-#if defined(__amd64)
/*
* Load our own GDT
*/
lgdt gdt_info
-#endif
/*
* Load our own IDT
*/
lidt idt_info
-#if defined(__amd64)
/*
* Invalidate all TLB entries.
* Load temporary pagetables to copy kernel and boot-archive
@@ -229,13 +199,10 @@ _start:
* - turning off PCID in cr4
* - disabling LME (long mode enable) in EFER (extended feature reg)
*/
-#endif
DISABLE_PAGING /* clobbers %eax */
-#if defined(__amd64)
ljmp $B32CODE_SEL, $1f
1:
-#endif
/*
* Clear PGE, PAE and PSE flags as dboot expects them to be
@@ -245,68 +212,11 @@ _start:
andl $_BITNOT(CR4_PGE | CR4_PAE | CR4_PSE), %eax
movl %eax, %cr4
-#if defined(__amd64)
movl $MSR_AMD_EFER, %ecx /* Extended Feature Enable */
rdmsr
btcl $8, %eax /* bit 8 Long Mode Enable bit */
wrmsr
-#elif defined(__i386)
- /*
- * If fi_has_pae is set, re-enable paging with PAE.
- */
- leal FI_FILES(%esp), %ebx /* offset to the files */
- movl FI_HAS_PAE(%esp), %edi /* need to enable paging or not */
- cmpl $0, %edi
- je paging_on /* no need to enable paging */
-
- movl FI_LAST_TABLE_PA(%esp), %esi /* page table PA */
-
- /*
- * Turn on PAE
- */
- movl %cr4, %eax
- orl $CR4_PAE, %eax
- movl %eax, %cr4
-
- /*
- * Load top pagetable base address into cr3
- */
- movl FI_PAGETABLE_PA(%esp), %eax
- movl %eax, %cr3
-
- movl %cr0, %eax
- orl $_CONST(CR0_PG | CR0_WP | CR0_AM), %eax
- andl $_BITNOT(CR0_NW | CR0_CD), %eax
- movl %eax, %cr0
- jmp paging_on
-paging_on:
-
- /* copy unix to final destination */
- leal _MUL(FASTBOOT_UNIX, FI_FILES_INCR)(%ebx), %edx
- call map_copy
-
- /* copy boot archive to final destination */
- leal _MUL(FASTBOOT_BOOTARCHIVE, FI_FILES_INCR)(%ebx), %edx
- call map_copy
-
- /* Disable paging one more time */
- DISABLE_PAGING
-
- /* Copy sections if there are any */
- leal _MUL(FASTBOOT_UNIX, FI_FILES_INCR)(%ebx), %edx
- movl FB_SECTCNT(%edx), %eax
- cmpl $0, %eax
- je 1f
- call copy_sections
-1:
-
- /* Whatever flags we turn on we need to turn off */
- movl %cr4, %eax
- andl $_BITNOT(CR4_PAE), %eax
- movl %eax, %cr4
-#endif /* __i386 */
-
dboot_jump:
/* Jump to dboot */
movl $DBOOT_ENTRY_ADDRESS, %edi
@@ -314,8 +224,6 @@ dboot_jump:
movl $MB_BOOTLOADER_MAGIC, %eax
jmp *%edi
-#if defined(__amd64)
-
.code64
ENTRY_NP(copy_sections)
/*
@@ -361,89 +269,6 @@ dboot_jump:
ret
SET_SIZE(map_copy)
-#elif defined(__i386)
-
- ENTRY_NP(copy_sections)
- /*
- * On entry
- * %edx points to the fboot_file_t
- * %eax contains the number of sections
- */
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
-
- movl %eax, %ebp
-
- COPY_SECT(%edx, %ebx, %ebp)
-
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
- SET_SIZE(copy_sections)
-
- ENTRY_NP(map_copy)
- /*
- * On entry
- * %edx points to the fboot_file_t
- * %edi has FB_HAS_PAE(%esp)
- * %esi has FI_LAST_TABLE_PA(%esp)
- */
- pushl %eax
- pushl %ebx
- pushl %ecx
- pushl %edx
- pushl %ebp
- pushl %esi
- pushl %edi
- movl %esi, %ebp /* Save page table PA in %ebp */
-
- movl FB_PTE_LIST_PA(%edx), %eax /* PA list of the source */
- movl FB_DEST_PA(%edx), %ebx /* PA of the destination */
-
-loop:
- movl (%eax), %esi /* Are we done? */
- cmpl $FASTBOOT_TERMINATE, %esi
- je done
-
- cmpl $1, (%esp) /* Is paging on? */
- jne no_paging /* Nope */
-
- movl %ebp, %edi /* Page table PA */
- movl %esi, (%edi) /* Program low 32-bit */
- movl 4(%eax), %esi /* high bits of the table */
- movl %esi, 4(%edi) /* Program high 32-bit */
- movl %cr3, %esi /* Reload cr3 */
- movl %esi, %cr3
- movl FB_VA(%edx), %esi /* Load from VA */
- jmp do_copy
-no_paging:
- andl $_BITNOT(MMU_PAGEOFFSET), %esi /* clear lower 12-bit */
-do_copy:
- movl %ebx, %edi
- movl $PAGESIZE, %ecx
- shrl $2, %ecx /* 4-byte at a time */
- rep
- smovl
- addl $8, %eax /* We built the PTEs as 8-byte entries */
- addl $PAGESIZE, %ebx
- jmp loop
-done:
- popl %edi
- popl %esi
- popl %ebp
- popl %edx
- popl %ecx
- popl %ebx
- popl %eax
- ret
- SET_SIZE(map_copy)
-#endif /* __i386 */
-
-
idt_info:
.value 0x3ff
.quad 0
@@ -452,4 +277,3 @@ idt_info:
* We need to trampoline thru a gdt we have in low memory.
*/
#include "../boot/boot_gdt.s"
-#endif /* __lint */
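
Both this file and bios_call_src.s drop back to 32-bit legacy mode the same
way: clear LME (bit 8) in the EFER MSR while paging is off. In C-with-helpers
form (a sketch; rdmsr/wrmsr stand for the privileged instructions):

        #include <stdint.h>

        #define MSR_AMD_EFER  0xc0000080u   /* extended feature enable */
        #define AMD_EFER_LME  (1u << 8)     /* long mode enable */

        extern uint64_t rdmsr(uint32_t msr);            /* privileged */
        extern void wrmsr(uint32_t msr, uint64_t val);  /* privileged */

        static void
        disable_long_mode(void)
        {
                /* paging must already be off before clearing LME */
                wrmsr(MSR_AMD_EFER,
                    rdmsr(MSR_AMD_EFER) & ~(uint64_t)AMD_EFER_LME);
        }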
diff --git a/usr/src/uts/i86pc/ml/ia32.il b/usr/src/uts/i86pc/ml/ia32.il
deleted file mode 100644
index 44354b8bca..0000000000
--- a/usr/src/uts/i86pc/ml/ia32.il
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-/
-/ Inline functions specific to the i86pc kernel running on bare metal.
-/
-
-/
-/ return value of cr3 register
-/
- .inline getcr3,0
- movl %cr3, %eax
- .end
-
-/
-/ reload cr3 register with its current value
-/
- .inline reload_cr3,0
- movl %cr3, %eax
- movl %eax, %cr3
- .end
-
-/*
- * Put a new value into cr3 (page table base register)
- * void setcr3(void *value)
- */
- .inline setcr3,4
- movl (%esp), %eax
- movl %eax, %cr3
- .end
-
-/
-/ enable interrupts
-/
- .inline sti,0
- sti
- .end
-
-/
-/ disable interrupts
-/
- .inline cli,0
- cli
- .end
-
-/
-/ disable interrupts and return value describing if interrupts were enabled
-/
- .inline clear_int_flag,0
- pushfl
- cli
- popl %eax
- .end
-
- .inline intr_clear,0
- pushfl
- cli
- popl %eax
- .end
-
-/
-/ return the flags register
-/
- .inline getflags,0
- pushfl
- popl %eax
- .end
-
-/
-/ restore interrupt enable flag to value returned from 'clear_int_flag' above
-/
- .inline restore_int_flag,4
- testl $0x200, (%esp)
- jz 1f
- sti
-1:
- .end
-
- .inline intr_restore,4
- testl $0x200, (%esp)
- jz 1f
- sti
-1:
- .end
-
-/
-/ in and out
-/
- .inline inb,4
- movl (%esp), %edx
- xorl %eax, %eax
- inb (%dx)
- .end
-
- .inline inw,4
- movl (%esp), %edx
- xorl %eax, %eax
- inw (%dx)
- .end
-
- .inline inl,4
- movl (%esp), %edx
- xorl %eax, %eax
- inl (%dx)
- .end
-
- .inline outb,8
- movl (%esp), %edx
- movl 4(%esp), %eax
- outb (%dx)
- .end
-
- .inline outw,8
- movl (%esp), %edx
- movl 4(%esp), %eax
- outw (%dx)
- .end
-
- .inline outl,8
- movl (%esp), %edx
- movl 4(%esp), %eax
- outl (%dx)
- .end
-
-/*
- * Invalidate TLB translation to 1 page.
- * void mmu_tlbflush_entry(void *addr)
- */
- .inline mmu_tlbflush_entry,4
- movl (%esp), %eax
- invlpg (%eax)
- .end
-
-/*
- * Call the halt instruction. This will put the CPU to sleep until
- * it is again awoken via an interrupt.
- * This function should be called with interrupts already disabled
- * for the CPU.
- * Note that "sti" will only enable interrupts at the end of the
- * subsequent instruction...in this case: "hlt".
- */
- .inline i86_halt,0
- sti
- hlt
- .end
-
-/*
- * execute the bsrw instruction
- * int bsrw_insn(uint16_t)
- */
- .inline bsrw_insn,4
- xorl %eax, %eax
- movw (%esp), %cx
- bsrw %cx, %ax
- .end
diff --git a/usr/src/uts/i86pc/ml/interrupt.s b/usr/src/uts/i86pc/ml/interrupt.s
index 9849297ad2..8a35ee3a24 100644
--- a/usr/src/uts/i86pc/ml/interrupt.s
+++ b/usr/src/uts/i86pc/ml/interrupt.s
@@ -36,14 +36,6 @@
#include <sys/psw.h>
#include <sys/x86_archext.h>
-#if defined(__lint)
-
-#include <sys/types.h>
-#include <sys/thread.h>
-#include <sys/systm.h>
-
-#else /* __lint */
-
#include <sys/segments.h>
#include <sys/pcb.h>
#include <sys/trap.h>
@@ -53,18 +45,6 @@
#include <sys/panic.h>
#include "assym.h"
-#endif /* lint */
-
-#if defined(__lint)
-
-void
-_interrupt(void)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
/*
* Common register usage:
*
@@ -109,38 +89,6 @@ _interrupt(void)
SET_SIZE(cmnint)
SET_SIZE(_interrupt)
-#elif defined(__i386)
-
- ENTRY_NP2(cmnint, _interrupt)
-
- INTR_PUSH
- INTGATE_INIT_KERNEL_FLAGS
-
- /*
- * At the end of TRACE_PTR %esi points to the current TRAPTRACE entry
- */
- TRACE_PTR(%esi, %eax, %eax, %edx, $TT_INTERRUPT)
- /* Uses labels 8 and 9 */
- TRACE_REGS(%esi, %esp, %eax, %ebx) /* Uses label 9 */
- TRACE_STAMP(%esi) /* Clobbers %eax, %edx, uses 9 */
-
- movl %esp, %ebp
-
- TRACE_STACK(%esi)
-
- pushl %esi /* pass traptrace record pointer */
- pushl %ebp /* pass struct regs pointer */
- call *do_interrupt_common /* interrupt service routine */
- addl $8, %esp /* pop args off of stack */
-
- jmp _sys_rtt_ints_disabled
- /*NOTREACHED*/
-
- SET_SIZE(cmnint)
- SET_SIZE(_interrupt)
-
-#endif /* __i386 */
-
/*
* Declare a uintptr_t which has the size of _interrupt to enable stack
* traceback code to know when a regs structure is on the stack.
@@ -151,33 +99,20 @@ _interrupt_size:
.NWORD . - _interrupt
.type _interrupt_size, @object
-#endif /* __lint */
-
-#if defined(__lint)
-
-void
-fakesoftint(void)
-{}
-
-#else /* __lint */
-
- /
- / If we're here, we're being called from splx() to fake a soft
- / interrupt (note that interrupts are still disabled from splx()).
- / We execute this code when a soft interrupt is posted at
- / level higher than the CPU's current spl; when spl is lowered in
- / splx(), it will see the softint and jump here. We'll do exactly
- / what a trap would do: push our flags, %cs, %eip, error code
- / and trap number (T_SOFTINT). The cmnint() code will see T_SOFTINT
- / and branch to the dosoftint() code.
- /
-#if defined(__amd64)
-
/*
- * In 64-bit mode, iretq -always- pops all five regs
- * Imitate the 16-byte auto-align of the stack, and the
- * zero-ed out %ss value.
+ * If we're here, we're being called from splx() to fake a soft
+ * interrupt (note that interrupts are still disabled from
+ * splx()). We execute this code when a soft interrupt is
+ * posted at level higher than the CPU's current spl; when spl
+ * is lowered in splx(), it will see the softint and jump here.
+ * We'll do exactly what a trap would do: push our flags, %cs,
+ * %rip, error code and trap number (T_SOFTINT). The cmnint()
+ * code will see T_SOFTINT and branch to the dosoftint() code.
+ *
+ * iretq -always- pops all five regs. Imitate the 16-byte
+ * auto-align of the stack, and the zero-ed out %ss value.
*/
+
ENTRY_NP(fakesoftint)
movq %rsp, %r11
andq $-16, %rsp
@@ -200,24 +135,3 @@ fakesoftint(void)
SET_SIZE(fakesoftint_return)
SET_SIZE(fakesoftint)
-#elif defined(__i386)
-
- ENTRY_NP(fakesoftint)
- pushfl
-#if defined(__xpv)
- popl %eax
- EVENT_MASK_TO_IE(%edx, %eax)
- pushl %eax
-#endif
- pushl %cs
- pushl $fakesoftint_return
- pushl $0
- pushl $T_SOFTINT
- jmp cmnint
- ALTENTRY(fakesoftint_return)
- ret
- SET_SIZE(fakesoftint_return)
- SET_SIZE(fakesoftint)
-
-#endif /* __i386 */
-#endif /* __lint */
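
The frame fakesoftint fabricates is exactly the five-register frame iretq
pops, preceded by the error code and trap number that cmnint expects. A hedged
C picture of that layout, lowest address (last value pushed) first (the kernel
really works in terms of struct regs, not this type):

        #include <stdint.h>

        typedef struct {
                uint64_t trapno;  /* T_SOFTINT, pushed last */
                uint64_t err;     /* 0 */
                uint64_t rip;     /* fakesoftint_return */
                uint64_t cs;
                uint64_t rflags;
                uint64_t rsp;     /* pre-trap stack, 16-byte aligned */
                uint64_t ss;      /* zeroed, as iretq expects */
        } fake_softint_frame_t;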
diff --git a/usr/src/uts/i86pc/ml/kpti_trampolines.s b/usr/src/uts/i86pc/ml/kpti_trampolines.s
index a036eefee1..df7f1c3aae 100644
--- a/usr/src/uts/i86pc/ml/kpti_trampolines.s
+++ b/usr/src/uts/i86pc/ml/kpti_trampolines.s
@@ -110,14 +110,6 @@
#include <sys/machbrand.h>
#include <sys/param.h>
-#if defined(__lint)
-
-#include <sys/types.h>
-#include <sys/thread.h>
-#include <sys/systm.h>
-
-#else /* __lint */
-
#include <sys/segments.h>
#include <sys/pcb.h>
#include <sys/trap.h>
@@ -820,4 +812,3 @@ tr_intr_ret_end:
kpti_tramp_end:
nop
-#endif /* __lint */
diff --git a/usr/src/uts/i86pc/ml/locore.s b/usr/src/uts/i86pc/ml/locore.s
index aad2fe89e2..3ef051d928 100644
--- a/usr/src/uts/i86pc/ml/locore.s
+++ b/usr/src/uts/i86pc/ml/locore.s
@@ -43,19 +43,6 @@
#include <sys/x86_archext.h>
#include <sys/machparam.h>
-#if defined(__lint)
-
-#include <sys/types.h>
-#include <sys/thread.h>
-#include <sys/systm.h>
-#include <sys/lgrp.h>
-#include <sys/regset.h>
-#include <sys/link.h>
-#include <sys/bootconf.h>
-#include <sys/bootsvcs.h>
-
-#else /* __lint */
-
#include <sys/segments.h>
#include <sys/pcb.h>
#include <sys/trap.h>
@@ -133,19 +120,6 @@
.comm t0stack, DEFAULTSTKSZ, 32
.comm t0, 4094, 32
-#endif /* __lint */
-
-
-#if defined(__amd64)
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-_locore_start(struct boot_syscalls *sysp, ulong_t rsi, struct bootops *bop)
-{}
-
-#else /* __lint */
/*
* kobj_init() vectors us back to here with (note) a slightly different
@@ -229,11 +203,6 @@ _locore_start(struct boot_syscalls *sysp, ulong_t rsi, struct bootops *bop)
call panic
SET_SIZE(_locore_start)
-#endif /* __amd64 */
-#endif /* __lint */
-
-#if !defined(__lint)
-
__return_from_main:
.string "main() returned"
__unsupported_cpu:
@@ -244,862 +213,14 @@ _no_pending_updates:
.string "locore.s:%d lwp_rtt(lwp %p) but pcb_rupdate != 1"
#endif
-#endif /* !__lint */
-
-#if !defined(__amd64)
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-_locore_start(struct boot_syscalls *sysp, struct bootops *bop)
-{}
-
-#else /* __lint */
-
- /*
- * kobj_init() vectors us back to here with (note) a slightly different
- * set of arguments than _start is given (see lint prototypes above).
- *
- * XXX Make this less vile, please.
- */
- ENTRY_NP(_locore_start)
-
- /*
- * %ecx = boot services (should die someday)
- * %ebx = bootops
- */
- mov $edata, %ebp / edata needs to be defined for ksyms
- movl $0, (%ebp) / limit stack back trace
-
- /*
- * Initialize our stack pointer to the thread 0 stack (t0stack)
- * and leave room for a phony "struct regs".
- */
- movl $t0stack + DEFAULTSTKSZ - REGSIZE, %esp
-
- /*
- * Save call back for special x86 boot services vector
- */
- mov %ecx, sysp / save call back for boot services
-
- mov %ebx, bootops / save bootops
- movl $bootops, bootopsp
-
-
- /*
- * Save all registers and flags
- */
- pushal
- pushfl
-
-#if !defined(__xpv)
- /*
- * Override bios settings and enable write protect and
- * alignment check faults.
- */
- movl %cr0, %eax
-
- /*
- * enable WP for detecting faults, and enable alignment checking.
- */
- orl $_CONST(CR0_WP|CR0_AM), %eax
- andl $_BITNOT(CR0_WT|CR0_CE), %eax
- movl %eax, %cr0 / set the cr0 register correctly and
- / override the BIOS setup
-
- /*
- * If bit 21 of eflags can be flipped, then cpuid is present
- * and enabled.
- */
- pushfl
- popl %ecx
- movl %ecx, %eax
- xorl $PS_ID, %eax / try complemented bit
- pushl %eax
- popfl
- pushfl
- popl %eax
- cmpl %eax, %ecx
- jne have_cpuid
-
- /*
- * cpuid may be disabled on Cyrix, try to detect Cyrix by the 5/2 test
- * div does not modify the cc flags on Cyrix, even though this may
- * also be true for other vendors, this is generally true only for
- * newer models from those vendors that support and do not disable
- * cpuid (usually because cpuid cannot be disabled)
- */
-
- /*
- * clear cc flags
- */
- xorb %ah, %ah
- sahf
-
- /*
- * perform 5/2 test
- */
- movw $5, %ax
- movb $2, %bl
- divb %bl
-
- lahf
- cmpb $2, %ah
- jne cpu_486
-
- /*
- * div did not modify the cc flags, chances are the vendor is Cyrix
- * assume the vendor is Cyrix and use the CCR's to enable cpuid
- */
- .set CYRIX_CRI, 0x22 / CR Index Register
- .set CYRIX_CRD, 0x23 / CR Data Register
-
- .set CYRIX_CCR3, 0xc3 / Config Control Reg 3
- .set CYRIX_CCR4, 0xe8 / Config Control Reg 4
- .set CYRIX_DIR0, 0xfe / Device Identification Reg 0
- .set CYRIX_DIR1, 0xff / Device Identification Reg 1
-
- /*
- * even if the cpu vendor is Cyrix and the motherboard/chipset
- * vendor decided to ignore lines A1-A4 for I/O addresses, I/O port
- * 0x21 corresponds with 0x23 and since 0x22 is still untouched,
- * the reads and writes of 0x21 are guaranteed to be off-chip of
- * the cpu
- */
-
- /*
- * enable read of ISR at I/O port 0x20
- */
- movb $0xb, %al
- outb $MCMD_PORT
-
- /*
- * read IMR and store in %bl
- */
- inb $MIMR_PORT
- movb %al, %bl
-
- /*
- * mask out all interrupts so that ISR will not change
- */
- movb $0xff, %al
- outb $MIMR_PORT
-
- /*
- * reads of I/O port 0x22 on Cyrix are always directed off-chip
- * make use of I/O pull-up to test for an unknown device on 0x22
- */
- inb $CYRIX_CRI
- cmpb $0xff, %al
- je port_22_free
-
- /*
- * motherboard/chipset vendor may be ignoring line A1 of I/O address
- */
- movb %al, %cl
-
- /*
- * if the ISR and the value read from 0x22 do not match then we have
- * detected some unknown device, probably a chipset, at 0x22
- */
- inb $MCMD_PORT
- cmpb %al, %cl
- jne restore_IMR
-
-port_22_free:
- /*
- * now test to see if some unknown device is using I/O port 0x23
- *
- * read the external I/O port at 0x23
- */
- inb $CYRIX_CRD
-
- /*
- * Test for pull-up at 0x23 or if I/O address line A1 is being ignored.
- * IMR is 0xff so both tests are performed simultaneously.
- */
- cmpb $0xff, %al
- jne restore_IMR
-
- /*
- * We are a Cyrix part. In case we are some model of Cx486 or a Cx586,
- * record the type and fix it later if not.
- */
- movl $X86_VENDOR_Cyrix, x86_vendor
- movl $X86_TYPE_CYRIX_486, x86_type
-
- /*
- * Try to read CCR3. All Cyrix cpu's which support cpuid have CCR3.
- *
- * load CCR3 index into CCR index register
- */
-
- movb $CYRIX_CCR3, %al
- outb $CYRIX_CRI
-
- /*
- * If we are not a Cyrix cpu, then we have performed an external I/O
- * cycle. If the CCR index was not valid for this Cyrix model, we may
- * have performed an external I/O cycle as well. In these cases and
- * if the motherboard/chipset vendor ignores I/O address line A1,
- * then the PIC will have IRQ3 set at the lowest priority as a side
- * effect of the above outb. We are reasonably confident that there
- * is not an unknown device on I/O port 0x22, so there should have been
- * no unpredictable side-effect of the above outb.
- */
-
- /*
- * read CCR3
- */
- inb $CYRIX_CRD
-
- /*
- * If we are not a Cyrix cpu the inb above produced an external I/O
- * cycle. If we are a Cyrix model that does not support CCR3 we
- * produced an external I/O cycle. In all known Cyrix models 6x86 and
- * above, bit 3 of CCR3 is reserved and cannot be set to 1. In all
- * Cyrix models prior to the 6x86 that supported CCR3, bits 4-7 are
- * reserved as well. It is highly unlikely that CCR3 contains the value
- * 0xff. We test to see if I/O port 0x23 is pull-up or the IMR and
- * deduce we are not a Cyrix with support for cpuid if so.
- */
- cmpb $0xff, %al
- je restore_PIC
-
- /*
- * There exist 486 ISA Cyrix chips that support CCR3 but do not support
- * DIR0 and DIR1. If we try to read DIR0, we may generate external I/O
- * cycles, the exact behavior is model specific and undocumented.
- * Unfortunately these external I/O cycles may confuse some PIC's beyond
- * recovery. Fortunately we can use the following undocumented trick:
- * if bit 4 of CCR3 can be toggled, then DIR0 and DIR1 are supported.
- * Pleasantly MAPEN contains bit 4 of CCR3, so this trick is guaranteed
- * to work on all Cyrix cpu's which support cpuid.
- */
- movb %al, %dl
- xorb $0x10, %dl
- movb %al, %cl
-
- /*
- * write back CCR3 with bit 4 toggled
- */
- movb $CYRIX_CCR3, %al
- outb $CYRIX_CRI
-
- movb %dl, %al
- outb $CYRIX_CRD
-
- /*
- * read CCR3
- */
- movb $CYRIX_CCR3, %al
- outb $CYRIX_CRI
- inb $CYRIX_CRD
- movb %al, %dl
-
- /*
- * restore CCR3
- */
- movb $CYRIX_CCR3, %al
- outb $CYRIX_CRI
-
- movb %cl, %al
- outb $CYRIX_CRD
-
- /*
- * if bit 4 was not toggled DIR0 and DIR1 are not supported in which
- * case we do not have cpuid anyway
- */
- andb $0x10, %al
- andb $0x10, %dl
- cmpb %al, %dl
- je restore_PIC
-
- /*
- * read DIR0
- */
- movb $CYRIX_DIR0, %al
- outb $CYRIX_CRI
- inb $CYRIX_CRD
-
- /*
- * test for pull-up
- */
- cmpb $0xff, %al
- je restore_PIC
-
- /*
- * Values of 0x20-0x27 in DIR0 are currently reserved by Cyrix for
- * future use. If Cyrix ever produces a cpu that supports cpuid with
- * these ids, the following test will have to change. For now we remain
- * pessimistic since the formats of the CCR's may be different then.
- *
- * test for at least a 6x86, to see if we support both MAPEN and CPUID
- */
- cmpb $0x30, %al
- jb restore_IMR
-
- /*
- * enable MAPEN
- */
- movb $CYRIX_CCR3, %al
- outb $CYRIX_CRI
-
- andb $0xf, %cl
- movb %cl, %al
- orb $0x10, %al
- outb $CYRIX_CRD
-
- /*
- * select CCR4
- */
- movb $CYRIX_CCR4, %al
- outb $CYRIX_CRI
-
- /*
- * read CCR4
- */
- inb $CYRIX_CRD
-
- /*
- * enable cpuid
- */
- orb $0x80, %al
- movb %al, %dl
-
- /*
- * select CCR4
- */
- movb $CYRIX_CCR4, %al
- outb $CYRIX_CRI
-
- /*
- * write CCR4
- */
- movb %dl, %al
- outb $CYRIX_CRD
-
- /*
- * select CCR3
- */
- movb $CYRIX_CCR3, %al
- outb $CYRIX_CRI
-
- /*
- * disable MAPEN and write CCR3
- */
- movb %cl, %al
- outb $CYRIX_CRD
-
- /*
- * restore IMR
- */
- movb %bl, %al
- outb $MIMR_PORT
-
- /*
- * test to see if cpuid available
- */
- pushfl
- popl %ecx
- movl %ecx, %eax
- xorl $PS_ID, %eax / try complemented bit
- pushl %eax
- popfl
- pushfl
- popl %eax
- cmpl %eax, %ecx
- jne have_cpuid
- jmp cpu_486
-
-restore_PIC:
- /*
- * In case the motherboard/chipset vendor is ignoring line A1 of the
- * I/O address, we set the PIC priorities to sane values.
- */
- movb $0xc7, %al / irq 7 lowest priority
- outb $MCMD_PORT
-
-restore_IMR:
- movb %bl, %al
- outb $MIMR_PORT
- jmp cpu_486
-
-have_cpuid:
- /*
- * cpuid instruction present
- */
- bts $X86FSET_CPUID, x86_featureset / Just to set; Ignore the CF
- movl $0, %eax
- cpuid
-
- movl %ebx, cpu_vendor
- movl %edx, cpu_vendor+4
- movl %ecx, cpu_vendor+8
-
- /*
- * early cyrix cpus are somewhat strange and need to be
- * probed in curious ways to determine their identity
- */
-
- leal cpu_vendor, %esi
- leal CyrixInstead, %edi
- movl $12, %ecx
- repz
- cmpsb
- je vendor_is_cyrix
-
- / let mlsetup()/cpuid_pass1() handle everything else in C
-
- jmp cpu_done
-
-is486:
- /*
- * test to see if a useful cpuid
- */
- testl %eax, %eax
- jz isa486
-
- movl $1, %eax
- cpuid
-
- movl %eax, %ebx
- andl $0xF00, %ebx
- cmpl $0x400, %ebx
- je isa486
-
- rep; ret /* use 2 byte return instruction */
- /* AMD Software Optimization Guide - Section 6.2 */
-isa486:
- /*
- * lose the return address
- */
- popl %eax
- jmp cpu_486
-
-vendor_is_cyrix:
- call is486
-
- /*
- * Processor signature and feature flags for Cyrix are insane.
- * BIOS can play with semi-documented registers, so cpuid must be used
- * cautiously. Since we are Cyrix that has cpuid, we have DIR0 and DIR1
- * Keep the family in %ebx and feature flags in %edx until not needed
- */
-
- /*
- * read DIR0
- */
- movb $CYRIX_DIR0, %al
- outb $CYRIX_CRI
- inb $CYRIX_CRD
-
- /*
- * First we handle the cases where we are a 6x86 or 6x86L.
- * The 6x86 is basically a 486, the only reliable bit in the
- * feature flags is for FPU. The 6x86L is better, unfortunately
- * there is no really good way to distinguish between these two
- * cpu's. We are pessimistic and when in doubt assume 6x86.
- */
-
- cmpb $0x40, %al
- jae maybeGX
-
- /*
- * We are an M1, either a 6x86 or 6x86L.
- */
- cmpb $0x30, %al
- je maybe6x86L
- cmpb $0x31, %al
- je maybe6x86L
- cmpb $0x34, %al
- je maybe6x86L
- cmpb $0x35, %al
- je maybe6x86L
-
- /*
- * although it is possible that we are a 6x86L, the cpu and
- * documentation are so buggy, we just do not care.
- */
- jmp likely6x86
-
-maybe6x86L:
- /*
- * read DIR1
- */
- movb $CYRIX_DIR1, %al
- outb $CYRIX_CRI
- inb $CYRIX_CRD
- cmpb $0x22, %al
- jb likely6x86
-
- /*
- * We are a 6x86L, or at least a 6x86 with honest cpuid feature flags
- */
- movl $X86_TYPE_CYRIX_6x86L, x86_type
- jmp coma_bug
-
-likely6x86:
- /*
- * We are likely a 6x86, or a 6x86L without a way of knowing
- *
- * The 6x86 has NO Pentium or Pentium Pro compatible features even
- * though it claims to be a Pentium Pro compatible!
- *
- * The 6x86 core used in the 6x86 may have most of the Pentium system
- * registers and largely conform to the Pentium System Programming
- * Reference. Documentation on these parts is long gone. Treat it as
- * a crippled Pentium and hope for the best.
- */
-
- movl $X86_TYPE_CYRIX_6x86, x86_type
- jmp coma_bug
-
-maybeGX:
- /*
- * Now we check whether we are a MediaGX or GXm. We have particular
- * reason for concern here. Even though most of the GXm's
- * report having TSC in the cpuid feature flags, the TSC may be
- * horribly broken. What is worse, is that MediaGX's are basically
- * 486's while the good GXm's are more like Pentium Pro's!
- */
-
- cmpb $0x50, %al
- jae maybeM2
-
- /*
- * We are either a MediaGX (sometimes called a Gx86) or GXm
- */
-
- cmpb $41, %al
- je maybeMediaGX
-
- cmpb $44, %al
- jb maybeGXm
-
- cmpb $47, %al
- jbe maybeMediaGX
-
- /*
- * We do not honestly know what we are, so assume a MediaGX
- */
- jmp media_gx
-
-maybeGXm:
- /*
- * It is still possible we are either a MediaGX or GXm, trust cpuid
- * family should be 5 on a GXm
- */
- cmpl $0x500, %ebx
- je GXm
-
- /*
- * BIOS/Cyrix might set family to 6 on a GXm
- */
- cmpl $0x600, %ebx
- jne media_gx
-
-GXm:
- movl $X86_TYPE_CYRIX_GXm, x86_type
- jmp cpu_done
-
-maybeMediaGX:
- /*
- * read DIR1
- */
- movb $CYRIX_DIR1, %al
- outb $CYRIX_CRI
- inb $CYRIX_CRD
-
- cmpb $0x30, %al
- jae maybeGXm
-
- /*
- * we are a MediaGX for which we do not trust cpuid
- */
-media_gx:
- movl $X86_TYPE_CYRIX_MediaGX, x86_type
- jmp cpu_486
-
-maybeM2:
- /*
- * Now we check whether we are a 6x86MX or MII. These cpu's are
- * virtually identical, but we care because for the 6x86MX, we
- * must work around the coma bug. Also for 6x86MX prior to revision
- * 1.4, the TSC may have serious bugs.
- */
-
- cmpb $0x60, %al
- jae maybeM3
-
- /*
- * family should be 6, but BIOS/Cyrix might set it to 5
- */
- cmpl $0x600, %ebx
- ja cpu_486
-
- /*
- * read DIR1
- */
- movb $CYRIX_DIR1, %al
- outb $CYRIX_CRI
- inb $CYRIX_CRD
-
- cmpb $0x8, %al
- jb cyrix6x86MX
- cmpb $0x80, %al
- jb MII
-
-cyrix6x86MX:
- /*
- * It is altogether unclear how the revision stamped on the cpu
- * maps to the values in DIR0 and DIR1. Just assume TSC is broken.
- */
- movl $X86_TYPE_CYRIX_6x86MX, x86_type
- jmp coma_bug
-
-MII:
- movl $X86_TYPE_CYRIX_MII, x86_type
-likeMII:
- jmp cpu_done
-
-maybeM3:
- /*
- * We are some chip that we cannot identify yet, an MIII perhaps.
- * We will be optimistic and hope that the chip is much like an MII,
- * and that cpuid is sane. Cyrix seemed to have gotten it right in
- * time for the MII, we can only hope it stayed that way.
- * Maybe the BIOS or Cyrix is trying to hint at something
- */
- cmpl $0x500, %ebx
- je GXm
-
- cmpb $0x80, %al
- jae likelyM3
-
- /*
- * Just test for the features Cyrix is known for
- */
-
- jmp MII
-
-likelyM3:
- /*
- * DIR0 with values from 0x80 to 0x8f indicates a VIA Cyrix III, aka
- * the Cyrix MIII. There may be parts later that use the same ranges
- * for DIR0 with special values in DIR1, maybe the VIA CIII, but for
- * now we will call anything with a DIR0 of 0x80 or higher an MIII.
- * The MIII is supposed to support large pages, but we will believe
- * it when we see it. For now we just enable and test for MII features.
- */
- movl $X86_TYPE_VIA_CYRIX_III, x86_type
- jmp likeMII
-
-coma_bug:
-
-/*
- * With NO_LOCK set to 0 in CCR1, the usual state that BIOS enforces, some
- * bus cycles are issued with LOCK# asserted. With NO_LOCK set to 1, all bus
- * cycles except page table accesses and interrupt ACK cycles do not assert
- * LOCK#. xchgl is an instruction that asserts LOCK# if NO_LOCK is set to 0.
- * Due to a bug in the cpu core involving over-optimization of branch
- * prediction, register renaming, and execution of instructions down both the
- * X and Y pipes for the xchgl instruction, short loops can be written that
- * never de-assert LOCK# from one invocation of the loop to the next, ad
- * infinitum. The undesirable effect of this situation is that interrupts are
- * not serviced. The ideal workaround to this bug would be to set NO_LOCK to
- * 1. Unfortunately bus cycles that would otherwise have asserted LOCK# no
- * longer do, unless they are page table accesses or interrupt ACK cycles.
- * With LOCK# not asserted, these bus cycles are now cached. This can cause
- * undesirable behaviour if the ARR's are not configured correctly. Solaris
- * does not configure the ARR's, nor does it provide any useful mechanism for
- * doing so; thus the ideal workaround is not viable. Fortunately, the only
- * known exploits for this bug involve the xchgl instruction specifically.
- * There is a group of undocumented registers on Cyrix 6x86, 6x86L, and
- * 6x86MX cpu's which can be used to specify one instruction as a serializing
- * instruction. With the xchgl instruction serialized, LOCK# is still
- * asserted, but it is the sole instruction for which LOCK# is asserted.
- * There is now some added penalty for the xchgl instruction, but the usual
- * bus locking is preserved. This ingenious workaround was discovered by
- * disassembling a binary provided by Cyrix as a workaround for this bug on
- * Windows, but it's not documented anywhere by Cyrix, nor is the bug actually
- * mentioned in any public errata! The only concern for this workaround is
- * that there may be similar undiscovered bugs with other instructions that
- * assert LOCK# that may be leveraged to similar ends. The fact that Cyrix
- * fixed this bug sometime late in 1997 and that no exploits other than
- * xchgl have been discovered is a good indication that this workaround
- * is reasonable.
- */
-
- .set CYRIX_DBR0, 0x30 / Debug Register 0
- .set CYRIX_DBR1, 0x31 / Debug Register 1
- .set CYRIX_DBR2, 0x32 / Debug Register 2
- .set CYRIX_DBR3, 0x33 / Debug Register 3
- .set CYRIX_DOR, 0x3c / Debug Opcode Register
-
- /*
- * What is known about DBR1, DBR2, DBR3, and DOR is that for normal
- * cpu execution DBR1, DBR2, and DBR3 are set to 0. To obtain opcode
- * serialization, DBR1, DBR2, and DBR3 are loaded with 0xf8, 0x7f,
- * and 0x00, as the code below does. Then, DOR is loaded with the
- * one byte opcode.
- */
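In C terms, the register dance that follows looks roughly like this; a
minimal sketch, assuming illumos-style outb(port, val)/inb(port) helpers
and the CYRIX_* indices defined in this file (CYRIX_CRI/CYRIX_CRD are the
index/data port pair for the configuration registers):

	static void
	cyrix_serialize_xchgl(void)
	{
		uint8_t ccr3, dbr0;

		outb(CYRIX_CRI, CYRIX_CCR3);	/* select CCR3 */
		ccr3 = inb(CYRIX_CRD) & 0x0f;	/* save CCR3, MAPEN masked */

		outb(CYRIX_CRI, CYRIX_CCR3);
		outb(CYRIX_CRD, ccr3 | 0x10);	/* set MAPEN to reach the DBRs */

		outb(CYRIX_CRI, CYRIX_DBR0);
		dbr0 = inb(CYRIX_CRD) | 0x80;	/* MATCH off while we program */
		outb(CYRIX_CRI, CYRIX_DBR0);
		outb(CYRIX_CRD, dbr0);

		outb(CYRIX_CRI, CYRIX_DBR1);	/* opcode-serialization values */
		outb(CYRIX_CRD, 0xf8);
		outb(CYRIX_CRI, CYRIX_DBR2);
		outb(CYRIX_CRD, 0x7f);
		outb(CYRIX_CRI, CYRIX_DBR3);
		outb(CYRIX_CRD, 0x00);

		outb(CYRIX_CRI, CYRIX_DOR);
		outb(CYRIX_CRD, 0x87);		/* 0x87 is the xchgl opcode */

		outb(CYRIX_CRI, CYRIX_DBR0);
		outb(CYRIX_CRD, dbr0 & 0x7f);	/* MATCH back on */

		outb(CYRIX_CRI, CYRIX_CCR3);
		outb(CYRIX_CRD, ccr3);		/* restore CCR3, MAPEN off */
	}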
-
- /*
- * select CCR3
- */
- movb $CYRIX_CCR3, %al
- outb $CYRIX_CRI
-
- /*
- * read CCR3 and mask out MAPEN
- */
- inb $CYRIX_CRD
- andb $0xf, %al
-
- /*
- * save masked CCR3 in %ah
- */
- movb %al, %ah
-
- /*
- * select CCR3
- */
- movb $CYRIX_CCR3, %al
- outb $CYRIX_CRI
-
- /*
- * enable MAPEN
- */
- movb %ah, %al
- orb $0x10, %al
- outb $CYRIX_CRD
-
- /*
- * read DBR0
- */
- movb $CYRIX_DBR0, %al
- outb $CYRIX_CRI
- inb $CYRIX_CRD
-
- /*
- * disable MATCH and save in %bh
- */
- orb $0x80, %al
- movb %al, %bh
-
- /*
- * write DBR0
- */
- movb $CYRIX_DBR0, %al
- outb $CYRIX_CRI
- movb %bh, %al
- outb $CYRIX_CRD
-
- /*
- * write DBR1
- */
- movb $CYRIX_DBR1, %al
- outb $CYRIX_CRI
- movb $0xf8, %al
- outb $CYRIX_CRD
-
- /*
- * write DBR2
- */
- movb $CYRIX_DBR2, %al
- outb $CYRIX_CRI
- movb $0x7f, %al
- outb $CYRIX_CRD
-
- /*
- * write DBR3
- */
- movb $CYRIX_DBR3, %al
- outb $CYRIX_CRI
- xorb %al, %al
- outb $CYRIX_CRD
-
- /*
- * write DOR
- */
- movb $CYRIX_DOR, %al
- outb $CYRIX_CRI
- movb $0x87, %al
- outb $CYRIX_CRD
-
- /*
- * enable MATCH
- */
- movb $CYRIX_DBR0, %al
- outb $CYRIX_CRI
- movb %bh, %al
- andb $0x7f, %al
- outb $CYRIX_CRD
-
- /*
- * select CCR3 (0xc3) again and write back the saved value,
- * leaving MAPEN disabled
- */
- movb $0xc3, %al
- outb $CYRIX_CRI
- movb %ah, %al
- outb $CYRIX_CRD
-
- jmp cpu_done
-
-cpu_done:
-
- popfl /* Restore original FLAGS */
- popal /* Restore all registers */
-
-#endif /* !__xpv */
-
- /*
- * mlsetup(%esp) gets called.
- */
- pushl %esp
- call mlsetup
- addl $4, %esp
-
- /*
- * We change our appearance to look like the real thread 0.
- * (NOTE: making ourselves into a real thread may be a no-op)
- * main() gets called. (NOTE: main() never returns).
- */
- call main
- /* NOTREACHED */
- pushl $__return_from_main
- call panic
-
- /* NOTREACHED */
-cpu_486:
- pushl $__unsupported_cpu
- call panic
- SET_SIZE(_locore_start)
-
-#endif /* __lint */
-#endif /* !__amd64 */
-
-
/*
* For stack layout, see privregs.h
* When cmntrap gets called, the error code and trap number have been pushed.
* When cmntrap_pushed gets called, the entire struct regs has been pushed.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-cmntrap()
-{}
-
-#else /* __lint */
-
.globl trap /* C handler called below */
-#if defined(__amd64)
-
ENTRY_NP2(cmntrap, _cmntrap)
INTR_PUSH
@@ -1206,111 +327,6 @@ cmntrap()
SET_SIZE(cmntrap)
SET_SIZE(_cmntrap)
-#elif defined(__i386)
-
-
- ENTRY_NP2(cmntrap, _cmntrap)
-
- INTR_PUSH
-
- ALTENTRY(cmntrap_pushed)
-
- movl %esp, %ebp
-
- /*
- * - if this is a #pf i.e. T_PGFLT, %esi is live
- * and contains the faulting address i.e. a copy of %cr2
- *
- * - if this is a #db i.e. T_SGLSTP, %esi is live
- * and contains the value of %db6
- */
-
- TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP) /* Uses labels 8 and 9 */
- TRACE_REGS(%edi, %esp, %ebx, %ecx) /* Uses label 9 */
- TRACE_STAMP(%edi) /* Clobbers %eax, %edx, uses 9 */
-
- /*
- * We must first check if DTrace has set its NOFAULT bit. This
- * regrettably must happen before the trap stack is recorded, because
- * this requires a call to getpcstack() and may induce recursion if an
- * fbt::getpcstack: enabling is inducing the bad load.
- */
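The check just below, as a C sketch (assuming the cpu_core[] indexing
that CPU_CORE_SHIFT implies):

	cpu_core_t *cpup = &cpu_core[CPU->cpu_id];

	if (cpup->cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) {
		/* skip trap(); advance the PC past the faulting load */
	}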
- movl %gs:CPU_ID, %eax
- shll $CPU_CORE_SHIFT, %eax
- addl $cpu_core, %eax
- movw CPUC_DTRACE_FLAGS(%eax), %cx
- testw $CPU_DTRACE_NOFAULT, %cx
- jnz .dtrace_induced
-
- TRACE_STACK(%edi)
-
- pushl %gs:CPU_ID
- pushl %esi /* fault address for PGFLTs */
- pushl %ebp /* &regs */
-
- /*
- * We know that this isn't a DTrace non-faulting load; we can now safely
- * reenable interrupts. (In the case of pagefaults, we enter through an
- * interrupt gate.)
- */
- ENABLE_INTR_FLAGS
-
- call trap /* trap(rp, addr, cpuid) handles all traps */
- addl $12, %esp /* get argument off stack */
- jmp _sys_rtt
-
-.dtrace_induced:
- cmpw $KCS_SEL, REGOFF_CS(%ebp) /* test CS for user-mode trap */
- jne 3f /* if from user, panic */
-
- cmpl $T_PGFLT, REGOFF_TRAPNO(%ebp)
- je 1f
-
- cmpl $T_GPFLT, REGOFF_TRAPNO(%ebp)
- je 0f
-
- cmpl $T_ZERODIV, REGOFF_TRAPNO(%ebp)
- jne 4f /* if not PF/GP/UD/DE, panic */
-
- orw $CPU_DTRACE_DIVZERO, %cx
- movw %cx, CPUC_DTRACE_FLAGS(%eax)
- jmp 2f
-
-0:
- /*
- * If we've taken a GPF, we don't (unfortunately) have the address that
- * induced the fault. So instead of setting the fault to BADADDR,
- * we'll set the fault to ILLOP.
- */
- orw $CPU_DTRACE_ILLOP, %cx
- movw %cx, CPUC_DTRACE_FLAGS(%eax)
- jmp 2f
-1:
- orw $CPU_DTRACE_BADADDR, %cx
- movw %cx, CPUC_DTRACE_FLAGS(%eax) /* set fault to bad addr */
- movl %esi, CPUC_DTRACE_ILLVAL(%eax)
- /* fault addr is illegal value */
-2:
- pushl REGOFF_EIP(%ebp)
- call dtrace_instr_size
- addl $4, %esp
- movl REGOFF_EIP(%ebp), %ecx
- addl %eax, %ecx
- movl %ecx, REGOFF_EIP(%ebp)
- INTR_POP_KERNEL
- IRET
- /*NOTREACHED*/
-3:
- pushl $dtrace_badflags
- call panic
-4:
- pushl $dtrace_badtrap
- call panic
- SET_SIZE(cmntrap)
- SET_SIZE(_cmntrap)
-
-#endif /* __i386 */
-
/*
* Declare a uintptr_t which has the size of _cmntrap to enable stack
* traceback code to know when a regs structure is on the stack.
@@ -1327,27 +343,8 @@ dtrace_badflags:
dtrace_badtrap:
.string "bad DTrace trap"
-#endif /* __lint */
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-cmninttrap()
-{}
-
-#if !defined(__xpv)
-void
-bop_trap_handler(void)
-{}
-#endif
-
-#else /* __lint */
-
.globl trap /* C handler called below */
-#if defined(__amd64)
-
ENTRY_NP(cmninttrap)
INTR_PUSH
@@ -1380,58 +377,8 @@ bop_trap_handler(void)
SET_SIZE(bop_trap_handler)
#endif
-#elif defined(__i386)
-
- ENTRY_NP(cmninttrap)
-
- INTR_PUSH
- INTGATE_INIT_KERNEL_FLAGS
-
- TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP) /* Uses labels 8 and 9 */
- TRACE_REGS(%edi, %esp, %ebx, %ecx) /* Uses label 9 */
- TRACE_STAMP(%edi) /* Clobbers %eax, %edx, uses 9 */
-
- movl %esp, %ebp
-
- TRACE_STACK(%edi)
-
- pushl %gs:CPU_ID
- pushl $0
- pushl %ebp
- call trap /* trap(rp, addr, cpuid) handles all traps */
- addl $12, %esp
- jmp _sys_rtt
- SET_SIZE(cmninttrap)
-
-#if !defined(__xpv)
- /*
- * Handle traps early in boot. Just revectors into C quickly as
- * these are always fatal errors.
- */
- ENTRY(bop_trap_handler)
- movl %esp, %eax
- pushl %eax
- call bop_trap
- SET_SIZE(bop_trap_handler)
-#endif
-
-#endif /* __i386 */
-
-#endif /* __lint */
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-dtrace_trap()
-{}
-
-#else /* __lint */
-
.globl dtrace_user_probe
-#if defined(__amd64)
-
ENTRY_NP(dtrace_trap)
INTR_PUSH
@@ -1458,60 +405,10 @@ dtrace_trap()
SET_SIZE(dtrace_trap)
-#elif defined(__i386)
-
- ENTRY_NP(dtrace_trap)
-
- INTR_PUSH
-
- TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP) /* Uses labels 8 and 9 */
- TRACE_REGS(%edi, %esp, %ebx, %ecx) /* Uses label 9 */
- TRACE_STAMP(%edi) /* Clobbers %eax, %edx, uses 9 */
-
- movl %esp, %ebp
-
- pushl %gs:CPU_ID
-#if defined(__xpv)
- movl %gs:CPU_VCPU_INFO, %eax
- movl VCPU_INFO_ARCH_CR2(%eax), %eax
-#else
- movl %cr2, %eax
-#endif
- pushl %eax
- pushl %ebp
-
- ENABLE_INTR_FLAGS
-
- call dtrace_user_probe /* dtrace_user_probe(rp, addr, cpuid) */
- addl $12, %esp /* get argument off stack */
-
- jmp _sys_rtt
- SET_SIZE(dtrace_trap)
-
-#endif /* __i386 */
-
-#endif /* __lint */
-
/*
* Return from _sys_trap routine.
*/
-#if defined(__lint)
-
-void
-lwp_rtt_initial(void)
-{}
-
-void
-lwp_rtt(void)
-{}
-
-void
-_sys_rtt(void)
-{}
-
-#else /* __lint */
-
ENTRY_NP(lwp_rtt_initial)
movq %gs:CPU_THREAD, %r15
movq T_STACK(%r15), %rsp /* switch to the thread stack */
@@ -1636,32 +533,6 @@ _sys_rtt_end:
SET_SIZE(sys_rtt_syscall)
SET_SIZE(sys_rtt_syscall32)
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*
- * So why do we have to deal with all this crud in the world of ia32?
- *
- * Basically there are four classes of ia32 implementations: those that do not
- * have a TSC, those that have a marginal TSC that is broken to the extent
- * that it is useless, those that have a marginal TSC that is not quite so
- * horribly broken and can be used with some care, and those that have a
- * reliable TSC. This crud has to be here in order to sift through all the
- * variants.
- */
-
-/*ARGSUSED*/
-uint64_t
-freq_tsc(uint32_t *pit_counter)
-{
- return (0);
-}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
/*
* XX64 quick and dirty port from the i386 version. Since we
* believe the amd64 tsc is more reliable, could this code be
@@ -1867,410 +738,3 @@ freq_tsc_end:
ret
SET_SIZE(freq_tsc)
-#elif defined(__i386)
-
- ENTRY_NP(freq_tsc)
- pushl %ebp
- movl %esp, %ebp
- pushl %edi
- pushl %esi
- pushl %ebx
-
-/ We have a TSC, but we have no way in general to know how reliable it is.
-/ Usually a marginal TSC behaves appropriately unless not enough time
-/ elapses between reads. A reliable TSC can be read as often and as rapidly
-/ as desired. The simplistic approach of reading the TSC counter and
-/ correlating it to the PIT counter cannot be followed naively. Instead,
-/ successive estimates are used to refine a guess at the speed of the cpu,
-/ and only then are the TSC and PIT counter correlated. In practice, more
-/ than one quick loop is very rarely needed for an estimate. Measures have
-/ to be taken to prevent the PIT counter from wrapping beyond its resolution
-/ and to measure the clock rate of very fast processors.
-/
-/ The following constant can be tuned. It should be such that the loop does
-/ not take too many nor too few PIT counts to execute. If this value is too
-/ large, then on slow machines the loop will take a long time, or the PIT
-/ counter may even wrap. If this value is too small, then on fast machines
-/ the PIT counter may count so few ticks that the resolution of the PIT
-/ itself causes a bad guess. Because this code is used in machines with
-/ marginal TSC's and/or IO, if this value is too small on those, it may
-/ cause the calculated cpu frequency to vary slightly from boot to boot.
-/
-/ In all cases even if this constant is set inappropriately, the algorithm
-/ will still work and the caller should be able to handle variances in the
-/ calculation of cpu frequency, but the calculation will be inefficient and
-/ take a disproportionate amount of time relative to a well selected value.
-/ As the slowest supported cpu becomes faster, this constant should be
-/ carefully increased.
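The overall shape of the routine, as a hedged C sketch; pit_start(),
pit_latch_count(), delay_loop(), rdtsc(), and the "enough" threshold are
illustrative stand-ins, and the real code below additionally deals with
warming the icache and decoding the PIT status byte:

	uint64_t
	freq_tsc(uint32_t *pit_counter)
	{
		uint32_t loops = 0x8000;	/* the tunable above */
		const uint32_t enough = 0xe000;	/* illustrative threshold */

		for (;;) {
			pit_start(0xffff);	/* mode 0 count-down */
			uint64_t t0 = rdtsc();
			delay_loop(loops);	/* the timed loop */
			uint64_t t1 = rdtsc();
			uint16_t left = pit_latch_count();
			uint32_t ticks = (0xffff - left) + 1; /* mode 0: n + 1 */

			if (ticks >= enough) {	/* duration long enough? */
				*pit_counter = ticks;
				return (t1 - t0);	/* elapsed TSC ticks */
			}
			/* aim for roughly 0xf000 PIT ticks next time */
			loops = (uint32_t)(((uint64_t)loops * 0xf000) / ticks);
		}
	}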
-
- movl $0x8000, %ecx
-
- / to make sure the instruction cache has been warmed
- clc
-
- jmp freq_tsc_loop
-
-/ The following block of code up to and including the latching of the PIT
-/ counter after freq_tsc_perf_loop is very critical and very carefully
-/ written, it should only be modified with great care. freq_tsc_loop to
-/ freq_tsc_perf_loop fits exactly in 16 bytes as do the instructions in
-/ freq_tsc_perf_loop up to the unlatching of the PIT counter.
-
- .align 32
-freq_tsc_loop:
- / save the loop count in %ebx
- movl %ecx, %ebx
-
- / initialize the PIT counter and start a count down
- movb $PIT_LOADMODE, %al
- outb $PITCTL_PORT
- movb $0xff, %al
- outb $PITCTR0_PORT
- outb $PITCTR0_PORT
-
- / read the TSC and store the TS in %edi:%esi
- rdtsc
- movl %eax, %esi
-
-freq_tsc_perf_loop:
- movl %edx, %edi
- movl %eax, %esi
- movl %edx, %edi
- loop freq_tsc_perf_loop
-
- / read the TSC and store the LSW in %ecx
- rdtsc
- movl %eax, %ecx
-
- / latch the PIT counter and status
- movb $_CONST(PIT_READBACK|PIT_READBACKC0), %al
- outb $PITCTL_PORT
-
- / remember if the icache has been warmed
- setc %ah
-
- / read the PIT status
- inb $PITCTR0_PORT
- shll $8, %eax
-
- / read PIT count
- inb $PITCTR0_PORT
- shll $8, %eax
- inb $PITCTR0_PORT
- bswap %eax
-
- / check to see if the PIT count was loaded into the CE
- btw $_CONST(PITSTAT_NULLCNT+8), %ax
- jc freq_tsc_increase_count
-
- / check to see if PIT counter wrapped
- btw $_CONST(PITSTAT_OUTPUT+8), %ax
- jnc freq_tsc_pit_did_not_wrap
-
- / halve count
- shrl $1, %ebx
- movl %ebx, %ecx
-
- / the instruction cache has been warmed
- stc
-
- jmp freq_tsc_loop
-
-freq_tsc_increase_count:
- shll $1, %ebx
- jc freq_tsc_too_fast
-
- movl %ebx, %ecx
-
- / the instruction cache has been warmed
- stc
-
- jmp freq_tsc_loop
-
-freq_tsc_pit_did_not_wrap:
- roll $16, %eax
-
- cmpw $0x2000, %ax
- notw %ax
- jb freq_tsc_sufficient_duration
-
-freq_tsc_calculate:
- / in mode 0, the PIT loads the count into the CE on the first CLK pulse,
- / then on the second CLK pulse the CE is decremented, therefore mode 0
- / is really a (count + 1) counter, ugh
- xorl %esi, %esi
- movw %ax, %si
- incl %esi
-
- movl $0xf000, %eax
- mull %ebx
-
- / tuck away (target_pit_count * loop_count)
- movl %edx, %ecx
- movl %eax, %ebx
-
- movl %esi, %eax
- movl $0xffffffff, %edx
- mull %edx
-
- addl %esi, %eax
- adcl $0, %edx
-
- cmpl %ecx, %edx
- ja freq_tsc_div_safe
- jb freq_tsc_too_fast
-
- cmpl %ebx, %eax
- jbe freq_tsc_too_fast
-
-freq_tsc_div_safe:
- movl %ecx, %edx
- movl %ebx, %eax
-
- movl %esi, %ecx
- divl %ecx
-
- movl %eax, %ecx
-
- / the instruction cache has been warmed
- stc
-
- jmp freq_tsc_loop
-
-freq_tsc_sufficient_duration:
- / test to see if the icache has been warmed
- btl $16, %eax
- jnc freq_tsc_calculate
-
- / recall mode 0 is a (count + 1) counter
- andl $0xffff, %eax
- incl %eax
-
- / save the number of PIT counts
- movl 8(%ebp), %ebx
- movl %eax, (%ebx)
-
- / calculate the number of TS's that elapsed
- movl %ecx, %eax
- subl %esi, %eax
- sbbl %edi, %edx
-
- jmp freq_tsc_end
-
-freq_tsc_too_fast:
- / return 0 as a 64 bit quantity
- xorl %eax, %eax
- xorl %edx, %edx
-
-freq_tsc_end:
- popl %ebx
- popl %esi
- popl %edi
- popl %ebp
- ret
- SET_SIZE(freq_tsc)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if !defined(__amd64)
-#if defined(__lint)
-
-/*
- * We do not have a TSC, so we use a block of instructions with well-known
- * timings.
- */
-
-/*ARGSUSED*/
-uint64_t
-freq_notsc(uint32_t *pit_counter)
-{
- return (0);
-}
-
-#else /* __lint */
- ENTRY_NP(freq_notsc)
- pushl %ebp
- movl %esp, %ebp
- pushl %edi
- pushl %esi
- pushl %ebx
-
- / initial count for the idivl loop
- movl $0x1000, %ecx
-
- / load the divisor
- movl $1, %ebx
-
- jmp freq_notsc_loop
-
-.align 16
-freq_notsc_loop:
- / set high 32 bits of dividend to zero
- xorl %edx, %edx
-
- / save the loop count in %edi
- movl %ecx, %edi
-
- / initialize the PIT counter and start a count down
- movb $PIT_LOADMODE, %al
- outb $PITCTL_PORT
- movb $0xff, %al
- outb $PITCTR0_PORT
- outb $PITCTR0_PORT
-
- / set low 32 bits of dividend to zero
- xorl %eax, %eax
-
-/ It is vital that the arguments to idivl be set appropriately, because on
-/ some cpu's this instruction takes a varying number of clock ticks
-/ depending on its arguments.
-freq_notsc_perf_loop:
- idivl %ebx
- idivl %ebx
- idivl %ebx
- idivl %ebx
- idivl %ebx
- loop freq_notsc_perf_loop
-
- / latch the PIT counter and status
- movb $_CONST(PIT_READBACK|PIT_READBACKC0), %al
- outb $PITCTL_PORT
-
- / read the PIT status
- inb $PITCTR0_PORT
- shll $8, %eax
-
- / read PIT count
- inb $PITCTR0_PORT
- shll $8, %eax
- inb $PITCTR0_PORT
- bswap %eax
-
- / check to see if the PIT count was loaded into the CE
- btw $_CONST(PITSTAT_NULLCNT+8), %ax
- jc freq_notsc_increase_count
-
- / check to see if PIT counter wrapped
- btw $_CONST(PITSTAT_OUTPUT+8), %ax
- jnc freq_notsc_pit_did_not_wrap
-
- / halve count
- shrl $1, %edi
- movl %edi, %ecx
-
- jmp freq_notsc_loop
-
-freq_notsc_increase_count:
- shll $1, %edi
- jc freq_notsc_too_fast
-
- movl %edi, %ecx
-
- jmp freq_notsc_loop
-
-freq_notsc_pit_did_not_wrap:
- shrl $16, %eax
-
- cmpw $0x2000, %ax
- notw %ax
- jb freq_notsc_sufficient_duration
-
-freq_notsc_calculate:
- / in mode 0, the PIT loads the count into the CE on the first CLK pulse,
- / then on the second CLK pulse the CE is decremented, therefore mode 0
- / is really a (count + 1) counter, ugh
- xorl %esi, %esi
- movw %ax, %si
- incl %esi
-
- movl %edi, %eax
- movl $0xf000, %ecx
- mull %ecx
-
- / tuck away (target_pit_count * loop_count)
- movl %edx, %edi
- movl %eax, %ecx
-
- movl %esi, %eax
- movl $0xffffffff, %edx
- mull %edx
-
- addl %esi, %eax
- adcl $0, %edx
-
- cmpl %edi, %edx
- ja freq_notsc_div_safe
- jb freq_notsc_too_fast
-
- cmpl %ecx, %eax
- jbe freq_notsc_too_fast
-
-freq_notsc_div_safe:
- movl %edi, %edx
- movl %ecx, %eax
-
- movl %esi, %ecx
- divl %ecx
-
- movl %eax, %ecx
-
- jmp freq_notsc_loop
-
-freq_notsc_sufficient_duration:
- / recall mode 0 is a (count + 1) counter
- incl %eax
-
- / save the number of PIT counts
- movl 8(%ebp), %ebx
- movl %eax, (%ebx)
-
- / calculate the number of cpu clock ticks that elapsed
- cmpl $X86_VENDOR_Cyrix, x86_vendor
- jz freq_notsc_notcyrix
-
- / freq_notsc_perf_loop takes 86 clock cycles on Cyrix 6x86 cores
- movl $86, %eax
- jmp freq_notsc_calculate_tsc
-
-freq_notsc_notcyrix:
- / freq_notsc_perf_loop takes 237 clock cycles on Intel Pentiums
- movl $237, %eax
-
-freq_notsc_calculate_tsc:
- mull %edi
-
- jmp freq_notsc_end
-
-freq_notsc_too_fast:
- / return 0 as a 64 bit quantity
- xorl %eax, %eax
- xorl %edx, %edx
-
-freq_notsc_end:
- popl %ebx
- popl %esi
- popl %edi
- popl %ebp
-
- ret
- SET_SIZE(freq_notsc)
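Both freq_tsc and freq_notsc hand back the number of elapsed clock ticks
and store the corresponding PIT count through their argument. A hedged
sketch of the conversion a caller would then do, assuming PIT_HZ is the
(roughly 1.193182 MHz) 8254 input clock:

	uint32_t pit_ticks;
	uint64_t ticks = freq_notsc(&pit_ticks);	/* or freq_tsc() */
	uint64_t cpu_hz = 0;

	if (ticks != 0)		/* 0 means the routine gave up: too fast */
		cpu_hz = (ticks * PIT_HZ) / pit_ticks;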
-
-#endif /* __lint */
-#endif /* !__amd64 */
-
-#if !defined(__lint)
- .data
-#if !defined(__amd64)
- .align 4
-cpu_vendor:
- .long 0, 0, 0 /* Vendor ID string returned */
-
- .globl CyrixInstead
-
- .globl x86_featureset
- .globl x86_type
- .globl x86_vendor
-#endif
-
-#endif /* __lint */
diff --git a/usr/src/uts/i86pc/ml/mach_offsets.in b/usr/src/uts/i86pc/ml/mach_offsets.in
deleted file mode 100644
index b7ea0131aa..0000000000
--- a/usr/src/uts/i86pc/ml/mach_offsets.in
+++ /dev/null
@@ -1,150 +0,0 @@
-\
-\ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
-\ Use is subject to license terms.
-\
-\ CDDL HEADER START
-\
-\ The contents of this file are subject to the terms of the
-\ Common Development and Distribution License (the "License").
-\ You may not use this file except in compliance with the License.
-\
-\ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-\ or http://www.opensolaris.org/os/licensing.
-\ See the License for the specific language governing permissions
-\ and limitations under the License.
-\
-\ When distributing Covered Code, include this CDDL HEADER in each
-\ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-\ If applicable, add the following below this CDDL HEADER, with the
-\ fields enclosed by brackets "[]" replaced with your own identifying
-\ information: Portions Copyright [yyyy] [name of copyright owner]
-\
-\ CDDL HEADER END
-\
-\ Copyright 2011 Joyent, Inc. All rights reserved.
-\
-
-\
-\ offsets.in: input file to produce assym.h using the ctfstabs program
-\
-
-#ifndef _GENASSYM
-#define _GENASSYM
-#endif
-
-#define SIZES 1
-
-#if defined(__xpv)
-\
-\ XXPV This seems to need to be first to avoid a namespace collision
-\ with another header file in the list below.
-\
-#include <sys/hypervisor.h>
-#endif
-
-#include <sys/types.h>
-#include <sys/bootsvcs.h>
-#include <sys/systm.h>
-#include <sys/sysinfo.h>
-#include <sys/user.h>
-#include <sys/thread.h>
-#include <sys/proc.h>
-#include <sys/cpuvar.h>
-#include <sys/tss.h>
-#include <sys/privregs.h>
-#include <sys/segments.h>
-#include <sys/devops.h>
-#include <sys/ddi_impldefs.h>
-#include <vm/as.h>
-#include <sys/avintr.h>
-#include <sys/pic.h>
-#include <sys/rm_platter.h>
-#include <sys/stream.h>
-#include <sys/strsubr.h>
-#include <sys/sunddi.h>
-#include <sys/traptrace.h>
-#include <sys/ontrap.h>
-#include <sys/lgrp.h>
-#include <sys/dtrace.h>
-
-regs REGSIZE
- r_savfp REGOFF_SAVFP
- r_savpc REGOFF_SAVPC
- r_gs REGOFF_GS
- r_fs REGOFF_FS
- r_es REGOFF_ES
- r_ds REGOFF_DS
- r_edi REGOFF_EDI
- r_esi REGOFF_ESI
- r_ebp REGOFF_EBP
- r_esp REGOFF_ESP
- r_ebx REGOFF_EBX
- r_edx REGOFF_EDX
- r_ecx REGOFF_ECX
- r_eax REGOFF_EAX
- r_trapno REGOFF_TRAPNO
- r_err REGOFF_ERR
- r_eip REGOFF_EIP
- r_cs REGOFF_CS
- r_efl REGOFF_EFL
- r_uesp REGOFF_UESP
- r_ss REGOFF_SS
-
-\#define REGOFF_PC REGOFF_EIP
-
-tss_t
- tss_esp0 TSS_ESP0
- tss_ss0 TSS_SS0
- tss_ldt TSS_LDT
- tss_cr3 TSS_CR3
- tss_cs TSS_CS
- tss_ss TSS_SS
- tss_ds TSS_DS
- tss_es TSS_ES
- tss_fs TSS_FS
- tss_gs TSS_GS
- tss_ebp TSS_EBP
- tss_eip TSS_EIP
- tss_eflags TSS_EFL
- tss_esp TSS_ESP
- tss_eax TSS_EAX
- tss_ebx TSS_EBX
- tss_ecx TSS_ECX
- tss_edx TSS_EDX
- tss_esi TSS_ESI
- tss_edi TSS_EDI
-
-\#define LABEL_EBP _CONST(_MUL(2, LABEL_VAL_INCR) + LABEL_VAL)
-\#define LABEL_EBX _CONST(_MUL(3, LABEL_VAL_INCR) + LABEL_VAL)
-\#define LABEL_ESI _CONST(_MUL(4, LABEL_VAL_INCR) + LABEL_VAL)
-\#define LABEL_EDI _CONST(_MUL(5, LABEL_VAL_INCR) + LABEL_VAL)
-\#define T_EBP _CONST(T_LABEL + LABEL_EBP)
-\#define T_EBX _CONST(T_LABEL + LABEL_EBX)
-\#define T_ESI _CONST(T_LABEL + LABEL_ESI)
-\#define T_EDI _CONST(T_LABEL + LABEL_EDI)
-
-_klwp
- lwp_pcb.pcb_fsdesc LWP_PCB_FSDESC
- lwp_pcb.pcb_gsdesc LWP_PCB_GSDESC
- lwp_pcb.pcb_drstat LWP_PCB_DRSTAT
- lwp_pcb.pcb_flags PCB_FLAGS
- lwp_pcb.pcb_fpu LWP_PCB_FPU
- lwp_pcb.pcb_fpu.fpu_regs LWP_FPU_REGS
- lwp_pcb.pcb_fpu.fpu_flags LWP_FPU_FLAGS
- lwp_pcb.pcb_fpu.fpu_regs.kfpu_u.kfpu_fx LWP_FPU_CHIP_STATE
-
-pcb PCBSIZE
- pcb_drstat
- pcb_fsdesc
- pcb_gsdesc
- pcb_fpu.fpu_regs PCB_FPU_REGS
- pcb_fpu.fpu_flags PCB_FPU_FLAGS
-
-#if defined(__xpv)
-
-vcpu_info
- evtchn_upcall_pending VCPU_INFO_EVTCHN_UPCALL_PENDING
- evtchn_upcall_mask VCPU_INFO_EVTCHN_UPCALL_MASK
- arch.cr2 VCPU_INFO_ARCH_CR2
-
-#endif /* __xpv */
diff --git a/usr/src/uts/i86pc/ml/mpcore.s b/usr/src/uts/i86pc/ml/mpcore.s
index 68549c6e5d..249fd2aec0 100644
--- a/usr/src/uts/i86pc/ml/mpcore.s
+++ b/usr/src/uts/i86pc/ml/mpcore.s
@@ -34,10 +34,8 @@
#include <sys/privregs.h>
#include <sys/x86_archext.h>
-#if !defined(__lint)
#include <sys/segments.h>
#include "assym.h"
-#endif
/*
* Our assumptions:
@@ -60,24 +58,6 @@
*
*/
-#if defined(__lint)
-
-void
-real_mode_start_cpu(void)
-{}
-
-void
-real_mode_stop_cpu_stage1(void)
-{}
-
-void
-real_mode_stop_cpu_stage2(void)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(real_mode_start_cpu)
/*
@@ -118,29 +98,8 @@ pestart:
/*
* 16-bit protected mode is now active, so prepare to turn on long
* mode.
- *
- * Note that we currently assume that if we're attempting to run a
- * kernel compiled with (__amd64) #defined, the target CPU has long
- * mode support.
*/
-#if 0
- /*
- * If there's a chance this might not be true, the following test should
- * be done, with the no_long_mode branch then doing something
- * appropriate:
- */
-
- movl $0x80000000, %eax /* get largest extended CPUID */
- cpuid
- cmpl $0x80000000, %eax /* check if > 0x80000000 */
- jbe no_long_mode /* nope, no long mode */
- movl $0x80000001, %eax
- cpuid /* get extended feature flags */
- btl $29, %edx /* check for long mode */
- jnc no_long_mode /* long mode not supported */
-#endif
-
/*
* Add any initial cr4 bits
*/
@@ -335,200 +294,6 @@ kernel_cs_code:
SET_SIZE(real_mode_start_cpu)
-#elif defined(__i386)
-
- ENTRY_NP(real_mode_start_cpu)
-
-#if !defined(__GNUC_AS__)
-
- cli
- D16 movw %cs, %eax
- movw %eax, %ds /* load cs into ds */
- movw %eax, %ss /* and into ss */
-
- /*
- * Helps in debugging by giving us the fault address.
- *
- * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
- */
- D16 movl $0xffc, %esp
-
- D16 A16 lgdt %cs:GDTROFF
- D16 A16 lidt %cs:IDTROFF
- D16 A16 movl %cs:CR4OFF, %eax /* set up CR4, if desired */
- D16 andl %eax, %eax
- D16 A16 je no_cr4
-
- D16 movl %eax, %ecx
- D16 movl %cr4, %eax
- D16 orl %ecx, %eax
- D16 movl %eax, %cr4
-no_cr4:
- D16 A16 movl %cs:CR3OFF, %eax
- A16 movl %eax, %cr3
- movl %cr0, %eax
-
- /*
- * Enable protected-mode, paging, write protect, and alignment mask
- */
- D16 orl $[CR0_PG|CR0_PE|CR0_WP|CR0_AM], %eax
- movl %eax, %cr0
- jmp pestart
-
-pestart:
- D16 pushl $KCS_SEL
- D16 pushl $kernel_cs_code
- D16 lret
- .globl real_mode_start_cpu_end
-real_mode_start_cpu_end:
- nop
-
- .globl kernel_cs_code
-kernel_cs_code:
- /*
- * At this point we are running with the kernel's %cs and a proper %eip.
- *
- * We are no longer executing from the copy in the real mode platter,
- * but from the original location where boot loaded us.
- *
- * By this time GDT and IDT are loaded as is cr3.
- */
- movw $KFS_SEL,%eax
- movw %eax,%fs
- movw $KGS_SEL,%eax
- movw %eax,%gs
- movw $KDS_SEL,%eax
- movw %eax,%ds
- movw %eax,%es
- movl %gs:CPU_TSS,%esi
- movw %eax,%ss
- movl TSS_ESP0(%esi),%esp
- movw $KTSS_SEL,%ax
- ltr %ax
- xorw %ax, %ax /* clear LDTR */
- lldt %ax
- movl %cr0,%edx
- andl $-1![CR0_TS|CR0_EM],%edx /* clear emulate math chip bit */
- orl $[CR0_MP|CR0_NE],%edx
- movl %edx,%cr0 /* set machine status word */
-
- /*
- * Before going any further, enable usage of the page table NX bit if
- * that's how our page tables are set up.
- */
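In C, the test and MSR update that follow are roughly this (a sketch;
is_x86_feature() is the C-side accessor for the x86_featureset bitmap):

	if (is_x86_feature(x86_featureset, X86FSET_NX) &&
	    (getcr4() & CR4_PAE) != 0) {
		wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) | AMD_EFER_NXE);
	}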
- bt $X86FSET_NX, x86_featureset
- jnc 1f
- movl %cr4, %ecx
- andl $CR4_PAE, %ecx
- jz 1f
- movl $MSR_AMD_EFER, %ecx
- rdmsr
- orl $AMD_EFER_NXE, %eax
- wrmsr
-1:
- movl %gs:CPU_THREAD, %eax /* get thread ptr */
- call *T_PC(%eax) /* call mp_startup */
- /* not reached */
- int $20 /* whoops, returned somehow! */
-
-#else
-
- cli
- mov %cs, %ax
- mov %eax, %ds /* load cs into ds */
- mov %eax, %ss /* and into ss */
-
- /*
- * Helps in debugging by giving us the fault address.
- *
- * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
- */
- D16 mov $0xffc, %esp
-
- D16 A16 lgdtl %cs:GDTROFF
- D16 A16 lidtl %cs:IDTROFF
- D16 A16 mov %cs:CR4OFF, %eax /* set up CR4, if desired */
- D16 and %eax, %eax
- D16 A16 je no_cr4
-
- D16 mov %eax, %ecx
- D16 mov %cr4, %eax
- D16 or %ecx, %eax
- D16 mov %eax, %cr4
-no_cr4:
- D16 A16 mov %cs:CR3OFF, %eax
- A16 mov %eax, %cr3
- mov %cr0, %eax
-
- /*
- * Enable protected-mode, paging, write protect, and alignment mask
- */
- D16 or $(CR0_PG|CR0_PE|CR0_WP|CR0_AM), %eax
- mov %eax, %cr0
- jmp pestart
-
-pestart:
- D16 pushl $KCS_SEL
- D16 pushl $kernel_cs_code
- D16 lret
- .globl real_mode_start_cpu_end
-real_mode_start_cpu_end:
- nop
- .globl kernel_cs_code
-kernel_cs_code:
- /*
- * At this point we are running with the kernel's %cs and a proper %eip.
- *
- * We are no longer executing from the copy in the real mode platter,
- * but from the original location where boot loaded us.
- *
- * By this time GDT and IDT are loaded as is cr3.
- */
- mov $KFS_SEL, %ax
- mov %eax, %fs
- mov $KGS_SEL, %ax
- mov %eax, %gs
- mov $KDS_SEL, %ax
- mov %eax, %ds
- mov %eax, %es
- mov %gs:CPU_TSS, %esi
- mov %eax, %ss
- mov TSS_ESP0(%esi), %esp
- mov $(KTSS_SEL), %ax
- ltr %ax
- xorw %ax, %ax /* clear LDTR */
- lldt %ax
- mov %cr0, %edx
- and $~(CR0_TS|CR0_EM), %edx /* clear emulate math chip bit */
- or $(CR0_MP|CR0_NE), %edx
- mov %edx, %cr0 /* set machine status word */
-
- /*
- * Before going any further, enable usage of the page table NX bit if
- * that's how our page tables are set up. (PCIDE is enabled later on.)
- */
- bt $X86FSET_NX, x86_featureset
- jnc 1f
- movl %cr4, %ecx
- andl $CR4_PAE, %ecx
- jz 1f
- movl $MSR_AMD_EFER, %ecx
- rdmsr
- orl $AMD_EFER_NXE, %eax
- wrmsr
-1:
- mov %gs:CPU_THREAD, %eax /* get thread ptr */
- call *T_PC(%eax) /* call mp_startup */
- /* not reached */
- int $20 /* whoops, returned somehow! */
-#endif
-
- SET_SIZE(real_mode_start_cpu)
-
-#endif /* __amd64 */
-
-#if defined(__amd64)
-
ENTRY_NP(real_mode_stop_cpu_stage1)
#if !defined(__GNUC_AS__)
@@ -580,52 +345,6 @@ real_mode_stop_cpu_stage1_end:
SET_SIZE(real_mode_stop_cpu_stage1)
-#elif defined(__i386)
-
- ENTRY_NP(real_mode_stop_cpu_stage1)
-
-#if !defined(__GNUC_AS__)
-
- cli
- D16 movw %cs, %eax
- movw %eax, %ds /* load cs into ds */
- movw %eax, %ss /* and into ss */
-
- /*
- * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
- */
- movw $CPUHALTCODEOFF, %ax
- .byte 0xff, 0xe0 /* jmp *%ax */
-
-#else /* __GNUC_AS__ */
-
- cli
- mov %cs, %ax
- mov %eax, %ds /* load cs into ds */
- mov %eax, %ss /* and into ss */
-
- /*
- * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
- */
- movw $CPUHALTCODEOFF, %ax
- /*
- * The following indirect call is executed as part of starting up a CPU.
- * As such nothing else should be running on it or executing in the
- * system such that it is a viable Spectre v2 branch target injection
- * location. At least, in theory.
- */
- jmp *%ax
-
-#endif /* !__GNUC_AS__ */
-
- .globl real_mode_stop_cpu_stage1_end
-real_mode_stop_cpu_stage1_end:
- nop
-
- SET_SIZE(real_mode_stop_cpu_stage1)
-
-#endif /* __amd64 */
-
ENTRY_NP(real_mode_stop_cpu_stage2)
movw $0xdead, %ax
@@ -645,4 +364,3 @@ real_mode_stop_cpu_stage2_end:
SET_SIZE(real_mode_stop_cpu_stage2)
-#endif /* __lint */
diff --git a/usr/src/uts/i86pc/ml/notes.s b/usr/src/uts/i86pc/ml/notes.s
index 72ae373813..331e7e8197 100644
--- a/usr/src/uts/i86pc/ml/notes.s
+++ b/usr/src/uts/i86pc/ml/notes.s
@@ -21,21 +21,18 @@
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
-
-#include <sys/elf_notes.h>
-#pragma ident "%Z%%M% %I% %E% SMI"
+/*
+ * Copyright 2019 Joyent, Inc.
+ */
-#if defined(lint)
-#include <sys/types.h>
-#else
+#include <sys/elf_notes.h>
#include "assym.h"
-/
-/ Tell the booter that we'd like to load unix on a large page
-/ if the chip supports it.
-/
+/*
+ * Tell the booter that we'd like to load unix on a large page.
+ */
.section .note
.align 4
.4byte .name1_end - .name1_begin
@@ -49,4 +46,3 @@
.4byte FOUR_MEG
.desc1_end:
.align 4
-#endif
diff --git a/usr/src/uts/i86pc/ml/offsets.in b/usr/src/uts/i86pc/ml/offsets.in
index 475c5bac36..6c1de5c145 100644
--- a/usr/src/uts/i86pc/ml/offsets.in
+++ b/usr/src/uts/i86pc/ml/offsets.in
@@ -123,9 +123,7 @@ _kthread THREAD_SIZE
_tu._ts._t_post_sys T_POST_SYS
_tu._t_post_sys_ast T_POST_SYS_AST
t_copyops
-#ifdef __amd64
t_useracc
-#endif
as
a_hat
diff --git a/usr/src/uts/i86pc/ml/syscall_asm.s b/usr/src/uts/i86pc/ml/syscall_asm.s
deleted file mode 100644
index 5bb6bdea31..0000000000
--- a/usr/src/uts/i86pc/ml/syscall_asm.s
+++ /dev/null
@@ -1,744 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 by Delphix. All rights reserved.
- */
-
-/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
-/* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
-/* All Rights Reserved */
-
-/* Copyright (c) 1987, 1988 Microsoft Corporation */
-/* All Rights Reserved */
-
-#include <sys/asm_linkage.h>
-#include <sys/asm_misc.h>
-#include <sys/regset.h>
-#include <sys/psw.h>
-#include <sys/x86_archext.h>
-#include <sys/machbrand.h>
-#include <sys/privregs.h>
-
-#if defined(__lint)
-
-#include <sys/types.h>
-#include <sys/thread.h>
-#include <sys/systm.h>
-
-#else /* __lint */
-
-#include <sys/segments.h>
-#include <sys/pcb.h>
-#include <sys/trap.h>
-#include <sys/ftrace.h>
-#include <sys/traptrace.h>
-#include <sys/clock.h>
-#include <sys/panic.h>
-#include "assym.h"
-
-#endif /* __lint */
-
-/*
- * We implement two flavours of system call entry points
- *
- * - {int,lcall}/iret (i386)
- * - sysenter/sysexit (Pentium II and beyond)
- *
- * The basic pattern used in the handlers is to check to see if we can
- * do the fast (simple) version of the system call; if we can't, we use
- * various C routines that handle corner cases and debugging.
- *
- * To reduce the amount of assembler replication, yet keep the system call
- * implementations vaguely comprehensible, the common code in the body
- * of the handlers is broken up into a set of preprocessor definitions
- * below.
- */
-
-/*
- * When we have SYSCALLTRACE defined, we sneak an extra
- * predicate into a couple of tests.
- */
-#if defined(SYSCALLTRACE)
-#define ORL_SYSCALLTRACE(r32) \
- orl syscalltrace, r32
-#else
-#define ORL_SYSCALLTRACE(r32)
-#endif
-
-/*
- * This check is false whenever we want to go fast i.e.
- *
- * if (code >= NSYSCALL ||
- * t->t_pre_sys || (t->t_proc_flag & TP_WATCHPT) != 0)
- * do full version
- * #ifdef SYSCALLTRACE
- * if (syscalltrace)
- * do full version
- * #endif
- *
- * Preconditions:
- * - t curthread
- * - code contains the syscall number
- * Postconditions:
- * - %ecx and %edi are smashed
- * - condition code flag ZF is cleared if pre-sys is too complex
- */
-#define CHECK_PRESYS_NE(t, code) \
- movzbl T_PRE_SYS(t), %edi; \
- movzwl T_PROC_FLAG(t), %ecx; \
- andl $TP_WATCHPT, %ecx; \
- orl %ecx, %edi; \
- cmpl $NSYSCALL, code; \
- setae %cl; \
- movzbl %cl, %ecx; \
- orl %ecx, %edi; \
- ORL_SYSCALLTRACE(%edi)
-
-/*
- * Check if a brand_mach_ops callback is defined for the specified callback_id
- * type. If so invoke it with the user's %gs value loaded and the following
- * data on the stack:
- * --------------------------------------
- * | user's %ss |
- * | | user's %esp |
- * | | EFLAGS register |
- * | | user's %cs |
- * | | user's %eip (user return address) |
- * | | 'scratch space' |
- * | | user's %ebx |
- * | | user's %gs selector |
- * v | lwp pointer |
- * | callback wrapper return addr |
- * --------------------------------------
- *
- * If the brand code returns, we assume that we are meant to execute the
- * normal system call path.
- *
- * The interface to the brand callbacks on the 32-bit kernel assumes %ebx
- * is available as a scratch register within the callback. If the callback
- * returns within the kernel then this macro will restore %ebx. If the
- * callback is going to return directly to userland then it should restore
- * %ebx before returning to userland.
- */
-#define BRAND_CALLBACK(callback_id) \
- subl $4, %esp /* save some scratch space */ ;\
- pushl %ebx /* save %ebx to use for scratch */ ;\
- pushl %gs /* save the user %gs */ ;\
- movl $KGS_SEL, %ebx ;\
- movw %bx, %gs /* switch to the kernel's %gs */ ;\
- movl %gs:CPU_THREAD, %ebx /* load the thread pointer */ ;\
- movl T_LWP(%ebx), %ebx /* load the lwp pointer */ ;\
- pushl %ebx /* push the lwp pointer */ ;\
- movl LWP_PROCP(%ebx), %ebx /* load the proc pointer */ ;\
- movl P_BRAND(%ebx), %ebx /* load the brand pointer */ ;\
- movl B_MACHOPS(%ebx), %ebx /* load the machops pointer */ ;\
- movl _CONST(_MUL(callback_id, CPTRSIZE))(%ebx), %ebx ;\
- cmpl $0, %ebx ;\
- je 1f ;\
- movl %ebx, 12(%esp) /* save callback to scratch */ ;\
- movl 4(%esp), %ebx /* grab the user %gs */ ;\
- movw %bx, %gs /* restore the user %gs */ ;\
- call *12(%esp) /* call callback in scratch */ ;\
-1: movl 4(%esp), %ebx /* restore user %gs (re-do if */ ;\
- movw %bx, %gs /* branch due to no callback) */ ;\
- movl 8(%esp), %ebx /* restore user's %ebx */ ;\
- addl $16, %esp /* restore stack ptr */
-
-#define MSTATE_TRANSITION(from, to) \
- pushl $to; \
- pushl $from; \
- call syscall_mstate; \
- addl $0x8, %esp
-
-/*
- * aka CPU_STATS_ADDQ(CPU, sys.syscall, 1)
- * This must be called with interrupts or preemption disabled.
- */
-#define CPU_STATS_SYS_SYSCALL_INC \
- addl $1, %gs:CPU_STATS_SYS_SYSCALL; \
- adcl $0, %gs:CPU_STATS_SYS_SYSCALL+4;
-
-#if !defined(__lint)
-
-/*
- * ASSERT(lwptoregs(lwp) == rp);
- *
- * this may seem obvious, but very odd things happen if this
- * assertion is false
- *
- * Preconditions:
- * -none-
- * Postconditions (if assertion is true):
- * %esi and %edi are smashed
- */
-#if defined(DEBUG)
-
-__lwptoregs_msg:
- .string "syscall_asm.s:%d lwptoregs(%p) [%p] != rp [%p]"
-
-#define ASSERT_LWPTOREGS(t, rp) \
- movl T_LWP(t), %esi; \
- movl LWP_REGS(%esi), %edi; \
- cmpl rp, %edi; \
- je 7f; \
- pushl rp; \
- pushl %edi; \
- pushl %esi; \
- pushl $__LINE__; \
- pushl $__lwptoregs_msg; \
- call panic; \
-7:
-#else
-#define ASSERT_LWPTOREGS(t, rp)
-#endif
-
-#endif /* __lint */
-
-/*
- * This is an assembler version of this fragment:
- *
- * lwp->lwp_state = LWP_SYS;
- * lwp->lwp_ru.sysc++;
- * lwp->lwp_eosys = NORMALRETURN;
- * lwp->lwp_ap = argp;
- *
- * Preconditions:
- * -none-
- * Postconditions:
- * -none-
- */
-#define SET_LWP(lwp, argp) \
- movb $LWP_SYS, LWP_STATE(lwp); \
- addl $1, LWP_RU_SYSC(lwp); \
- adcl $0, LWP_RU_SYSC+4(lwp); \
- movb $NORMALRETURN, LWP_EOSYS(lwp); \
- movl argp, LWP_AP(lwp)
-
-/*
- * Set up the thread, lwp, find the handler, and copy
- * in the arguments from userland to the kernel stack.
- *
- * Preconditions:
- * - %eax contains the syscall number
- * Postconditions:
- * - %eax contains a pointer to the sysent structure
- * - %ecx is zeroed
- * - %esi, %edi are smashed
- * - %esp is SYS_DROPped ready for the syscall
- */
-#define SIMPLE_SYSCALL_PRESYS(t, faultlabel) \
- movl T_LWP(t), %esi; \
- movw %ax, T_SYSNUM(t); \
- subl $SYS_DROP, %esp; \
- shll $SYSENT_SIZE_SHIFT, %eax; \
- SET_LWP(%esi, %esp); \
- leal sysent(%eax), %eax; \
- movzbl SY_NARG(%eax), %ecx; \
- testl %ecx, %ecx; \
- jz 4f; \
- movl %esp, %edi; \
- movl SYS_DROP + REGOFF_UESP(%esp), %esi; \
- movl $faultlabel, T_LOFAULT(t); \
- addl $4, %esi; \
- rep; \
- smovl; \
- movl %ecx, T_LOFAULT(t); \
-4:
-
-/*
- * Check to see if a simple return is possible i.e.
- *
- * if ((t->t_post_sys_ast | syscalltrace) != 0)
- * do full version;
- *
- * Preconditions:
- * - t is curthread
- * Postconditions:
- * - condition code NE is set if post-sys is too complex
- * - rtmp is zeroed if it isn't (we rely on this!)
- */
-#define CHECK_POSTSYS_NE(t, rtmp) \
- xorl rtmp, rtmp; \
- ORL_SYSCALLTRACE(rtmp); \
- orl T_POST_SYS_AST(t), rtmp; \
- cmpl $0, rtmp
-
-/*
- * Fix up the lwp, thread, and eflags for a successful return
- *
- * Preconditions:
- * - zwreg contains zero
- * Postconditions:
- * - %esp has been unSYS_DROPped
- * - %esi is smashed (points to lwp)
- */
-#define SIMPLE_SYSCALL_POSTSYS(t, zwreg) \
- movl T_LWP(t), %esi; \
- addl $SYS_DROP, %esp; \
- movw zwreg, T_SYSNUM(t); \
- movb $LWP_USER, LWP_STATE(%esi); \
- andb $_CONST(0xffff - PS_C), REGOFF_EFL(%esp)
-
-/*
- * System call handler. This is the destination of both the call
- * gate (lcall 0x27) _and_ the interrupt gate (int 0x91). For our purposes,
- * there are two significant differences between an interrupt gate and a call
- * gate:
- *
- * 1) An interrupt gate runs the handler with interrupts disabled, whereas a
- * call gate runs the handler with whatever EFLAGS settings were in effect at
- * the time of the call.
- *
- * 2) An interrupt gate pushes the contents of the EFLAGS register at the time
- * of the interrupt onto the stack, whereas a call gate does not.
- *
- * Because we use the following code sequence to handle system calls made from
- * _both_ a call gate _and_ an interrupt gate, these two differences must be
- * respected. With regard to number 1) above, the handler must ensure that a sane
- * EFLAGS snapshot is stored on the stack so that when the kernel returns back
- * to the user via iret (which returns to user with the EFLAGS value saved on
- * the stack), interrupts are re-enabled.
- *
- * With regard to number 2) above, the handler must always put a current snapshot
- * of EFLAGS onto the stack in the appropriate place. If we came in via an
- * interrupt gate, we will be clobbering the EFLAGS value that was pushed by
- * the interrupt gate. This is OK, as the only bit that was changed by the
- * hardware was the IE (interrupt enable) bit, which for an interrupt gate is
- * now off. If we were to do nothing, the stack would contain an EFLAGS with
- * IE off, resulting in us eventually returning back to the user with interrupts
- * disabled. The solution is to turn on the IE bit in the EFLAGS value saved on
- * the stack.
- *
- * Another subtlety which deserves mention is the difference between the two
- * descriptors. The call gate descriptor is set to instruct the hardware to copy
- * one parameter from the user stack to the kernel stack, whereas the interrupt
- * gate descriptor doesn't use the parameter passing mechanism at all. The
- * kernel doesn't actually use the parameter that is copied by the hardware; the
- * only reason it does this is so that there is a space on the stack large
- * enough to hold an EFLAGS register value, which happens to be in the correct
- * place for use by iret when we go back to userland. How convenient.
- *
- * Stack frame description in syscall() and callees.
- *
- * |------------|
- * | regs | +(8*4)+4 registers
- * |------------|
- * | 8 args | <- %esp MAXSYSARGS (currently 8) arguments
- * |------------|
- *
- */
-#define SYS_DROP _CONST(_MUL(MAXSYSARGS, 4))
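For reference, a sketch (not part of this file) of how a 32-bit userland
stub might enter through the interrupt gate; with the i386 calling
convention, the stub's own arguments are already on the user stack where
the handler's argument copyin expects to find them:

	long
	syscall_int91(long sysnum, ...)
	{
		long rval;

		/* 0x91 is T_SYSCALLINT, the syscall soft interrupt vector */
		__asm__ __volatile__("int $0x91"
		    : "=a" (rval)	/* rval1 comes back in %eax */
		    : "a" (sysnum)	/* syscall number goes in %eax */
		    : "ecx", "edx", "cc", "memory");
		return (rval);
	}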
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-sys_call()
-{}
-
-void
-_allsyscalls()
-{}
-
-size_t _allsyscalls_size;
-
-#else /* __lint */
-
- ENTRY_NP2(brand_sys_call, _allsyscalls)
- BRAND_CALLBACK(BRAND_CB_SYSCALL)
-
- ALTENTRY(sys_call)
- / on entry eax = system call number
-
- / set up the stack to look as in reg.h
- subl $8, %esp / pad the stack with ERRCODE and TRAPNO
-
- SYSCALL_PUSH
-
-#ifdef TRAPTRACE
- TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_SYSCALL) / Uses labels "8" and "9"
- TRACE_REGS(%edi, %esp, %ebx, %ecx) / Uses label "9"
- pushl %eax
- TRACE_STAMP(%edi) / Clobbers %eax, %edx, uses "9"
- popl %eax
- movl %eax, TTR_SYSNUM(%edi)
-#endif
-
-_watch_do_syscall:
- movl %esp, %ebp
-
- / Interrupts may be enabled here, so we must make sure this thread
- / doesn't migrate off the CPU while it updates the CPU stats.
- /
- / XXX This is only true if we got here via call gate thru the LDT for
- / old style syscalls. Perhaps this preempt++-- will go away soon?
- movl %gs:CPU_THREAD, %ebx
- addb $1, T_PREEMPT(%ebx)
- CPU_STATS_SYS_SYSCALL_INC
- subb $1, T_PREEMPT(%ebx)
-
- ENABLE_INTR_FLAGS
-
- pushl %eax / preserve across mstate call
- MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
- popl %eax
-
- movl %gs:CPU_THREAD, %ebx
-
- ASSERT_LWPTOREGS(%ebx, %esp)
-
- CHECK_PRESYS_NE(%ebx, %eax)
- jne _full_syscall_presys
- SIMPLE_SYSCALL_PRESYS(%ebx, _syscall_fault)
-
-_syslcall_call:
- call *SY_CALLC(%eax)
-
-_syslcall_done:
- CHECK_POSTSYS_NE(%ebx, %ecx)
- jne _full_syscall_postsys
- SIMPLE_SYSCALL_POSTSYS(%ebx, %cx)
- movl %eax, REGOFF_EAX(%esp)
- movl %edx, REGOFF_EDX(%esp)
-
- MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)
-
- /
- / get back via iret
- /
- CLI(%edx)
- jmp sys_rtt_syscall
-
-_full_syscall_presys:
- movl T_LWP(%ebx), %esi
- subl $SYS_DROP, %esp
- movb $LWP_SYS, LWP_STATE(%esi)
- pushl %esp
- pushl %ebx
- call syscall_entry
- addl $8, %esp
- jmp _syslcall_call
-
-_full_syscall_postsys:
- addl $SYS_DROP, %esp
- pushl %edx
- pushl %eax
- pushl %ebx
- call syscall_exit
- addl $12, %esp
- MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)
- jmp _sys_rtt
-
-_syscall_fault:
- push $0xe / EFAULT
- call set_errno
- addl $4, %esp
- xorl %eax, %eax / fake syscall_err()
- xorl %edx, %edx
- jmp _syslcall_done
- SET_SIZE(sys_call)
- SET_SIZE(brand_sys_call)
-
-#endif /* __lint */
-
-/*
- * System call handler via the sysenter instruction
- *
- * Here's how syscall entry usually works (see sys_call for details).
- *
- * There, the caller (lcall or int) in userland has arranged that:
- *
- * - %eax contains the syscall number
- * - the user stack contains the args to the syscall
- *
- * Normally the lcall instruction into the call gate causes the processor
- * to push %ss, %esp, <top-of-stack>, %cs, %eip onto the kernel stack.
- * The sys_call handler then leaves space for r_trapno and r_err, and
- * pusha's {%eax, %ecx, %edx, %ebx, %esp, %ebp, %esi, %edi}, followed
- * by %ds, %es, %fs and %gs to capture a 'struct regs' on the stack.
- * Then the kernel sets %ds, %es and %gs to kernel selectors, and finally
- * extracts %efl and puts it into r_efl (which happens to live at the offset
- * that <top-of-stack> was copied into). Note that the value in r_efl has
- * the IF (interrupt enable) flag turned on. (The int instruction into the
- * interrupt gate does essentially the same thing, only instead of
- * <top-of-stack> we get eflags - see comment above.)
- *
- * In the sysenter case, things are a lot more primitive.
- *
- * The caller in userland has arranged that:
- *
- * - %eax contains the syscall number
- * - %ecx contains the user %esp
- * - %edx contains the return %eip
- * - the user stack contains the args to the syscall
- *
- * e.g.
- * <args on the stack>
- * mov $SYS_callnum, %eax
- * mov $1f, %edx / return %eip
- * mov %esp, %ecx / return %esp
- * sysenter
- * 1:
- *
- * Hardware and (privileged) initialization code have arranged that by
- * the time the sysenter instruction completes:
- *
- * - %eip is pointing to sys_sysenter (below).
- * - %cs and %ss are set to kernel text and stack (data) selectors.
- * - %esp is pointing at the lwp's stack
- * - Interrupts have been disabled.
- *
- * The task for the sysenter handler is:
- *
- * - recreate the same regs structure on the stack and the same
- * kernel state as if we'd come in on an lcall
- * - do the normal work of a syscall
- * - execute the system call epilogue, use sysexit to return to userland.
- *
- * Note that we are unable to return both "rvals" to userland with this
- * call, as %edx is used by the sysexit instruction.
- *
- * One final complication in this routine is its interaction with
- * single-stepping in a debugger. For most of the system call mechanisms,
- * the CPU automatically clears the single-step flag before we enter the
- * kernel. The sysenter mechanism does not clear the flag, so a user
- * single-stepping through a libc routine may suddenly find themselves
- * single-stepping through the kernel. To detect this, kmdb compares the
- * trap %pc to the [brand_]sys_enter addresses on each single-step trap.
- * If it finds that we have single-stepped to a sysenter entry point, it
- * explicitly clears the flag and executes the sys_sysenter routine.
- *
- * One final complication in this final complication is the fact that we
- * have two different entry points for sysenter: brand_sys_sysenter and
- * sys_sysenter. If we enter at brand_sys_sysenter and start single-stepping
- * through the kernel with kmdb, we will eventually hit the instruction at
- * sys_sysenter. kmdb cannot distinguish between that valid single-step
- * and the undesirable one mentioned above. To avoid this situation, we
- * simply add a jump over the instruction at sys_sysenter to make it
- * impossible to single-step to it.
- */
-#if defined(__lint)
-
-void
-sys_sysenter()
-{}
-
-#else /* __lint */
-
- ENTRY_NP(brand_sys_sysenter)
- pushl %edx
- BRAND_CALLBACK(BRAND_CB_SYSENTER)
- popl %edx
- /*
- * Jump over sys_sysenter to allow single-stepping as described
- * above.
- */
- ja 1f
-
- ALTENTRY(sys_sysenter)
- nop
-1:
- /
- / do what the call gate would've done to the stack ..
- /
- pushl $UDS_SEL / (really %ss, but it's the same ..)
- pushl %ecx / userland makes this a copy of %esp
- pushfl
- orl $PS_IE, (%esp) / turn interrupts on when we return to user
- pushl $UCS_SEL
- pushl %edx / userland makes this a copy of %eip
- /
- / done. finish building the stack frame
- /
- subl $8, %esp / leave space for ERR and TRAPNO
-
- SYSENTER_PUSH
-
-#ifdef TRAPTRACE
- TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_SYSENTER) / uses labels 8 and 9
- TRACE_REGS(%edi, %esp, %ebx, %ecx) / uses label 9
- pushl %eax
- TRACE_STAMP(%edi) / clobbers %eax, %edx, uses label 9
- popl %eax
- movl %eax, TTR_SYSNUM(%edi)
-#endif
- movl %esp, %ebp
-
- CPU_STATS_SYS_SYSCALL_INC
-
- ENABLE_INTR_FLAGS
-
- pushl %eax / preserve across mstate call
- MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
- popl %eax
-
- movl %gs:CPU_THREAD, %ebx
-
- ASSERT_LWPTOREGS(%ebx, %esp)
-
- CHECK_PRESYS_NE(%ebx, %eax)
- jne _full_syscall_presys
- SIMPLE_SYSCALL_PRESYS(%ebx, _syscall_fault)
-
-_sysenter_call:
- call *SY_CALLC(%eax)
-
-_sysenter_done:
- CHECK_POSTSYS_NE(%ebx, %ecx)
- jne _full_syscall_postsys
- SIMPLE_SYSCALL_POSTSYS(%ebx, %cx)
- /
- / sysexit uses %edx to restore %eip, so we can't use it
- / to return a value, sigh.
- /
- movl %eax, REGOFF_EAX(%esp)
- / movl %edx, REGOFF_EDX(%esp)
-
- / Interrupts will be turned on by the 'sti' executed just before
- / sysexit. The following ensures that restoring the user's EFLAGS
- / doesn't enable interrupts too soon.
- andl $_BITNOT(PS_IE), REGOFF_EFL(%esp)
-
- MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)
-
- cli
-
- SYSCALL_POP
-
- popl %edx / sysexit: %edx -> %eip
- addl $4, %esp / get CS off the stack
- popfl / EFL
- popl %ecx / sysexit: %ecx -> %esp
- sti
- sysexit
- SET_SIZE(sys_sysenter)
- SET_SIZE(brand_sys_sysenter)
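What sysexit itself does is narrow, which is why %edx and %ecx are
reloaded just above; as a descriptive note (hedged, per the architectural
definition of the 32-bit instruction):

	/*
	 * sysexit (32-bit), approximately:
	 *	CS  <- MSR_SYSENTER_CS + 16	(user code selector)
	 *	SS  <- MSR_SYSENTER_CS + 24	(user stack selector)
	 *	EIP <- EDX
	 *	ESP <- ECX
	 */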
-#endif /* __lint */
-
-#if defined(__lint)
-/*
- * System call via an int80. This entry point is only used by the Linux
- * application environment. Unlike the sysenter path, there is no default
- * action to take if no callback is registered for this process.
- */
-void
-sys_int80()
-{}
-
-#else /* __lint */
-
- ENTRY_NP(brand_sys_int80)
- BRAND_CALLBACK(BRAND_CB_INT80)
-
- ALTENTRY(sys_int80)
- /*
- * We hit an int80, but this process isn't of a brand with an int80
- * handler. Bad process! Make it look as if the INT failed.
- * Modify %eip to point before the INT, push the expected error
- * code and fake a GP fault.
- *
- */
- subl $2, (%esp) /* int insn 2-bytes */
- pushl $_CONST(_MUL(T_INT80, GATE_DESC_SIZE) + 2)
- jmp gptrap / GP fault
- SET_SIZE(sys_int80)
- SET_SIZE(brand_sys_int80)
-
-/*
- * Declare a uintptr_t which covers the entire pc range of syscall
- * handlers for the stack walkers that need this.
- */
- .align CPTRSIZE
- .globl _allsyscalls_size
- .type _allsyscalls_size, @object
-_allsyscalls_size:
- .NWORD . - _allsyscalls
- SET_SIZE(_allsyscalls_size)
-
-#endif /* __lint */
-
-/*
- * These are the thread context handlers for lwps using sysenter/sysexit.
- */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-sep_save(void *ksp)
-{}
-
-/*ARGSUSED*/
-void
-sep_restore(void *ksp)
-{}
-
-#else /* __lint */
-
- /*
- * setting this value to zero as we switch away causes the
- * stack-pointer-on-sysenter to be NULL, ensuring that we
- * don't silently corrupt another (preempted) thread stack
- * when running an lwp that (somehow) didn't get sep_restore'd
- */
- ENTRY_NP(sep_save)
- xorl %edx, %edx
- xorl %eax, %eax
- movl $MSR_INTC_SEP_ESP, %ecx
- wrmsr
- ret
- SET_SIZE(sep_save)
-
- /*
- * Update the kernel stack pointer as we resume onto this cpu.
- */
- ENTRY_NP(sep_restore)
- movl 4(%esp), %eax /* per-lwp kernel sp */
- xorl %edx, %edx
- movl $MSR_INTC_SEP_ESP, %ecx
- wrmsr
- ret
- SET_SIZE(sep_restore)
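In C terms (a sketch; wrmsr() is the kernel MSR helper), this pair of
context handlers is simply:

	void
	sep_save(void *ksp)
	{
		wrmsr(MSR_INTC_SEP_ESP, 0);	/* NULL sysenter %esp */
	}

	void
	sep_restore(void *ksp)
	{
		wrmsr(MSR_INTC_SEP_ESP, (uint64_t)(uintptr_t)ksp);
	}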
-
-#endif /* __lint */
-
-/*
- * Call syscall(). Called from trap() on watchpoint at lcall 0,7
- */
-
-#if defined(__lint)
-
-void
-watch_syscall(void)
-{}
-
-#else /* __lint */
-
- ENTRY_NP(watch_syscall)
- CLI(%eax)
- movl %gs:CPU_THREAD, %ebx
- movl T_STACK(%ebx), %esp / switch to the thread stack
- movl REGOFF_EAX(%esp), %eax / recover original syscall#
- jmp _watch_do_syscall
- SET_SIZE(watch_syscall)
-
-#endif /* __lint */
diff --git a/usr/src/uts/i86pc/ml/syscall_asm_amd64.s b/usr/src/uts/i86pc/ml/syscall_asm_amd64.s
index 9ef517e2f6..fed9afbc8d 100644
--- a/usr/src/uts/i86pc/ml/syscall_asm_amd64.s
+++ b/usr/src/uts/i86pc/ml/syscall_asm_amd64.s
@@ -31,14 +31,6 @@
#include <sys/psw.h>
#include <sys/machbrand.h>
-#if defined(__lint)
-
-#include <sys/types.h>
-#include <sys/thread.h>
-#include <sys/systm.h>
-
-#else /* __lint */
-
#include <sys/segments.h>
#include <sys/pcb.h>
#include <sys/trap.h>
@@ -54,8 +46,6 @@
#include "assym.h"
-#endif /* __lint */
-
/*
* We implement five flavours of system call entry points
*
@@ -286,8 +276,6 @@
#if defined(DEBUG)
-#if !defined(__lint)
-
__lwptoregs_msg:
.string "syscall_asm_amd64.s:%d lwptoregs(%p) [%p] != rp [%p]"
@@ -298,9 +286,7 @@ __no_rupdate_msg:
.string "syscall_asm_amd64.s:%d lwp %p, pcb_rupdate != 0"
__bad_ts_msg:
- .string "sysscall_asm_amd64.s:%d CR0.TS set on user return"
-
-#endif /* !__lint */
+ .string "syscall_asm_amd64.s:%d CR0.TS set on user return"
#define ASSERT_LWPTOREGS(lwp, rp) \
movq LWP_REGS(lwp), %r11; \
@@ -433,21 +419,6 @@ __bad_ts_msg:
#define XPV_SYSCALL_PROD /* nothing */
#endif
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-sys_syscall()
-{}
-
-void
-_allsyscalls()
-{}
-
-size_t _allsyscalls_size;
-
-#else /* __lint */
-
ENTRY_NP2(brand_sys_syscall,_allsyscalls)
SWAPGS /* kernel gsbase */
XPV_SYSCALL_PROD
@@ -799,17 +770,6 @@ _syscall_post_call:
SET_SIZE(sys_syscall)
SET_SIZE(brand_sys_syscall)
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-sys_syscall32()
-{}
-
-#else /* __lint */
-
ENTRY_NP(brand_sys_syscall32)
SWAPGS /* kernel gsbase */
XPV_TRAP_POP
@@ -1069,8 +1029,6 @@ _full_syscall_postsys32:
SET_SIZE(sys_syscall32)
SET_SIZE(brand_sys_syscall32)
-#endif /* __lint */
-
/*
* System call handler via the sysenter instruction
* Used only for 32-bit system calls on the 64-bit kernel.
@@ -1111,13 +1069,6 @@ _full_syscall_postsys32:
* one mentioned above. To avoid this situation, we simply add a jump over the
* instruction at sys_sysenter to make it impossible to single-step to it.
*/
-#if defined(__lint)
-
-void
-sys_sysenter()
-{}
-
-#else /* __lint */
ENTRY_NP(brand_sys_sysenter)
SWAPGS /* kernel gsbase */
@@ -1348,89 +1299,11 @@ sys_sysenter()
SET_SIZE(_sys_sysenter_post_swapgs)
SET_SIZE(brand_sys_sysenter)
-#endif /* __lint */
-
-#if defined(__lint)
-/*
- * System call via an int80. This entry point is only used by the Linux
- * application environment. Unlike the other entry points, there is no
- * default action to take if no callback is registered for this process.
- */
-void
-sys_int80()
-{}
-
-#else /* __lint */
-
- ENTRY_NP(brand_sys_int80)
- SWAPGS /* kernel gsbase */
- XPV_TRAP_POP
- call smap_enable
-
- /*
- * We first attempt to call the "b_int80" handler from the "struct
- * brand_mach_ops" for this brand. If no handler function is installed
- * for this brand, the BRAND_CALLBACK() macro returns here and we
- * check the lwp for a "lwp_brand_syscall" handler.
- */
- BRAND_CALLBACK(BRAND_CB_INT80, BRAND_URET_FROM_INTR_STACK())
-
- /*
- * Check to see if this lwp provides "lwp_brand_syscall". If so, we
- * will route this int80 through the regular system call handling path.
- */
- movq %r15, %gs:CPU_RTMP_R15
- movq %gs:CPU_THREAD, %r15
- movq T_LWP(%r15), %r15
- movq LWP_BRAND_SYSCALL(%r15), %r15
- testq %r15, %r15
- movq %gs:CPU_RTMP_R15, %r15
- jnz nopop_syscall_int
-
- /*
- * The brand provided neither a "b_int80", nor a "lwp_brand_syscall"
- * function, and has thus opted out of handling this trap.
- */
- SWAPGS /* user gsbase */
- jmp nopop_int80
-
- ENTRY_NP(sys_int80)
- /*
- * We hit an int80, but this process isn't of a brand with an int80
- * handler. Bad process! Make it look as if the INT failed.
- * Modify %rip to point before the INT, push the expected error
- * code and fake a GP fault. Note on 64-bit hypervisor we need
- * to undo the XPV_TRAP_POP and push rcx and r11 back on the stack
- * because gptrap will pop them again with its own XPV_TRAP_POP.
- */
- XPV_TRAP_POP
- call smap_enable
-nopop_int80:
- subq $2, (%rsp) /* int insn 2-bytes */
- pushq $_CONST(_MUL(T_INT80, GATE_DESC_SIZE) + 2)
-#if defined(__xpv)
- push %r11
- push %rcx
-#endif
- jmp gptrap / GP fault
- SET_SIZE(sys_int80)
- SET_SIZE(brand_sys_int80)
-#endif /* __lint */
-
-
/*
* This is the destination of the "int $T_SYSCALLINT" interrupt gate, used by
* the generic i386 libc to do system calls. We do a small amount of setup
* before jumping into the existing sys_syscall32 path.
*/
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-sys_syscall_int()
-{}
-
-#else /* __lint */
ENTRY_NP(brand_sys_syscall_int)
SWAPGS /* kernel gsbase */
@@ -1474,8 +1347,6 @@ nopop_syscall_int:
SET_SIZE(sys_syscall_int)
SET_SIZE(brand_sys_syscall_int)
-#endif /* __lint */
-
/*
* Legacy 32-bit applications and old libc implementations do lcalls;
* we should never get here because the LDT entry containing the syscall
@@ -1490,15 +1361,6 @@ nopop_syscall_int:
* instruction of this handler being either swapgs or cli.
*/
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-sys_lcall32()
-{}
-
-#else /* __lint */
-
ENTRY_NP(sys_lcall32)
SWAPGS /* kernel gsbase */
pushq $0
@@ -1523,26 +1385,10 @@ _allsyscalls_size:
.NWORD . - _allsyscalls
SET_SIZE(_allsyscalls_size)
-#endif /* __lint */
-
/*
* These are the thread context handlers for lwps using sysenter/sysexit.
*/
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-sep_save(void *ksp)
-{}
-
-/*ARGSUSED*/
-void
-sep_restore(void *ksp)
-{}
-
-#else /* __lint */
-
/*
* setting this value to zero as we switch away causes the
* stack-pointer-on-sysenter to be NULL, ensuring that we
@@ -1569,4 +1415,3 @@ sep_restore(void *ksp)
ret
SET_SIZE(sep_restore)
-#endif /* __lint */