author    Jerry Jelinek <jerry.jelinek@joyent.com>  2020-04-01 11:43:08 +0000
committer Jerry Jelinek <jerry.jelinek@joyent.com>  2020-04-01 11:43:08 +0000
commit    4c80b42683b88f933f87dbe79c0e94e336919938 (patch)
tree      df19ddfcca3c9e28551d014957000da52bd47995 /usr/src/uts/intel
parent    88606cbe3645730adccc57e29f4b829af51c05f4 (diff)
parent    5338faaac2dc1b2a16cb1a986233578834926ce0 (diff)
download  illumos-joyent-4c80b42683b88f933f87dbe79c0e94e336919938.tar.gz
[illumos-gate merge]
commit 5338faaac2dc1b2a16cb1a986233578834926ce0
    12423 ipf: variable may be used uninitialized
commit 9b0bb795691f70ec1b1796f6d15266f82d7a3200
    12349 clean up 32-bit assembly and lint
Conflicts:
    usr/src/uts/sparc/v9/ml/lock_prim.s
    usr/src/uts/intel/ia32/ml/lock_prim.s
    usr/src/uts/intel/ia32/ml/copy.s
    usr/src/uts/intel/Makefile.rules
    usr/src/uts/i86pc/ml/syscall_asm_amd64.s
    usr/src/uts/i86pc/ml/syscall_asm.s [deleted upstream]
Diffstat (limited to 'usr/src/uts/intel')
-rw-r--r--  usr/src/uts/intel/Makefile.files                |    8
-rw-r--r--  usr/src/uts/intel/Makefile.intel                |    9
-rw-r--r--  usr/src/uts/intel/Makefile.rules                |  196
-rw-r--r--  usr/src/uts/intel/amd64/krtld/kobj_crt.s        |   11
-rw-r--r--  usr/src/uts/intel/amd64/ml/amd64.il             |  218
-rw-r--r--  usr/src/uts/intel/brand/common/brand_solaris.s  |   76
-rw-r--r--  usr/src/uts/intel/dtrace/dtrace_asm.s           |  349
-rw-r--r--  usr/src/uts/intel/ia32/ml/copy.s                | 1269
-rw-r--r--  usr/src/uts/intel/ia32/ml/ddi_i86_asm.s         | 1157
-rw-r--r--  usr/src/uts/intel/ia32/ml/desctbls_asm.s        |  212
-rw-r--r--  usr/src/uts/intel/ia32/ml/exception.s           |  441
-rw-r--r--  usr/src/uts/intel/ia32/ml/float.s               |  162
-rw-r--r--  usr/src/uts/intel/ia32/ml/hypersubr.s           |  165
-rw-r--r--  usr/src/uts/intel/ia32/ml/i86_subr.s            | 2708
-rw-r--r--  usr/src/uts/intel/ia32/ml/ia32.il               |  200
-rw-r--r--  usr/src/uts/intel/ia32/ml/lock_prim.s           |  785
-rw-r--r--  usr/src/uts/intel/ia32/ml/modstubs.s            |    7
-rw-r--r--  usr/src/uts/intel/ia32/ml/ovbcopy.s             |   68
-rw-r--r--  usr/src/uts/intel/ia32/ml/sseblk.s              |  250
-rw-r--r--  usr/src/uts/intel/ia32/sys/Makefile             |    6
-rw-r--r--  usr/src/uts/intel/ia32/sys/kdi_regs.h           |   73
-rw-r--r--  usr/src/uts/intel/ia32/sys/privmregs.h          |   67
-rw-r--r--  usr/src/uts/intel/io/acpica/osl_ml.s            |   91
-rw-r--r--  usr/src/uts/intel/kdi/kdi_asm.s                 |    5
-rw-r--r--  usr/src/uts/intel/kdi/kdi_idthdl.s              |    5
25 files changed, 60 insertions, 8478 deletions
diff --git a/usr/src/uts/intel/Makefile.files b/usr/src/uts/intel/Makefile.files
index 057a89f138..baeccfaac8 100644
--- a/usr/src/uts/intel/Makefile.files
+++ b/usr/src/uts/intel/Makefile.files
@@ -64,14 +64,6 @@ DBOOT_OBJS += \
retpoline.o
#
-# 64-bit multiply/divide compiler helper routines
-# used only for ia32
-#
-
-SPECIAL_OBJS_32 += \
- muldiv.o
-
-#
# Generic-unix Module
#
GENUNIX_OBJS += \
diff --git a/usr/src/uts/intel/Makefile.intel b/usr/src/uts/intel/Makefile.intel
index aed47948a9..6e079138bc 100644
--- a/usr/src/uts/intel/Makefile.intel
+++ b/usr/src/uts/intel/Makefile.intel
@@ -110,13 +110,6 @@ DEF_BUILDS = $(DEF_BUILDS64)
ALL_BUILDS = $(ALL_BUILDS64)
#
-# x86 or amd64 inline templates
-#
-INLINES_32 = $(UTSBASE)/intel/ia32/ml/ia32.il
-INLINES_64 = $(UTSBASE)/intel/amd64/ml/amd64.il
-INLINES += $(INLINES_$(CLASS))
-
-#
# kernel-specific optimizations; override default in Makefile.master
#
@@ -130,7 +123,7 @@ COPTIMIZE = $(COPTFLAG_$(CLASS))
CFLAGS = $(CFLAGS_XARCH)
CFLAGS += $(COPTIMIZE)
-CFLAGS += $(INLINES) -D_ASM_INLINES
+CFLAGS += -D_ASM_INLINES
CFLAGS += $(CCMODE)
CFLAGS += $(SPACEFLAG)
CFLAGS += $(CCUNBOUND)
diff --git a/usr/src/uts/intel/Makefile.rules b/usr/src/uts/intel/Makefile.rules
index bdf0830e9c..763c448725 100644
--- a/usr/src/uts/intel/Makefile.rules
+++ b/usr/src/uts/intel/Makefile.rules
@@ -41,9 +41,7 @@
#
# Need a way to distinguish between the ia32 and amd64 subdirs.
#
-SUBARCH_DIR_32 = ia32
-SUBARCH_DIR_64 = amd64
-SUBARCH_DIR = $(SUBARCH_DIR_$(CLASS))
+SUBARCH_DIR = amd64
#
# Section 1a: C object build rules
@@ -56,9 +54,6 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/common/io/power/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
-$(OBJS_DIR)/%.o: $(SRC)/common/util/i386/%.s
- $(COMPILE.s) -o $@ $<
-
$(OBJS_DIR)/%.o: $(UTSBASE)/intel/brand/sn1/%.s
$(COMPILE.s) -o $@ $<
@@ -312,192 +307,3 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/intel/$(SUBARCH_DIR)/krtld/%.s
$(OBJS_DIR)/%.o: $(SRC)/common/util/$(SUBARCH_DIR)/%.c
$(COMPILE.c) $(KRTLD_INC_PATH) $(KRTLD_CPPFLAGS) -o $@ $<
$(CTFCONVERT_O)
-
-
-#
-# Section 1b: Lint `object' build rules.
-#
-$(LINTS_DIR)/%.ln: $(SRC)/common/fs/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/util/i386/%.s
- @($(LHEAD) $(LINT.s) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/brand/sn1/%.s
- @($(LHEAD) $(LINT.s) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/brand/solaris10/%.s
- @($(LHEAD) $(LINT.s) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/dtrace/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/dtrace/%.s
- @($(LHEAD) $(LINT.s) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/zfs/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/fs/proc/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/ia32/ml/%.s
- @($(LHEAD) $(LINT.s) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/ia32/os/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/ia32/promif/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/ia32/syscall/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/acpica/%.s
- @($(LHEAD) $(LINT.s) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/acpica/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/acpica/events/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/acpica/hardware/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/acpica/dispatcher/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/acpica/executer/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/acpica/parser/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/acpica/namespace/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/acpica/resources/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/acpica/tables/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/acpica/utilities/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/acpica/disassembler/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/amd8111s/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/amr/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/scsi/adapters/arcmsr/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/hotplug/pcicfg/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/hotplug/pci/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/intel_nb5000/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/intel_nhm/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/ipmi/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/mc/mc-amd/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/mc-amd/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/pci/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/pciex/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dktp/controller/ata/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dktp/dcdev/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dktp/disk/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dktp/drvobj/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dktp/hba/ghd/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/dnet/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/scsi/targets/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/vgatext/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/vmxnet/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/vmxnet3s/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/scsi/adapters/pvscsi/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/os/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/pcbe/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/promif/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/syscall/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/common/os/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/kdi/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/kdi/%.s
- @($(LHEAD) $(LINT.s) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/nskern/%.s
- @($(LHEAD) $(LINT.s) $< $(LTAIL))
-
-#
-# krtld lints
-#
-$(LINTS_DIR)/%.ln: $(UTSBASE)/common/krtld/%.c
- @($(LHEAD) $(LINT.c) $(KRTLD_INC_PATH) $(KRTLD_CPPFLAGS) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/$(SUBARCH_DIR)/krtld/%.c
- @($(LHEAD) $(LINT.c) $(KRTLD_INC_PATH) $(KRTLD_CPPFLAGS) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/$(SUBARCH_DIR)/krtld/%.s
- @($(LHEAD) $(LINT.s) $(KRTLD_INC_PATH) $(KRTLD_CPPFLAGS) $< $(LTAIL))
-
-$(LINTS_DIR)/%.ln: $(SRC)/common/util/$(SUBARCH_DIR)/%.c
- @($(LHEAD) $(LINT.c) $(KRTLD_INC_PATH) $(KRTLD_CPPFLAGS) $< $(LTAIL))
-
-$(OBJS_DIR)/kobj.ln := CPPFLAGS += -D_DBOOT
diff --git a/usr/src/uts/intel/amd64/krtld/kobj_crt.s b/usr/src/uts/intel/amd64/krtld/kobj_crt.s
index 96025df7ea..1480a4040e 100644
--- a/usr/src/uts/intel/amd64/krtld/kobj_crt.s
+++ b/usr/src/uts/intel/amd64/krtld/kobj_crt.s
@@ -37,15 +37,6 @@
* There is NO RETURN from exitto().
*/
-#if defined(lint)
-
-/* ARGSUSED */
-void
-exitto(caddr_t entrypoint)
-{}
-
-#else /* lint */
-
ENTRY(exitto)
@@ -67,5 +58,3 @@ exitto(caddr_t entrypoint)
SET_SIZE(exitto)
-#endif
-
diff --git a/usr/src/uts/intel/amd64/ml/amd64.il b/usr/src/uts/intel/amd64/ml/amd64.il
deleted file mode 100644
index 3e2a790729..0000000000
--- a/usr/src/uts/intel/amd64/ml/amd64.il
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-/*
- * Copyright 2019 Joyent, Inc.
- */
-
-/
-/ In-line functions for amd64 kernels.
-/
-
-/
-/ return current thread pointer
-/
-/ NOTE: the "0x18" should be replaced by the computed value of the
-/ offset of "cpu_thread" from the beginning of the struct cpu.
-/ Including "assym.h" does not work, however, since that stuff
-/ is PSM-specific and is only visible to the 'unix' build anyway.
-/ Same with current cpu pointer, where "0xc" should be replaced
-/ by the computed value of the offset of "cpu_self".
-/ Ugh -- what a disaster.
-/
- .inline threadp,0
- movq %gs:0x18, %rax
- .end
-
-/
-/ return current cpu pointer
-/
- .inline curcpup,0
- movq %gs:0x10, %rax
- .end
-
-/
-/ return caller
-/
- .inline caller,0
- movq 8(%rbp), %rax
- .end
-
-/
-/ convert ipl to spl. This is the identity function for i86
-/
- .inline ipltospl,0
- movq %rdi, %rax
- .end
-
-/
-/ Networking byte order functions (too bad, Intel has the wrong byte order)
-/
-
- .inline htonll,4
- movq %rdi, %rax
- bswapq %rax
- .end
-
- .inline ntohll,4
- movq %rdi, %rax
- bswapq %rax
- .end
-
- .inline htonl,4
- movl %edi, %eax
- bswap %eax
- .end
-
- .inline ntohl,4
- movl %edi, %eax
- bswap %eax
- .end
-
- .inline htons,4
- movl %edi, %eax
- bswap %eax
- shrl $16, %eax
- .end
-
- .inline ntohs,4
- movl %edi, %eax
- bswap %eax
- shrl $16, %eax
- .end
-
-/*
- * multiply two long numbers and yield a u_lonlong_t result
- * Provided to manipulate hrtime_t values.
- */
- /* XX64 These don't work correctly with SOS9 build 13.0 yet
- .inline mul32, 8
- xorl %edx, %edx
- movl %edi, %eax
- mull %esi
- shlq $32, %rdx
- orq %rdx, %rax
- ret
- .end
- */
-/*
- * Unlock hres_lock and increment the count value. (See clock.h)
- */
- .inline unlock_hres_lock, 0
- lock
- incl hres_lock
- .end
-
- .inline atomic_orb,8
- movl %esi, %eax
- lock
- orb %al,(%rdi)
- .end
-
- .inline atomic_andb,8
- movl %esi, %eax
- lock
- andb %al,(%rdi)
- .end
-
-/*
- * atomic inc/dec operations.
- * void atomic_inc16(uint16_t *addr) { ++*addr; }
- * void atomic_dec16(uint16_t *addr) { --*addr; }
- */
- .inline atomic_inc16,4
- lock
- incw (%rdi)
- .end
-
- .inline atomic_dec16,4
- lock
- decw (%rdi)
- .end
-
-/*
- * atomic bit clear
- */
- .inline atomic_btr32,8
- lock
- btrl %esi, (%rdi)
- setc %al
- .end
-
-/*
- * Call the pause instruction. To the Pentium 4 Xeon processor, it acts as
- * a hint that the code sequence is a busy spin-wait loop. Without a pause
- * instruction in these loops, the P4 Xeon processor may suffer a severe
- * penalty when exiting the loop because the processor detects a possible
- * memory violation. Inserting the pause instruction significantly reduces
- * the likelihood of a memory order violation, improving performance.
- * The pause instruction is a NOP on all other IA-32 processors.
- */
- .inline ht_pause, 0
- pause
- .end
-
-/*
- * inlines for update_sregs().
- */
- .inline __set_ds, 0
- movw %di, %ds
- .end
-
- .inline __set_es, 0
- movw %di, %es
- .end
-
- .inline __set_fs, 0
- movw %di, %fs
- .end
-
- .inline __set_gs, 0
- movw %di, %gs
- .end
-
-/*
- * prefetch 64 bytes
- */
-
- .inline prefetch_read_many,8
- prefetcht0 (%rdi)
- prefetcht0 32(%rdi)
- .end
-
- .inline prefetch_read_once,8
- prefetchnta (%rdi)
- prefetchnta 32(%rdi)
- .end
-
- .inline prefetch_write_many,8
- prefetcht0 (%rdi)
- prefetcht0 32(%rdi)
- .end
-
- .inline prefetch_write_once,8
- prefetcht0 (%rdi)
- prefetcht0 32(%rdi)
- .end
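[Editor's note] amd64.il, deleted above, held Sun Studio "inline templates": tiny assembly bodies the compiler could paste directly at call sites when built with $(INLINES) -D_ASM_INLINES. Under GCC these duties fall to builtins or ordinary out-of-line assembly instead. A hedged C sketch of what two of the deleted templates computed, assuming a GCC-compatible compiler (the kernel's actual replacements live elsewhere):

    #include <stdint.h>

    static inline uint64_t
    htonll_demo(uint64_t x)
    {
            return (__builtin_bswap64(x));  /* the movq + bswapq pair */
    }

    static inline void
    ht_pause_demo(void)
    {
            __asm__ __volatile__("pause");  /* spin-wait hint; a NOP on older CPUs */
    }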
diff --git a/usr/src/uts/intel/brand/common/brand_solaris.s b/usr/src/uts/intel/brand/common/brand_solaris.s
index b80b44e6c3..acf528a6ec 100644
--- a/usr/src/uts/intel/brand/common/brand_solaris.s
+++ b/usr/src/uts/intel/brand/common/brand_solaris.s
@@ -32,50 +32,14 @@
* use brand-specific #defines to replace the XXX_brand_... definitions.
*/
-#ifdef lint
-
-#include <sys/systm.h>
-
-#else /* !lint */
-
#include <sys/asm_linkage.h>
#include <sys/privregs.h>
#include <sys/segments.h>
#include "assym.h"
#include "brand_asm.h"
-#endif /* !lint */
-
-#ifdef lint
-
-void
-XXX_brand_sysenter_callback(void)
-{
-}
-
-void
-XXX_brand_syscall_callback(void)
-{
-}
-
-#if defined(__amd64)
-void
-XXX_brand_syscall32_callback(void)
-{
-}
-#endif /* amd64 */
-
-void
-XXX_brand_int91_callback(void)
-{
-}
-
-#else /* !lint */
-
#ifdef _ASM /* The remainder of this file is only for assembly files */
-#if defined(__amd64)
-
/*
* syscall handler for 32-bit user processes:
* See "64-BIT INTERPOSITION STACK" in brand_asm.h.
@@ -155,44 +119,4 @@ ENTRY(XXX_brand_int91_callback)
retq
SET_SIZE(XXX_brand_int91_callback)
-#else /* !__amd64 */
-
-/*
- * To 'return' to our user-space handler, we need to replace the iret target
- * address. The original return address is passed back in %eax.
- * See "32-BIT INTERPOSITION STACK" and "32-BIT INT STACK" in brand_asm.h.
- */
-ENTRY(XXX_brand_syscall_callback)
- CALLBACK_PROLOGUE(XXX_emulation_table, SPD_HANDLER, SYSCALL_REG,
- SCR_REG, SCR_REGB);
- CALC_TABLE_ADDR(SCR_REG, SPD_HANDLER); /* new ret addr is in scratch */
- mov SCR_REG, SYSCALL_REG; /* place new ret addr in syscallreg */
- GET_V(SP_REG, 0, V_U_EBX, SCR_REG); /* restore scratch register */
- add $V_END, SP_REG; /* restore intr stack pointer */
- /*CSTYLED*/
- xchg (SP_REG), SYSCALL_REG /* swap new and orig. return addrs */
- jmp nopop_sys_rtt_syscall
-9:
- ret
-SET_SIZE(XXX_brand_syscall_callback)
-
-/*
- * To 'return' to our user-space handler, we just need to place its address
- * into %edx. The original return address is passed back in SYSCALL_REG.
- * See "32-BIT INTERPOSITION STACK" in brand_asm.h.
- */
-ENTRY(XXX_brand_sysenter_callback)
- CALLBACK_PROLOGUE(XXX_emulation_table, SPD_HANDLER, SYSCALL_REG,
- SCR_REG, SCR_REGB);
- mov %edx, SCR_REG; /* save orig return addr in scr reg */
- CALC_TABLE_ADDR(%edx, SPD_HANDLER); /* new return addr is in %edx */
- mov SCR_REG, SYSCALL_REG; /* save orig return addr in %eax */
- GET_V(SP_REG, 0, V_U_EBX, SCR_REG) /* restore scratch register */
- sysexit
-9:
- ret
-SET_SIZE(XXX_brand_sysenter_callback)
-
-#endif /* !__amd64 */
#endif /* _ASM */
-#endif /* !lint */
diff --git a/usr/src/uts/intel/dtrace/dtrace_asm.s b/usr/src/uts/intel/dtrace/dtrace_asm.s
index cd2dc5c5bf..695b06451c 100644
--- a/usr/src/uts/intel/dtrace/dtrace_asm.s
+++ b/usr/src/uts/intel/dtrace/dtrace_asm.s
@@ -23,54 +23,19 @@
* Use is subject to license terms.
*/
/*
- * Copyright 2015 Joyent, Inc.
+ * Copyright 2019 Joyent, Inc.
*/
#include <sys/asm_linkage.h>
#include <sys/regset.h>
-#if defined(lint)
-#include <sys/dtrace_impl.h>
-#else
#include "assym.h"
-#endif
-
-#if defined(lint) || defined(__lint)
-
-greg_t
-dtrace_getfp(void)
-{ return (0); }
-
-#else /* lint */
-
-#if defined(__amd64)
ENTRY_NP(dtrace_getfp)
movq %rbp, %rax
ret
SET_SIZE(dtrace_getfp)
-#elif defined(__i386)
-
- ENTRY_NP(dtrace_getfp)
- movl %ebp, %eax
- ret
- SET_SIZE(dtrace_getfp)
-
-#endif /* __i386 */
-#endif /* lint */
-
-
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-uint64_t
-dtrace_getvmreg(uint32_t reg, volatile uint16_t *flags)
-{ return (0); }
-
-#else /* lint */
-
-#if defined(__amd64)
ENTRY_NP(dtrace_getvmreg)
@@ -80,50 +45,6 @@ dtrace_getvmreg(uint32_t reg, volatile uint16_t *flags)
SET_SIZE(dtrace_getvmreg)
-#elif defined(__i386)
-
- ENTRY_NP(dtrace_getvmreg)
- pushl %ebp / Setup stack frame
- movl %esp, %ebp
-
- movl 12(%ebp), %eax / Load flag pointer
- movw (%eax), %cx / Load flags
- orw $CPU_DTRACE_ILLOP, %cx / Set ILLOP
- movw %cx, (%eax) / Store flags
-
- leave
- ret
- SET_SIZE(dtrace_getvmreg)
-
-#endif /* __i386 */
-#endif /* lint */
-
-
-#if defined(lint) || defined(__lint)
-
-uint32_t
-dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
-{
- uint32_t old;
-
- if ((old = *target) == cmp)
- *target = new;
- return (old);
-}
-
-void *
-dtrace_casptr(void *target, void *cmp, void *new)
-{
- void *old;
-
- if ((old = *(void **)target) == cmp)
- *(void **)target = new;
- return (old);
-}
-
-#else /* lint */
-
-#if defined(__amd64)
ENTRY(dtrace_cas32)
movl %esi, %eax
@@ -139,60 +60,11 @@ dtrace_casptr(void *target, void *cmp, void *new)
ret
SET_SIZE(dtrace_casptr)
-#elif defined(__i386)
-
- ENTRY(dtrace_cas32)
- ALTENTRY(dtrace_casptr)
- movl 4(%esp), %edx
- movl 8(%esp), %eax
- movl 12(%esp), %ecx
- lock
- cmpxchgl %ecx, (%edx)
- ret
- SET_SIZE(dtrace_casptr)
- SET_SIZE(dtrace_cas32)
-
-#endif /* __i386 */
-#endif /* lint */
-
-#if defined(lint)
-
-/*ARGSUSED*/
-uintptr_t
-dtrace_caller(int aframes)
-{
- return (0);
-}
-
-#else /* lint */
-
-#if defined(__amd64)
ENTRY(dtrace_caller)
movq $-1, %rax
ret
SET_SIZE(dtrace_caller)
-#elif defined(__i386)
-
- ENTRY(dtrace_caller)
- movl $-1, %eax
- ret
- SET_SIZE(dtrace_caller)
-
-#endif /* __i386 */
-#endif /* lint */
-
-#if defined(lint)
-
-/*ARGSUSED*/
-void
-dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)
-{}
-
-#else
-
-#if defined(__amd64)
-
ENTRY(dtrace_copy)
pushq %rbp
call smap_disable
@@ -207,42 +79,6 @@ dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)
ret
SET_SIZE(dtrace_copy)
-#elif defined(__i386)
-
- ENTRY(dtrace_copy)
- pushl %ebp
- movl %esp, %ebp
- pushl %esi
- pushl %edi
-
- movl 8(%ebp), %esi / Load source address
- movl 12(%ebp), %edi / Load destination address
- movl 16(%ebp), %ecx / Load count
- repz / Repeat for count...
- smovb / move from %ds:si to %es:di
-
- popl %edi
- popl %esi
- movl %ebp, %esp
- popl %ebp
- ret
- SET_SIZE(dtrace_copy)
-
-#endif /* __i386 */
-#endif
-
-#if defined(lint)
-
-/*ARGSUSED*/
-void
-dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
- volatile uint16_t *flags)
-{}
-
-#else
-
-#if defined(__amd64)
-
ENTRY(dtrace_copystr)
pushq %rbp
movq %rsp, %rbp
@@ -269,56 +105,6 @@ dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
SET_SIZE(dtrace_copystr)
-#elif defined(__i386)
-
- ENTRY(dtrace_copystr)
-
- pushl %ebp / Setup stack frame
- movl %esp, %ebp
- pushl %ebx / Save registers
-
- movl 8(%ebp), %ebx / Load source address
- movl 12(%ebp), %edx / Load destination address
- movl 16(%ebp), %ecx / Load count
-
-0:
- movb (%ebx), %al / Load from source
- movb %al, (%edx) / Store to destination
- incl %ebx / Increment source pointer
- incl %edx / Increment destination pointer
- decl %ecx / Decrement remaining count
- cmpb $0, %al
- je 2f
- testl $0xfff, %ecx / Check if count is 4k-aligned
- jnz 1f
- movl 20(%ebp), %eax / load flags pointer
- testl $CPU_DTRACE_BADADDR, (%eax) / load and test dtrace flags
- jnz 2f
-1:
- cmpl $0, %ecx
- jne 0b
-
-2:
- popl %ebx
- movl %ebp, %esp
- popl %ebp
- ret
-
- SET_SIZE(dtrace_copystr)
-
-#endif /* __i386 */
-#endif
-
-#if defined(lint)
-
-/*ARGSUSED*/
-uintptr_t
-dtrace_fulword(void *addr)
-{ return (0); }
-
-#else
-#if defined(__amd64)
-
ENTRY(dtrace_fulword)
call smap_disable
movq (%rdi), %rax
@@ -326,28 +112,6 @@ dtrace_fulword(void *addr)
ret
SET_SIZE(dtrace_fulword)
-#elif defined(__i386)
-
- ENTRY(dtrace_fulword)
- movl 4(%esp), %ecx
- xorl %eax, %eax
- movl (%ecx), %eax
- ret
- SET_SIZE(dtrace_fulword)
-
-#endif /* __i386 */
-#endif
-
-#if defined(lint)
-
-/*ARGSUSED*/
-uint8_t
-dtrace_fuword8_nocheck(void *addr)
-{ return (0); }
-
-#else
-#if defined(__amd64)
-
ENTRY(dtrace_fuword8_nocheck)
call smap_disable
xorq %rax, %rax
@@ -356,28 +120,6 @@ dtrace_fuword8_nocheck(void *addr)
ret
SET_SIZE(dtrace_fuword8_nocheck)
-#elif defined(__i386)
-
- ENTRY(dtrace_fuword8_nocheck)
- movl 4(%esp), %ecx
- xorl %eax, %eax
- movzbl (%ecx), %eax
- ret
- SET_SIZE(dtrace_fuword8_nocheck)
-
-#endif /* __i386 */
-#endif
-
-#if defined(lint)
-
-/*ARGSUSED*/
-uint16_t
-dtrace_fuword16_nocheck(void *addr)
-{ return (0); }
-
-#else
-#if defined(__amd64)
-
ENTRY(dtrace_fuword16_nocheck)
call smap_disable
xorq %rax, %rax
@@ -386,28 +128,6 @@ dtrace_fuword16_nocheck(void *addr)
ret
SET_SIZE(dtrace_fuword16_nocheck)
-#elif defined(__i386)
-
- ENTRY(dtrace_fuword16_nocheck)
- movl 4(%esp), %ecx
- xorl %eax, %eax
- movzwl (%ecx), %eax
- ret
- SET_SIZE(dtrace_fuword16_nocheck)
-
-#endif /* __i386 */
-#endif
-
-#if defined(lint)
-
-/*ARGSUSED*/
-uint32_t
-dtrace_fuword32_nocheck(void *addr)
-{ return (0); }
-
-#else
-#if defined(__amd64)
-
ENTRY(dtrace_fuword32_nocheck)
call smap_disable
xorq %rax, %rax
@@ -416,28 +136,6 @@ dtrace_fuword32_nocheck(void *addr)
ret
SET_SIZE(dtrace_fuword32_nocheck)
-#elif defined(__i386)
-
- ENTRY(dtrace_fuword32_nocheck)
- movl 4(%esp), %ecx
- xorl %eax, %eax
- movl (%ecx), %eax
- ret
- SET_SIZE(dtrace_fuword32_nocheck)
-
-#endif /* __i386 */
-#endif
-
-#if defined(lint)
-
-/*ARGSUSED*/
-uint64_t
-dtrace_fuword64_nocheck(void *addr)
-{ return (0); }
-
-#else
-#if defined(__amd64)
-
ENTRY(dtrace_fuword64_nocheck)
call smap_disable
movq (%rdi), %rax
@@ -445,31 +143,6 @@ dtrace_fuword64_nocheck(void *addr)
ret
SET_SIZE(dtrace_fuword64_nocheck)
-#elif defined(__i386)
-
- ENTRY(dtrace_fuword64_nocheck)
- movl 4(%esp), %ecx
- xorl %eax, %eax
- xorl %edx, %edx
- movl (%ecx), %eax
- movl 4(%ecx), %edx
- ret
- SET_SIZE(dtrace_fuword64_nocheck)
-
-#endif /* __i386 */
-#endif
-
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-void
-dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
- int fault, int fltoffs, uintptr_t illval)
-{}
-
-#else /* lint */
-#if defined(__amd64)
-
ENTRY(dtrace_probe_error)
pushq %rbp
movq %rsp, %rbp
@@ -487,23 +160,3 @@ dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
ret
SET_SIZE(dtrace_probe_error)
-#elif defined(__i386)
-
- ENTRY(dtrace_probe_error)
- pushl %ebp
- movl %esp, %ebp
- pushl 0x1c(%ebp)
- pushl 0x18(%ebp)
- pushl 0x14(%ebp)
- pushl 0x10(%ebp)
- pushl 0xc(%ebp)
- pushl 0x8(%ebp)
- pushl dtrace_probeid_error
- call dtrace_probe
- movl %ebp, %esp
- popl %ebp
- ret
- SET_SIZE(dtrace_probe_error)
-
-#endif /* __i386 */
-#endif
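[Editor's note] The deleted lint stubs in dtrace_asm.s are a rare case where the stub documented real semantics: dtrace_cas32() is a compare-and-swap, which both the surviving amd64 body and the removed i386 body implement with lock cmpxchg. The same contract expressed in portable C11 atomics, as a demo sketch rather than the kernel's implementation:

    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t
    cas32_demo(_Atomic uint32_t *target, uint32_t cmp, uint32_t new)
    {
            /* Store new only if *target still holds cmp. */
            atomic_compare_exchange_strong(target, &cmp, new);
            return (cmp);   /* on failure, cmp now holds the observed value */
    }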
diff --git a/usr/src/uts/intel/ia32/ml/copy.s b/usr/src/uts/intel/ia32/ml/copy.s
index 672f7e3374..d02637e5fe 100644
--- a/usr/src/uts/intel/ia32/ml/copy.s
+++ b/usr/src/uts/intel/ia32/ml/copy.s
@@ -42,12 +42,7 @@
#include <sys/errno.h>
#include <sys/asm_linkage.h>
-#if defined(__lint)
-#include <sys/types.h>
-#include <sys/systm.h>
-#else /* __lint */
#include "assym.h"
-#endif /* __lint */
#define KCOPY_MIN_SIZE 128 /* Must be >= 16 bytes */
#define XCOPY_MIN_SIZE 128 /* Must be >= 16 bytes */
@@ -143,13 +138,8 @@
* I'm sorry about these macros, but copy.s is unsurprisingly sensitive to
* additional call instructions.
*/
-#if defined(__amd64)
#define SMAP_DISABLE_COUNT 16
#define SMAP_ENABLE_COUNT 26
-#elif defined(__i386)
-#define SMAP_DISABLE_COUNT 0
-#define SMAP_ENABLE_COUNT 0
-#endif
#define SMAP_DISABLE_INSTR(ITER) \
.globl _smap_disable_patch_/**/ITER; \
@@ -161,20 +151,9 @@
_smap_enable_patch_/**/ITER/**/:; \
nop; nop; nop;
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-kcopy(const void *from, void *to, size_t count)
-{ return (0); }
-
-#else /* __lint */
-
.globl kernelbase
.globl postbootkernelbase
-#if defined(__amd64)
-
ENTRY(kcopy)
pushq %rbp
movq %rsp, %rbp
@@ -211,86 +190,10 @@ _kcopy_copyerr:
ret
SET_SIZE(kcopy)
-#elif defined(__i386)
-
-#define ARG_FROM 8
-#define ARG_TO 12
-#define ARG_COUNT 16
-
- ENTRY(kcopy)
-#ifdef DEBUG
- pushl %ebp
- movl %esp, %ebp
- movl postbootkernelbase, %eax
- cmpl %eax, ARG_FROM(%ebp)
- jb 0f
- cmpl %eax, ARG_TO(%ebp)
- jnb 1f
-0: pushl $.kcopy_panic_msg
- call panic
-1: popl %ebp
-#endif
- lea _kcopy_copyerr, %eax /* lofault value */
- movl %gs:CPU_THREAD, %edx
-
-do_copy_fault:
- pushl %ebp
- movl %esp, %ebp /* setup stack frame */
- pushl %esi
- pushl %edi /* save registers */
-
- movl T_LOFAULT(%edx), %edi
- pushl %edi /* save the current lofault */
- movl %eax, T_LOFAULT(%edx) /* new lofault */
-
- movl ARG_COUNT(%ebp), %ecx
- movl ARG_FROM(%ebp), %esi
- movl ARG_TO(%ebp), %edi
- shrl $2, %ecx /* word count */
- rep
- smovl
- movl ARG_COUNT(%ebp), %ecx
- andl $3, %ecx /* bytes left over */
- rep
- smovb
- xorl %eax, %eax
-
- /*
- * A fault during do_copy_fault is indicated through an errno value
- * in %eax and we iret from the trap handler to here.
- */
-_kcopy_copyerr:
- popl %ecx
- popl %edi
- movl %ecx, T_LOFAULT(%edx) /* restore the original lofault */
- popl %esi
- popl %ebp
- ret
- SET_SIZE(kcopy)
-
#undef ARG_FROM
#undef ARG_TO
#undef ARG_COUNT
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*
- * Copy a block of storage. Similar to kcopy but uses non-temporal
- * instructions.
- */
-
-/* ARGSUSED */
-int
-kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
#define COPY_LOOP_INIT(src, dst, cnt) \
addq cnt, src; \
addq cnt, dst; \
@@ -367,88 +270,6 @@ _kcopy_nta_copyerr:
SET_SIZE(do_copy_fault_nta)
SET_SIZE(kcopy_nta)
-#elif defined(__i386)
-
-#define ARG_FROM 8
-#define ARG_TO 12
-#define ARG_COUNT 16
-
-#define COPY_LOOP_INIT(src, dst, cnt) \
- addl cnt, src; \
- addl cnt, dst; \
- shrl $3, cnt; \
- neg cnt
-
-#define COPY_LOOP_BODY(src, dst, cnt) \
- prefetchnta 0x100(src, cnt, 8); \
- movl (src, cnt, 8), %esi; \
- movnti %esi, (dst, cnt, 8); \
- movl 0x4(src, cnt, 8), %esi; \
- movnti %esi, 0x4(dst, cnt, 8); \
- movl 0x8(src, cnt, 8), %esi; \
- movnti %esi, 0x8(dst, cnt, 8); \
- movl 0xc(src, cnt, 8), %esi; \
- movnti %esi, 0xc(dst, cnt, 8); \
- addl $2, cnt
-
- /*
- * kcopy_nta is not implemented for 32-bit as no performance
- * improvement was shown. We simply jump directly to kcopy
- * and discard the 4 arguments.
- */
- ENTRY(kcopy_nta)
- jmp kcopy
-
- lea _kcopy_nta_copyerr, %eax /* lofault value */
- ALTENTRY(do_copy_fault_nta)
- pushl %ebp
- movl %esp, %ebp /* setup stack frame */
- pushl %esi
- pushl %edi
-
- movl %gs:CPU_THREAD, %edx
- movl T_LOFAULT(%edx), %edi
- pushl %edi /* save the current lofault */
- movl %eax, T_LOFAULT(%edx) /* new lofault */
-
- /* COPY_LOOP_BODY needs to use %esi */
- movl ARG_COUNT(%ebp), %ecx
- movl ARG_FROM(%ebp), %edi
- movl ARG_TO(%ebp), %eax
- COPY_LOOP_INIT(%edi, %eax, %ecx)
-1: COPY_LOOP_BODY(%edi, %eax, %ecx)
- jnz 1b
- mfence
-
- xorl %eax, %eax
-_kcopy_nta_copyerr:
- popl %ecx
- popl %edi
- movl %ecx, T_LOFAULT(%edx) /* restore the original lofault */
- popl %esi
- leave
- ret
- SET_SIZE(do_copy_fault_nta)
- SET_SIZE(kcopy_nta)
-
-#undef ARG_FROM
-#undef ARG_TO
-#undef ARG_COUNT
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-bcopy(const void *from, void *to, size_t count)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(bcopy)
#ifdef DEBUG
orq %rdx, %rdx /* %rdx = count */
@@ -977,54 +798,6 @@ call_panic:
SET_SIZE(bcopy_altentry)
SET_SIZE(bcopy)
-#elif defined(__i386)
-
-#define ARG_FROM 4
-#define ARG_TO 8
-#define ARG_COUNT 12
-
- ENTRY(bcopy)
-#ifdef DEBUG
- movl ARG_COUNT(%esp), %eax
- orl %eax, %eax
- jz 1f
- movl postbootkernelbase, %eax
- cmpl %eax, ARG_FROM(%esp)
- jb 0f
- cmpl %eax, ARG_TO(%esp)
- jnb 1f
-0: pushl %ebp
- movl %esp, %ebp
- pushl $.bcopy_panic_msg
- call panic
-1:
-#endif
-do_copy:
- movl %esi, %eax /* save registers */
- movl %edi, %edx
- movl ARG_COUNT(%esp), %ecx
- movl ARG_FROM(%esp), %esi
- movl ARG_TO(%esp), %edi
-
- shrl $2, %ecx /* word count */
- rep
- smovl
- movl ARG_COUNT(%esp), %ecx
- andl $3, %ecx /* bytes left over */
- rep
- smovb
- movl %eax, %esi /* restore registers */
- movl %edx, %edi
- ret
- SET_SIZE(bcopy)
-
-#undef ARG_COUNT
-#undef ARG_FROM
-#undef ARG_TO
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Zero a block of storage, returning an error code if we
@@ -1032,17 +805,6 @@ do_copy:
* Returns errno value on pagefault error, 0 if all ok
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-kzero(void *addr, size_t count)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(kzero)
#ifdef DEBUG
cmpq postbootkernelbase(%rip), %rdi /* %rdi = addr */
@@ -1073,78 +835,10 @@ _kzeroerr:
ret
SET_SIZE(kzero)
-#elif defined(__i386)
-
-#define ARG_ADDR 8
-#define ARG_COUNT 12
-
- ENTRY(kzero)
-#ifdef DEBUG
- pushl %ebp
- movl %esp, %ebp
- movl postbootkernelbase, %eax
- cmpl %eax, ARG_ADDR(%ebp)
- jnb 0f
- pushl $.kzero_panic_msg
- call panic
-0: popl %ebp
-#endif
- lea _kzeroerr, %eax /* kzeroerr is lofault value */
-
- pushl %ebp /* save stack base */
- movl %esp, %ebp /* set new stack base */
- pushl %edi /* save %edi */
-
- mov %gs:CPU_THREAD, %edx
- movl T_LOFAULT(%edx), %edi
- pushl %edi /* save the current lofault */
- movl %eax, T_LOFAULT(%edx) /* new lofault */
-
- movl ARG_COUNT(%ebp), %ecx /* get size in bytes */
- movl ARG_ADDR(%ebp), %edi /* %edi <- address of bytes to clear */
- shrl $2, %ecx /* Count of double words to zero */
- xorl %eax, %eax /* sstol val */
- rep
- sstol /* %ecx contains words to clear (%eax=0) */
-
- movl ARG_COUNT(%ebp), %ecx /* get size in bytes */
- andl $3, %ecx /* do mod 4 */
- rep
- sstob /* %ecx contains residual bytes to clear */
-
- /*
- * A fault during kzero is indicated through an errno value
- * in %eax when we iret to here.
- */
-_kzeroerr:
- popl %edi
- movl %edi, T_LOFAULT(%edx) /* restore the original lofault */
- popl %edi
- popl %ebp
- ret
- SET_SIZE(kzero)
-
-#undef ARG_ADDR
-#undef ARG_COUNT
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Zero a block of storage.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-bzero(void *addr, size_t count)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(bzero)
#ifdef DEBUG
cmpq postbootkernelbase(%rip), %rdi /* %rdi = addr */
@@ -1459,44 +1153,6 @@ L(use_rep):
SET_SIZE(bzero_altentry)
SET_SIZE(bzero)
-#elif defined(__i386)
-
-#define ARG_ADDR 4
-#define ARG_COUNT 8
-
- ENTRY(bzero)
-#ifdef DEBUG
- movl postbootkernelbase, %eax
- cmpl %eax, ARG_ADDR(%esp)
- jnb 0f
- pushl %ebp
- movl %esp, %ebp
- pushl $.bzero_panic_msg
- call panic
-0:
-#endif
-do_zero:
- movl %edi, %edx
- movl ARG_COUNT(%esp), %ecx
- movl ARG_ADDR(%esp), %edi
- shrl $2, %ecx
- xorl %eax, %eax
- rep
- sstol
- movl ARG_COUNT(%esp), %ecx
- andl $3, %ecx
- rep
- sstob
- movl %edx, %edi
- ret
- SET_SIZE(bzero)
-
-#undef ARG_ADDR
-#undef ARG_COUNT
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Transfer data to and from user space -
* Note that these routines can cause faults
@@ -1519,17 +1175,6 @@ do_zero:
* Copy user data to kernel space.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-copyin(const void *uaddr, void *kaddr, size_t count)
-{ return (0); }
-
-#else /* lint */
-
-#if defined(__amd64)
-
ENTRY(copyin)
pushq %rbp
movq %rsp, %rbp
@@ -1585,62 +1230,6 @@ _copyin_err:
ret
SET_SIZE(copyin)
-#elif defined(__i386)
-
-#define ARG_UADDR 4
-#define ARG_KADDR 8
-
- ENTRY(copyin)
- movl kernelbase, %ecx
-#ifdef DEBUG
- cmpl %ecx, ARG_KADDR(%esp)
- jnb 1f
- pushl %ebp
- movl %esp, %ebp
- pushl $.copyin_panic_msg
- call panic
-1:
-#endif
- lea _copyin_err, %eax
-
- movl %gs:CPU_THREAD, %edx
- cmpl %ecx, ARG_UADDR(%esp) /* test uaddr < kernelbase */
- jb do_copy_fault
- jmp 3f
-
-_copyin_err:
- popl %ecx
- popl %edi
- movl %ecx, T_LOFAULT(%edx) /* restore original lofault */
- popl %esi
- popl %ebp
-3:
- movl T_COPYOPS(%edx), %eax
- cmpl $0, %eax
- jz 2f
- jmp *CP_COPYIN(%eax)
-
-2: movl $-1, %eax
- ret
- SET_SIZE(copyin)
-
-#undef ARG_UADDR
-#undef ARG_KADDR
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-xcopyin_nta(const void *uaddr, void *kaddr, size_t count, int copy_cached)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(xcopyin_nta)
pushq %rbp
movq %rsp, %rbp
@@ -1730,95 +1319,10 @@ _xcopyin_nta_err:
ret
SET_SIZE(xcopyin_nta)
-#elif defined(__i386)
-
-#define ARG_UADDR 4
-#define ARG_KADDR 8
-#define ARG_COUNT 12
-#define ARG_CACHED 16
-
- .globl use_sse_copy
-
- ENTRY(xcopyin_nta)
- movl kernelbase, %ecx
- lea _xcopyin_err, %eax
- movl %gs:CPU_THREAD, %edx
- cmpl %ecx, ARG_UADDR(%esp) /* test uaddr < kernelbase */
- jae 4f
-
- cmpl $0, use_sse_copy /* no sse support */
- jz do_copy_fault
-
- cmpl $0, ARG_CACHED(%esp) /* copy_cached hint set? */
- jnz do_copy_fault
-
- /*
- * Make sure cnt is >= XCOPY_MIN_SIZE bytes
- */
- cmpl $XCOPY_MIN_SIZE, ARG_COUNT(%esp)
- jb do_copy_fault
-
- /*
- * Make sure src and dst are NTA_ALIGN_SIZE aligned,
- * count is COUNT_ALIGN_SIZE aligned.
- */
- movl ARG_UADDR(%esp), %ecx
- orl ARG_KADDR(%esp), %ecx
- andl $NTA_ALIGN_MASK, %ecx
- orl ARG_COUNT(%esp), %ecx
- andl $COUNT_ALIGN_MASK, %ecx
- jnz do_copy_fault
-
- jmp do_copy_fault_nta /* use regular access */
-
-4:
- movl $EFAULT, %eax
- jmp 3f
-
- /*
- * A fault during do_copy_fault or do_copy_fault_nta is
- * indicated through an errno value in %eax and we iret from the
- * trap handler to here.
- */
-_xcopyin_err:
- popl %ecx
- popl %edi
- movl %ecx, T_LOFAULT(%edx) /* restore original lofault */
- popl %esi
- popl %ebp
-3:
- cmpl $0, T_COPYOPS(%edx)
- jz 2f
- movl T_COPYOPS(%edx), %eax
- jmp *CP_XCOPYIN(%eax)
-
-2: rep; ret /* use 2 byte return instruction when branch target */
- /* AMD Software Optimization Guide - Section 6.2 */
- SET_SIZE(xcopyin_nta)
-
-#undef ARG_UADDR
-#undef ARG_KADDR
-#undef ARG_COUNT
-#undef ARG_CACHED
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Copy kernel data to user space.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-copyout(const void *kaddr, void *uaddr, size_t count)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(copyout)
pushq %rbp
movq %rsp, %rbp
@@ -1875,61 +1379,6 @@ _copyout_err:
ret
SET_SIZE(copyout)
-#elif defined(__i386)
-
-#define ARG_KADDR 4
-#define ARG_UADDR 8
-
- ENTRY(copyout)
- movl kernelbase, %ecx
-#ifdef DEBUG
- cmpl %ecx, ARG_KADDR(%esp)
- jnb 1f
- pushl %ebp
- movl %esp, %ebp
- pushl $.copyout_panic_msg
- call panic
-1:
-#endif
- lea _copyout_err, %eax
- movl %gs:CPU_THREAD, %edx
- cmpl %ecx, ARG_UADDR(%esp) /* test uaddr < kernelbase */
- jb do_copy_fault
- jmp 3f
-
-_copyout_err:
- popl %ecx
- popl %edi
- movl %ecx, T_LOFAULT(%edx) /* restore original lofault */
- popl %esi
- popl %ebp
-3:
- movl T_COPYOPS(%edx), %eax
- cmpl $0, %eax
- jz 2f
- jmp *CP_COPYOUT(%eax)
-
-2: movl $-1, %eax
- ret
- SET_SIZE(copyout)
-
-#undef ARG_UADDR
-#undef ARG_KADDR
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-xcopyout_nta(const void *kaddr, void *uaddr, size_t count, int copy_cached)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(xcopyout_nta)
pushq %rbp
movq %rsp, %rbp
@@ -2020,94 +1469,11 @@ _xcopyout_nta_err:
ret
SET_SIZE(xcopyout_nta)
-#elif defined(__i386)
-
-#define ARG_KADDR 4
-#define ARG_UADDR 8
-#define ARG_COUNT 12
-#define ARG_CACHED 16
-
- ENTRY(xcopyout_nta)
- movl kernelbase, %ecx
- lea _xcopyout_err, %eax
- movl %gs:CPU_THREAD, %edx
- cmpl %ecx, ARG_UADDR(%esp) /* test uaddr < kernelbase */
- jae 4f
-
- cmpl $0, use_sse_copy /* no sse support */
- jz do_copy_fault
-
- cmpl $0, ARG_CACHED(%esp) /* copy_cached hint set? */
- jnz do_copy_fault
-
- /*
- * Make sure cnt is >= XCOPY_MIN_SIZE bytes
- */
- cmpl $XCOPY_MIN_SIZE, %edx
- jb do_copy_fault
-
- /*
- * Make sure src and dst are NTA_ALIGN_SIZE aligned,
- * count is COUNT_ALIGN_SIZE aligned.
- */
- movl ARG_UADDR(%esp), %ecx
- orl ARG_KADDR(%esp), %ecx
- andl $NTA_ALIGN_MASK, %ecx
- orl ARG_COUNT(%esp), %ecx
- andl $COUNT_ALIGN_MASK, %ecx
- jnz do_copy_fault
- jmp do_copy_fault_nta
-
-4:
- movl $EFAULT, %eax
- jmp 3f
-
- /*
- * A fault during do_copy_fault or do_copy_fault_nta is
- * indicated through an errno value in %eax and we iret from the
- * trap handler to here.
- */
-_xcopyout_err:
- / restore the original lofault
- popl %ecx
- popl %edi
- movl %ecx, T_LOFAULT(%edx) / original lofault
- popl %esi
- popl %ebp
-3:
- cmpl $0, T_COPYOPS(%edx)
- jz 2f
- movl T_COPYOPS(%edx), %eax
- jmp *CP_XCOPYOUT(%eax)
-
-2: rep; ret /* use 2 byte return instruction when branch target */
- /* AMD Software Optimization Guide - Section 6.2 */
- SET_SIZE(xcopyout_nta)
-
-#undef ARG_UADDR
-#undef ARG_KADDR
-#undef ARG_COUNT
-#undef ARG_CACHED
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Copy a null terminated string from one point to another in
* the kernel address space.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-copystr(const char *from, char *to, size_t maxlength, size_t *lencopied)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(copystr)
pushq %rbp
movq %rsp, %rbp
@@ -2172,109 +1538,11 @@ copystr_done:
ret
SET_SIZE(copystr)
-#elif defined(__i386)
-
-#define ARG_FROM 8
-#define ARG_TO 12
-#define ARG_MAXLEN 16
-#define ARG_LENCOPIED 20
-
- ENTRY(copystr)
-#ifdef DEBUG
- pushl %ebp
- movl %esp, %ebp
- movl kernelbase, %eax
- cmpl %eax, ARG_FROM(%esp)
- jb 0f
- cmpl %eax, ARG_TO(%esp)
- jnb 1f
-0: pushl $.copystr_panic_msg
- call panic
-1: popl %ebp
-#endif
- /* get the current lofault address */
- movl %gs:CPU_THREAD, %eax
- movl T_LOFAULT(%eax), %eax
-do_copystr:
- pushl %ebp /* setup stack frame */
- movl %esp, %ebp
- pushl %ebx /* save registers */
- pushl %edi
-
- movl %gs:CPU_THREAD, %ebx
- movl T_LOFAULT(%ebx), %edi
- pushl %edi /* save the current lofault */
- movl %eax, T_LOFAULT(%ebx) /* new lofault */
-
- movl ARG_MAXLEN(%ebp), %ecx
- cmpl $0, %ecx
- je copystr_enametoolong /* maxlength == 0 */
-
- movl ARG_FROM(%ebp), %ebx /* source address */
- movl ARG_TO(%ebp), %edx /* destination address */
-
-copystr_loop:
- decl %ecx
- movb (%ebx), %al
- incl %ebx
- movb %al, (%edx)
- incl %edx
- cmpb $0, %al
- je copystr_null /* null char */
- cmpl $0, %ecx
- jne copystr_loop
-
-copystr_enametoolong:
- movl $ENAMETOOLONG, %eax
- jmp copystr_out
-
-copystr_null:
- xorl %eax, %eax /* no error */
-
-copystr_out:
- cmpl $0, ARG_LENCOPIED(%ebp) /* want length? */
- je copystr_done /* no */
- movl ARG_MAXLEN(%ebp), %edx
- subl %ecx, %edx /* compute length and store it */
- movl ARG_LENCOPIED(%ebp), %ecx
- movl %edx, (%ecx)
-
-copystr_done:
- popl %edi
- movl %gs:CPU_THREAD, %ebx
- movl %edi, T_LOFAULT(%ebx) /* restore the original lofault */
-
- popl %edi
- popl %ebx
- popl %ebp
- ret
- SET_SIZE(copystr)
-
-#undef ARG_FROM
-#undef ARG_TO
-#undef ARG_MAXLEN
-#undef ARG_LENCOPIED
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Copy a null terminated string from the user address space into
* the kernel address space.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-copyinstr(const char *uaddr, char *kaddr, size_t maxlength,
- size_t *lencopied)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(copyinstr)
pushq %rbp
movq %rsp, %rbp
@@ -2336,69 +1604,11 @@ _copyinstr_error:
ret
SET_SIZE(copyinstr)
-#elif defined(__i386)
-
-#define ARG_UADDR 4
-#define ARG_KADDR 8
-
- ENTRY(copyinstr)
- movl kernelbase, %ecx
-#ifdef DEBUG
- cmpl %ecx, ARG_KADDR(%esp)
- jnb 1f
- pushl %ebp
- movl %esp, %ebp
- pushl $.copyinstr_panic_msg
- call panic
-1:
-#endif
- lea _copyinstr_error, %eax
- cmpl %ecx, ARG_UADDR(%esp) /* test uaddr < kernelbase */
- jb do_copystr
- movl %gs:CPU_THREAD, %edx
- jmp 3f
-
-_copyinstr_error:
- popl %edi
- movl %gs:CPU_THREAD, %edx
- movl %edi, T_LOFAULT(%edx) /* original lofault */
-
- popl %edi
- popl %ebx
- popl %ebp
-3:
- movl T_COPYOPS(%edx), %eax
- cmpl $0, %eax
- jz 2f
- jmp *CP_COPYINSTR(%eax)
-
-2: movl $EFAULT, %eax /* return EFAULT */
- ret
- SET_SIZE(copyinstr)
-
-#undef ARG_UADDR
-#undef ARG_KADDR
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Copy a null terminated string from the kernel
* address space to the user address space.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-copyoutstr(const char *kaddr, char *uaddr, size_t maxlength,
- size_t *lencopied)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(copyoutstr)
pushq %rbp
movq %rsp, %rbp
@@ -2459,87 +1669,11 @@ _copyoutstr_error:
ret
SET_SIZE(copyoutstr)
-#elif defined(__i386)
-
-#define ARG_KADDR 4
-#define ARG_UADDR 8
-
- ENTRY(copyoutstr)
- movl kernelbase, %ecx
-#ifdef DEBUG
- cmpl %ecx, ARG_KADDR(%esp)
- jnb 1f
- pushl %ebp
- movl %esp, %ebp
- pushl $.copyoutstr_panic_msg
- call panic
-1:
-#endif
- lea _copyoutstr_error, %eax
- cmpl %ecx, ARG_UADDR(%esp) /* test uaddr < kernelbase */
- jb do_copystr
- movl %gs:CPU_THREAD, %edx
- jmp 3f
-
-_copyoutstr_error:
- popl %edi
- movl %gs:CPU_THREAD, %edx
- movl %edi, T_LOFAULT(%edx) /* restore the original lofault */
-
- popl %edi
- popl %ebx
- popl %ebp
-3:
- movl T_COPYOPS(%edx), %eax
- cmpl $0, %eax
- jz 2f
- jmp *CP_COPYOUTSTR(%eax)
-
-2: movl $EFAULT, %eax /* return EFAULT */
- ret
- SET_SIZE(copyoutstr)
-
-#undef ARG_KADDR
-#undef ARG_UADDR
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Since all of the fuword() variants are so similar, we have a macro to spit
* them out. This allows us to create DTrace-unobservable functions easily.
*/
-#if defined(__lint)
-
-#if defined(__amd64)
-
-/* ARGSUSED */
-int
-fuword64(const void *addr, uint64_t *dst)
-{ return (0); }
-
-#endif
-
-/* ARGSUSED */
-int
-fuword32(const void *addr, uint32_t *dst)
-{ return (0); }
-
-/* ARGSUSED */
-int
-fuword16(const void *addr, uint16_t *dst)
-{ return (0); }
-
-/* ARGSUSED */
-int
-fuword8(const void *addr, uint8_t *dst)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
/*
* Note that we don't save and reload the arguments here
* because their values are not altered in the copy path.
@@ -2580,79 +1714,12 @@ _flt_/**/NAME: \
FUWORD(fuword16, movw, %ax, CP_FUWORD16,10,14,15)
FUWORD(fuword8, movb, %al, CP_FUWORD8,11,16,17)
-#elif defined(__i386)
-
-#define FUWORD(NAME, INSTR, REG, COPYOP) \
- ENTRY(NAME) \
- movl %gs:CPU_THREAD, %ecx; \
- movl kernelbase, %eax; \
- cmpl %eax, 4(%esp); \
- jae 1f; \
- lea _flt_/**/NAME, %edx; \
- movl %edx, T_LOFAULT(%ecx); \
- movl 4(%esp), %eax; \
- movl 8(%esp), %edx; \
- INSTR (%eax), REG; \
- movl $0, T_LOFAULT(%ecx); \
- INSTR REG, (%edx); \
- xorl %eax, %eax; \
- ret; \
-_flt_/**/NAME: \
- movl $0, T_LOFAULT(%ecx); \
-1: \
- movl T_COPYOPS(%ecx), %eax; \
- cmpl $0, %eax; \
- jz 2f; \
- jmp *COPYOP(%eax); \
-2: \
- movl $-1, %eax; \
- ret; \
- SET_SIZE(NAME)
-
- FUWORD(fuword32, movl, %eax, CP_FUWORD32)
- FUWORD(fuword16, movw, %ax, CP_FUWORD16)
- FUWORD(fuword8, movb, %al, CP_FUWORD8)
-
-#endif /* __i386 */
-
#undef FUWORD
-#endif /* __lint */
-
/*
* Set user word.
*/
-#if defined(__lint)
-
-#if defined(__amd64)
-
-/* ARGSUSED */
-int
-suword64(void *addr, uint64_t value)
-{ return (0); }
-
-#endif
-
-/* ARGSUSED */
-int
-suword32(void *addr, uint32_t value)
-{ return (0); }
-
-/* ARGSUSED */
-int
-suword16(void *addr, uint16_t value)
-{ return (0); }
-
-/* ARGSUSED */
-int
-suword8(void *addr, uint8_t value)
-{ return (0); }
-
-#else /* lint */
-
-#if defined(__amd64)
-
/*
* Note that we don't save and reload the arguments here
* because their values are not altered in the copy path.
@@ -2690,75 +1757,8 @@ _flt_/**/NAME: \
SUWORD(suword16, movw, %si, CP_SUWORD16,14,22,23)
SUWORD(suword8, movb, %sil, CP_SUWORD8,15,24,25)
-#elif defined(__i386)
-
-#define SUWORD(NAME, INSTR, REG, COPYOP) \
- ENTRY(NAME) \
- movl %gs:CPU_THREAD, %ecx; \
- movl kernelbase, %eax; \
- cmpl %eax, 4(%esp); \
- jae 1f; \
- lea _flt_/**/NAME, %edx; \
- movl %edx, T_LOFAULT(%ecx); \
- movl 4(%esp), %eax; \
- movl 8(%esp), %edx; \
- INSTR REG, (%eax); \
- movl $0, T_LOFAULT(%ecx); \
- xorl %eax, %eax; \
- ret; \
-_flt_/**/NAME: \
- movl $0, T_LOFAULT(%ecx); \
-1: \
- movl T_COPYOPS(%ecx), %eax; \
- cmpl $0, %eax; \
- jz 3f; \
- movl COPYOP(%eax), %ecx; \
- jmp *%ecx; \
-3: \
- movl $-1, %eax; \
- ret; \
- SET_SIZE(NAME)
-
- SUWORD(suword32, movl, %edx, CP_SUWORD32)
- SUWORD(suword16, movw, %dx, CP_SUWORD16)
- SUWORD(suword8, movb, %dl, CP_SUWORD8)
-
-#endif /* __i386 */
-
#undef SUWORD
-#endif /* __lint */
-
-#if defined(__lint)
-
-#if defined(__amd64)
-
-/*ARGSUSED*/
-void
-fuword64_noerr(const void *addr, uint64_t *dst)
-{}
-
-#endif
-
-/*ARGSUSED*/
-void
-fuword32_noerr(const void *addr, uint32_t *dst)
-{}
-
-/*ARGSUSED*/
-void
-fuword8_noerr(const void *addr, uint8_t *dst)
-{}
-
-/*ARGSUSED*/
-void
-fuword16_noerr(const void *addr, uint16_t *dst)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
#define FUWORD_NOERR(NAME, INSTR, REG) \
ENTRY(NAME) \
cmpq kernelbase(%rip), %rdi; \
@@ -2773,60 +1773,8 @@ fuword16_noerr(const void *addr, uint16_t *dst)
FUWORD_NOERR(fuword16_noerr, movw, %ax)
FUWORD_NOERR(fuword8_noerr, movb, %al)
-#elif defined(__i386)
-
-#define FUWORD_NOERR(NAME, INSTR, REG) \
- ENTRY(NAME) \
- movl 4(%esp), %eax; \
- cmpl kernelbase, %eax; \
- jb 1f; \
- movl kernelbase, %eax; \
-1: movl 8(%esp), %edx; \
- INSTR (%eax), REG; \
- INSTR REG, (%edx); \
- ret; \
- SET_SIZE(NAME)
-
- FUWORD_NOERR(fuword32_noerr, movl, %ecx)
- FUWORD_NOERR(fuword16_noerr, movw, %cx)
- FUWORD_NOERR(fuword8_noerr, movb, %cl)
-
-#endif /* __i386 */
-
#undef FUWORD_NOERR
-#endif /* __lint */
-
-#if defined(__lint)
-
-#if defined(__amd64)
-
-/*ARGSUSED*/
-void
-suword64_noerr(void *addr, uint64_t value)
-{}
-
-#endif
-
-/*ARGSUSED*/
-void
-suword32_noerr(void *addr, uint32_t value)
-{}
-
-/*ARGSUSED*/
-void
-suword16_noerr(void *addr, uint16_t value)
-{}
-
-/*ARGSUSED*/
-void
-suword8_noerr(void *addr, uint8_t value)
-{}
-
-#else /* lint */
-
-#if defined(__amd64)
-
#define SUWORD_NOERR(NAME, INSTR, REG) \
ENTRY(NAME) \
cmpq kernelbase(%rip), %rdi; \
@@ -2840,72 +1788,14 @@ suword8_noerr(void *addr, uint8_t value)
SUWORD_NOERR(suword16_noerr, movw, %si)
SUWORD_NOERR(suword8_noerr, movb, %sil)
-#elif defined(__i386)
-
-#define SUWORD_NOERR(NAME, INSTR, REG) \
- ENTRY(NAME) \
- movl 4(%esp), %eax; \
- cmpl kernelbase, %eax; \
- jb 1f; \
- movl kernelbase, %eax; \
-1: \
- movl 8(%esp), %edx; \
- INSTR REG, (%eax); \
- ret; \
- SET_SIZE(NAME)
-
- SUWORD_NOERR(suword32_noerr, movl, %edx)
- SUWORD_NOERR(suword16_noerr, movw, %dx)
- SUWORD_NOERR(suword8_noerr, movb, %dl)
-
-#endif /* __i386 */
-
#undef SUWORD_NOERR
-#endif /* lint */
-
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-int
-subyte(void *addr, uchar_t value)
-{ return (0); }
-
-/*ARGSUSED*/
-void
-subyte_noerr(void *addr, uchar_t value)
-{}
-
-/*ARGSUSED*/
-int
-fulword(const void *addr, ulong_t *valuep)
-{ return (0); }
-
-/*ARGSUSED*/
-void
-fulword_noerr(const void *addr, ulong_t *valuep)
-{}
-
-/*ARGSUSED*/
-int
-sulword(void *addr, ulong_t valuep)
-{ return (0); }
-
-/*ARGSUSED*/
-void
-sulword_noerr(void *addr, ulong_t valuep)
-{}
-
-#else
.weak subyte
subyte=suword8
.weak subyte_noerr
subyte_noerr=suword8_noerr
-#if defined(__amd64)
-
.weak fulword
fulword=fuword64
.weak fulword_noerr
@@ -2915,69 +1805,6 @@ sulword_noerr(void *addr, ulong_t valuep)
.weak sulword_noerr
sulword_noerr=suword64_noerr
-#elif defined(__i386)
-
- .weak fulword
- fulword=fuword32
- .weak fulword_noerr
- fulword_noerr=fuword32_noerr
- .weak sulword
- sulword=suword32
- .weak sulword_noerr
- sulword_noerr=suword32_noerr
-
-#endif /* __i386 */
-
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*
- * Copy a block of storage - must not overlap (from + len <= to).
- * No fault handler installed (to be called under on_fault())
- */
-
-/* ARGSUSED */
-void
-copyout_noerr(const void *kfrom, void *uto, size_t count)
-{}
-
-/* ARGSUSED */
-void
-copyin_noerr(const void *ufrom, void *kto, size_t count)
-{}
-
-/*
- * Zero a block of storage in user space
- */
-
-/* ARGSUSED */
-void
-uzero(void *addr, size_t count)
-{}
-
-/*
- * copy a block of storage in user space
- */
-
-/* ARGSUSED */
-void
-ucopy(const void *ufrom, void *uto, size_t ulength)
-{}
-
-/*
- * copy a string in user space
- */
-
-/* ARGSUSED */
-void
-ucopystr(const char *ufrom, char *uto, size_t umaxlength, size_t *lencopied)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(copyin_noerr)
movq kernelbase(%rip), %rax
#ifdef DEBUG
@@ -3045,76 +1872,6 @@ ucopystr(const char *ufrom, char *uto, size_t umaxlength, size_t *lencopied)
jmp do_copystr
SET_SIZE(ucopystr)
-#elif defined(__i386)
-
- ENTRY(copyin_noerr)
- movl kernelbase, %eax
-#ifdef DEBUG
- cmpl %eax, 8(%esp)
- jae 1f
- pushl $.cpyin_ne_pmsg
- call panic
-1:
-#endif
- cmpl %eax, 4(%esp)
- jb do_copy
- movl %eax, 4(%esp) /* force fault at kernelbase */
- jmp do_copy
- SET_SIZE(copyin_noerr)
-
- ENTRY(copyout_noerr)
- movl kernelbase, %eax
-#ifdef DEBUG
- cmpl %eax, 4(%esp)
- jae 1f
- pushl $.cpyout_ne_pmsg
- call panic
-1:
-#endif
- cmpl %eax, 8(%esp)
- jb do_copy
- movl %eax, 8(%esp) /* force fault at kernelbase */
- jmp do_copy
- SET_SIZE(copyout_noerr)
-
- ENTRY(uzero)
- movl kernelbase, %eax
- cmpl %eax, 4(%esp)
- jb do_zero
- movl %eax, 4(%esp) /* force fault at kernelbase */
- jmp do_zero
- SET_SIZE(uzero)
-
- ENTRY(ucopy)
- movl kernelbase, %eax
- cmpl %eax, 4(%esp)
- jb 1f
- movl %eax, 4(%esp) /* force fault at kernelbase */
-1:
- cmpl %eax, 8(%esp)
- jb do_copy
- movl %eax, 8(%esp) /* force fault at kernelbase */
- jmp do_copy
- SET_SIZE(ucopy)
-
- ENTRY(ucopystr)
- movl kernelbase, %eax
- cmpl %eax, 4(%esp)
- jb 1f
- movl %eax, 4(%esp) /* force fault at kernelbase */
-1:
- cmpl %eax, 8(%esp)
- jb 2f
- movl %eax, 8(%esp) /* force fault at kernelbase */
-2:
- /* do_copystr expects the lofault address in %eax */
- movl %gs:CPU_THREAD, %eax
- movl T_LOFAULT(%eax), %eax
- jmp do_copystr
- SET_SIZE(ucopystr)
-
-#endif /* __i386 */
-
#ifdef DEBUG
.data
.kcopy_panic_msg:
@@ -3145,9 +1902,29 @@ ucopystr(const char *ufrom, char *uto, size_t umaxlength, size_t *lencopied)
.string "copyout_noerr: argument not in kernel address space"
#endif
-#endif /* __lint */
+/*
+ * These functions are used for SMAP, supervisor mode access protection. They
+ * are hotpatched to become real instructions when the system starts up which is
+ * done in mlsetup() as a part of enabling the other CR4 related features.
+ *
+ * Generally speaking, smap_disable() is a stac instruction and smap_enable is a
+ * clac instruction. It's safe to call these any number of times, and in fact,
+ * out of paranoia, the kernel will likely call it at several points.
+ */
+
+ ENTRY(smap_disable)
+ nop
+ nop
+ nop
+ ret
+ SET_SIZE(smap_disable)
-#ifndef __lint
+ ENTRY(smap_enable)
+ nop
+ nop
+ nop
+ ret
+ SET_SIZE(smap_enable)
.data
.align 4
@@ -3162,5 +1939,3 @@ _smap_enable_patch_count:
.size _smap_disable_patch_count, 4
_smap_disable_patch_count:
.long SMAP_DISABLE_COUNT
-
-#endif /* __lint */
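[Editor's note] The new comment block at the end of copy.s explains that smap_disable() and smap_enable() are born as three nops and hotpatched into real instructions in mlsetup(). The three bytes fit exactly: clac encodes as 0F 01 CA and stac as 0F 01 CB. A conceptual C sketch of the patch step, where hp_patch_text() is a hypothetical stand-in for the kernel's actual text-patching path (the patch-site labels come from the SMAP_*_INSTR macros in the hunks above):

    #include <stddef.h>
    #include <stdint.h>

    static const uint8_t stac_insn[3] = { 0x0f, 0x01, 0xcb };  /* stac */
    static const uint8_t clac_insn[3] = { 0x0f, 0x01, 0xca };  /* clac */

    extern void hp_patch_text(void *dst, const void *src, size_t len);
    extern char _smap_disable_patch_1[];    /* emitted by SMAP_DISABLE_INSTR(1) */
    extern char _smap_enable_patch_1[];     /* emitted by SMAP_ENABLE_INSTR(1) */

    void
    smap_hotpatch_demo(void)
    {
            hp_patch_text(_smap_disable_patch_1, stac_insn, sizeof (stac_insn));
            hp_patch_text(_smap_enable_patch_1, clac_insn, sizeof (clac_insn));
    }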
diff --git a/usr/src/uts/intel/ia32/ml/ddi_i86_asm.s b/usr/src/uts/intel/ia32/ml/ddi_i86_asm.s
index f90efdc922..c45f93e008 100644
--- a/usr/src/uts/intel/ia32/ml/ddi_i86_asm.s
+++ b/usr/src/uts/intel/ia32/ml/ddi_i86_asm.s
@@ -28,249 +28,9 @@
* Copyright 2019 Joyent, Inc.
*/
-#if defined(lint) || defined(__lint)
-#include <sys/types.h>
-#include <sys/sunddi.h>
-#else
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include "assym.h"
-#endif
-
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-uint8_t
-ddi_get8(ddi_acc_handle_t handle, uint8_t *addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint8_t
-ddi_mem_get8(ddi_acc_handle_t handle, uint8_t *addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint8_t
-ddi_io_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint16_t
-ddi_get16(ddi_acc_handle_t handle, uint16_t *addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint16_t
-ddi_mem_get16(ddi_acc_handle_t handle, uint16_t *addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint16_t
-ddi_io_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint32_t
-ddi_get32(ddi_acc_handle_t handle, uint32_t *addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint32_t
-ddi_mem_get32(ddi_acc_handle_t handle, uint32_t *addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint32_t
-ddi_io_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint64_t
-ddi_get64(ddi_acc_handle_t handle, uint64_t *addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint64_t
-ddi_mem_get64(ddi_acc_handle_t handle, uint64_t *addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-void
-ddi_put8(ddi_acc_handle_t handle, uint8_t *addr, uint8_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_io_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_put16(ddi_acc_handle_t handle, uint16_t *addr, uint16_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_io_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_put32(ddi_acc_handle_t handle, uint32_t *addr, uint32_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_io_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_put64(ddi_acc_handle_t handle, uint64_t *addr, uint64_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_put64(ddi_acc_handle_t handle, uint64_t *dev_addr, uint64_t value)
-{}
-
-/*ARGSUSED*/
-void
-ddi_rep_get8(ddi_acc_handle_t handle, uint8_t *host_addr, uint8_t *dev_addr,
- size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_rep_get16(ddi_acc_handle_t handle, uint16_t *host_addr, uint16_t *dev_addr,
- size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_rep_get32(ddi_acc_handle_t handle, uint32_t *host_addr, uint32_t *dev_addr,
- size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_rep_get64(ddi_acc_handle_t handle, uint64_t *host_addr, uint64_t *dev_addr,
- size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_rep_put8(ddi_acc_handle_t handle, uint8_t *host_addr, uint8_t *dev_addr,
- size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_rep_put16(ddi_acc_handle_t handle, uint16_t *host_addr, uint16_t *dev_addr,
- size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_rep_put32(ddi_acc_handle_t handle, uint32_t *host_addr, uint32_t *dev_addr,
- size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr, uint64_t *dev_addr,
- size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_rep_get8(ddi_acc_handle_t handle, uint8_t *host_addr,
- uint8_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_rep_get16(ddi_acc_handle_t handle, uint16_t *host_addr,
- uint16_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_rep_get32(ddi_acc_handle_t handle, uint32_t *host_addr,
- uint32_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_rep_get64(ddi_acc_handle_t handle, uint64_t *host_addr,
- uint64_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_rep_put8(ddi_acc_handle_t handle, uint8_t *host_addr,
- uint8_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_rep_put16(ddi_acc_handle_t handle, uint16_t *host_addr,
- uint16_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_rep_put32(ddi_acc_handle_t handle, uint32_t *host_addr,
- uint32_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
- uint64_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-#else /* lint */
-
-
-#if defined(__amd64)
ENTRY(ddi_get8)
ALTENTRY(ddi_getb)
@@ -300,40 +60,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_io_getb)
SET_SIZE(ddi_io_get8)
-#elif defined(__i386)
-
- ENTRY(ddi_get8)
- ALTENTRY(ddi_getb)
- ALTENTRY(ddi_mem_getb)
- ALTENTRY(ddi_mem_get8)
- ALTENTRY(ddi_io_getb)
- ALTENTRY(ddi_io_get8)
- movl 4(%esp), %eax
- movl ACC_ATTR(%eax), %ecx
- cmpl $_CONST(DDI_ACCATTR_IO_SPACE|DDI_ACCATTR_DIRECT), %ecx
- jne 1f
- movl 8(%esp), %edx
- xorl %eax, %eax
- inb (%dx)
- ret
-1:
- cmpl $_CONST(DDI_ACCATTR_CPU_VADDR|DDI_ACCATTR_DIRECT), %ecx
- jne 2f
- movl 8(%esp), %eax
- movzbl (%eax), %eax
- ret
-2:
- jmp *ACC_GETB(%eax)
- SET_SIZE(ddi_get8)
- SET_SIZE(ddi_getb)
- SET_SIZE(ddi_mem_getb)
- SET_SIZE(ddi_mem_get8)
- SET_SIZE(ddi_io_getb)
- SET_SIZE(ddi_io_get8)
-
-#endif /* __i386 */
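For reference, the removed 32-bit body above makes the dispatch explicit: the access handle's attributes select between port I/O, a direct load, or an indirect call through the handle's get routine. A rough C sketch of that dispatch, assuming the ddi_acc_impl_t field names implied by the ACC_ATTR and ACC_GETB assym offsets (the authoritative layout is in sys/ddi_impldefs.h):

	/* Sketch only: mirrors the removed i386 ddi_get8 dispatch above. */
	uint8_t
	ddi_get8_sketch(ddi_acc_impl_t *hp, uint8_t *addr)
	{
		if (hp->ahi_acc_attr == (DDI_ACCATTR_IO_SPACE | DDI_ACCATTR_DIRECT))
			return (inb((int)(uintptr_t)addr));	/* port I/O */
		if (hp->ahi_acc_attr == (DDI_ACCATTR_CPU_VADDR | DDI_ACCATTR_DIRECT))
			return (*addr);				/* direct load */
		return (hp->ahi_get8(hp, addr));		/* via handle op */
	}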
-
-#if defined(__amd64)
ENTRY(ddi_get16)
ALTENTRY(ddi_getw)
@@ -363,40 +89,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_io_getw)
SET_SIZE(ddi_io_get16)
-#elif defined(__i386)
-
- ENTRY(ddi_get16)
- ALTENTRY(ddi_getw)
- ALTENTRY(ddi_mem_getw)
- ALTENTRY(ddi_mem_get16)
- ALTENTRY(ddi_io_getw)
- ALTENTRY(ddi_io_get16)
- movl 4(%esp), %eax
- movl ACC_ATTR(%eax), %ecx
- cmpl $_CONST(DDI_ACCATTR_IO_SPACE|DDI_ACCATTR_DIRECT), %ecx
- jne 3f
- movl 8(%esp), %edx
- xorl %eax, %eax
- inw (%dx)
- ret
-3:
- cmpl $_CONST(DDI_ACCATTR_CPU_VADDR|DDI_ACCATTR_DIRECT), %ecx
- jne 4f
- movl 8(%esp), %eax
- movzwl (%eax), %eax
- ret
-4:
- jmp *ACC_GETW(%eax)
- SET_SIZE(ddi_get16)
- SET_SIZE(ddi_getw)
- SET_SIZE(ddi_mem_getw)
- SET_SIZE(ddi_mem_get16)
- SET_SIZE(ddi_io_getw)
- SET_SIZE(ddi_io_get16)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_get32)
ALTENTRY(ddi_getl)
@@ -425,39 +117,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_io_getl)
SET_SIZE(ddi_io_get32)
-#elif defined(__i386)
-
- ENTRY(ddi_get32)
- ALTENTRY(ddi_getl)
- ALTENTRY(ddi_mem_getl)
- ALTENTRY(ddi_mem_get32)
- ALTENTRY(ddi_io_getl)
- ALTENTRY(ddi_io_get32)
- movl 4(%esp), %eax
- movl ACC_ATTR(%eax), %ecx
- cmpl $_CONST(DDI_ACCATTR_IO_SPACE|DDI_ACCATTR_DIRECT), %ecx
- jne 5f
- movl 8(%esp), %edx
- inl (%dx)
- ret
-5:
- cmpl $_CONST(DDI_ACCATTR_CPU_VADDR|DDI_ACCATTR_DIRECT), %ecx
- jne 6f
- movl 8(%esp), %eax
- movl (%eax), %eax
- ret
-6:
- jmp *ACC_GETL(%eax)
- SET_SIZE(ddi_get32)
- SET_SIZE(ddi_getl)
- SET_SIZE(ddi_mem_getl)
- SET_SIZE(ddi_mem_get32)
- SET_SIZE(ddi_io_getl)
- SET_SIZE(ddi_io_get32)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_get64)
ALTENTRY(ddi_getll)
@@ -470,22 +129,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_mem_getll)
SET_SIZE(ddi_mem_get64)
-#elif defined(__i386)
-
- ENTRY(ddi_get64)
- ALTENTRY(ddi_getll)
- ALTENTRY(ddi_mem_getll)
- ALTENTRY(ddi_mem_get64)
- movl 4(%esp), %eax
- jmp *ACC_GETLL(%eax)
- SET_SIZE(ddi_get64)
- SET_SIZE(ddi_getll)
- SET_SIZE(ddi_mem_getll)
- SET_SIZE(ddi_mem_get64)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_put8)
ALTENTRY(ddi_putb)
@@ -515,41 +158,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_io_putb)
SET_SIZE(ddi_io_put8)
-#elif defined(__i386)
-
- ENTRY(ddi_put8)
- ALTENTRY(ddi_putb)
- ALTENTRY(ddi_mem_putb)
- ALTENTRY(ddi_mem_put8)
- ALTENTRY(ddi_io_putb)
- ALTENTRY(ddi_io_put8)
- movl 4(%esp), %eax
- movl ACC_ATTR(%eax), %ecx
- cmpl $_CONST(DDI_ACCATTR_IO_SPACE|DDI_ACCATTR_DIRECT), %ecx
- jne 7f
- movl 12(%esp), %eax
- movl 8(%esp), %edx
- outb (%dx)
- ret
-7:
- cmpl $_CONST(DDI_ACCATTR_CPU_VADDR|DDI_ACCATTR_DIRECT), %ecx
- jne 8f
- movl 8(%esp), %eax
- movl 12(%esp), %ecx
- movb %cl, (%eax)
- ret
-8:
- jmp *ACC_PUTB(%eax)
- SET_SIZE(ddi_put8)
- SET_SIZE(ddi_putb)
- SET_SIZE(ddi_mem_putb)
- SET_SIZE(ddi_mem_put8)
- SET_SIZE(ddi_io_putb)
- SET_SIZE(ddi_io_put8)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_put16)
ALTENTRY(ddi_putw)
@@ -579,41 +187,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_io_putw)
SET_SIZE(ddi_io_put16)
-#elif defined(__i386)
-
- ENTRY(ddi_put16)
- ALTENTRY(ddi_putw)
- ALTENTRY(ddi_mem_putw)
- ALTENTRY(ddi_mem_put16)
- ALTENTRY(ddi_io_putw)
- ALTENTRY(ddi_io_put16)
- movl 4(%esp), %eax
- movl ACC_ATTR(%eax), %ecx
- cmpl $_CONST(DDI_ACCATTR_IO_SPACE|DDI_ACCATTR_DIRECT), %ecx
- jne 8f
- movl 12(%esp), %eax
- movl 8(%esp), %edx
- outw (%dx)
- ret
-8:
- cmpl $_CONST(DDI_ACCATTR_CPU_VADDR|DDI_ACCATTR_DIRECT), %ecx
- jne 9f
- movl 8(%esp), %eax
- movl 12(%esp), %ecx
- movw %cx, (%eax)
- ret
-9:
- jmp *ACC_PUTW(%eax)
- SET_SIZE(ddi_put16)
- SET_SIZE(ddi_putw)
- SET_SIZE(ddi_mem_putw)
- SET_SIZE(ddi_mem_put16)
- SET_SIZE(ddi_io_putw)
- SET_SIZE(ddi_io_put16)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_put32)
ALTENTRY(ddi_putl)
@@ -643,41 +216,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_io_putl)
SET_SIZE(ddi_io_put32)
-#elif defined(__i386)
-
- ENTRY(ddi_put32)
- ALTENTRY(ddi_putl)
- ALTENTRY(ddi_mem_putl)
- ALTENTRY(ddi_mem_put32)
- ALTENTRY(ddi_io_putl)
- ALTENTRY(ddi_io_put32)
- movl 4(%esp), %eax
- movl ACC_ATTR(%eax), %ecx
- cmpl $_CONST(DDI_ACCATTR_IO_SPACE|DDI_ACCATTR_DIRECT), %ecx
- jne 8f
- movl 12(%esp), %eax
- movl 8(%esp), %edx
- outl (%dx)
- ret
-8:
- cmpl $_CONST(DDI_ACCATTR_CPU_VADDR|DDI_ACCATTR_DIRECT), %ecx
- jne 9f
- movl 8(%esp), %eax
- movl 12(%esp), %ecx
- movl %ecx, (%eax)
- ret
-9:
- jmp *ACC_PUTL(%eax)
- SET_SIZE(ddi_put32)
- SET_SIZE(ddi_putl)
- SET_SIZE(ddi_mem_putl)
- SET_SIZE(ddi_mem_put32)
- SET_SIZE(ddi_io_putl)
- SET_SIZE(ddi_io_put32)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_put64)
ALTENTRY(ddi_putll)
@@ -690,22 +228,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_mem_putll)
SET_SIZE(ddi_mem_put64)
-#elif defined(__i386)
-
- ENTRY(ddi_put64)
- ALTENTRY(ddi_putll)
- ALTENTRY(ddi_mem_putll)
- ALTENTRY(ddi_mem_put64)
- movl 4(%esp), %eax
- jmp *ACC_PUTLL(%eax)
- SET_SIZE(ddi_put64)
- SET_SIZE(ddi_putll)
- SET_SIZE(ddi_mem_putll)
- SET_SIZE(ddi_mem_put64)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_rep_get8)
ALTENTRY(ddi_rep_getb)
@@ -718,22 +240,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_mem_rep_getb)
SET_SIZE(ddi_mem_rep_get8)
-#elif defined(__i386)
-
- ENTRY(ddi_rep_get8)
- ALTENTRY(ddi_rep_getb)
- ALTENTRY(ddi_mem_rep_getb)
- ALTENTRY(ddi_mem_rep_get8)
- movl 4(%esp), %eax
- jmp *ACC_REP_GETB(%eax)
- SET_SIZE(ddi_rep_get8)
- SET_SIZE(ddi_rep_getb)
- SET_SIZE(ddi_mem_rep_getb)
- SET_SIZE(ddi_mem_rep_get8)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_rep_get16)
ALTENTRY(ddi_rep_getw)
@@ -746,22 +252,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_mem_rep_getw)
SET_SIZE(ddi_mem_rep_get16)
-#elif defined(__i386)
-
- ENTRY(ddi_rep_get16)
- ALTENTRY(ddi_rep_getw)
- ALTENTRY(ddi_mem_rep_getw)
- ALTENTRY(ddi_mem_rep_get16)
- movl 4(%esp), %eax
- jmp *ACC_REP_GETW(%eax)
- SET_SIZE(ddi_rep_get16)
- SET_SIZE(ddi_rep_getw)
- SET_SIZE(ddi_mem_rep_getw)
- SET_SIZE(ddi_mem_rep_get16)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_rep_get32)
ALTENTRY(ddi_rep_getl)
@@ -774,22 +264,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_mem_rep_getl)
SET_SIZE(ddi_mem_rep_get32)
-#elif defined(__i386)
-
- ENTRY(ddi_rep_get32)
- ALTENTRY(ddi_rep_getl)
- ALTENTRY(ddi_mem_rep_getl)
- ALTENTRY(ddi_mem_rep_get32)
- movl 4(%esp), %eax
- jmp *ACC_REP_GETL(%eax)
- SET_SIZE(ddi_rep_get32)
- SET_SIZE(ddi_rep_getl)
- SET_SIZE(ddi_mem_rep_getl)
- SET_SIZE(ddi_mem_rep_get32)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_rep_get64)
ALTENTRY(ddi_rep_getll)
@@ -802,22 +276,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_mem_rep_getll)
SET_SIZE(ddi_mem_rep_get64)
-#elif defined(__i386)
-
- ENTRY(ddi_rep_get64)
- ALTENTRY(ddi_rep_getll)
- ALTENTRY(ddi_mem_rep_getll)
- ALTENTRY(ddi_mem_rep_get64)
- movl 4(%esp), %eax
- jmp *ACC_REP_GETLL(%eax)
- SET_SIZE(ddi_rep_get64)
- SET_SIZE(ddi_rep_getll)
- SET_SIZE(ddi_mem_rep_getll)
- SET_SIZE(ddi_mem_rep_get64)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_rep_put8)
ALTENTRY(ddi_rep_putb)
@@ -830,22 +288,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_mem_rep_putb)
SET_SIZE(ddi_mem_rep_put8)
-#elif defined(__i386)
-
- ENTRY(ddi_rep_put8)
- ALTENTRY(ddi_rep_putb)
- ALTENTRY(ddi_mem_rep_putb)
- ALTENTRY(ddi_mem_rep_put8)
- movl 4(%esp), %eax
- jmp *ACC_REP_PUTB(%eax)
- SET_SIZE(ddi_rep_put8)
- SET_SIZE(ddi_rep_putb)
- SET_SIZE(ddi_mem_rep_putb)
- SET_SIZE(ddi_mem_rep_put8)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_rep_put16)
ALTENTRY(ddi_rep_putw)
@@ -858,22 +300,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_mem_rep_putw)
SET_SIZE(ddi_mem_rep_put16)
-#elif defined(__i386)
-
- ENTRY(ddi_rep_put16)
- ALTENTRY(ddi_rep_putw)
- ALTENTRY(ddi_mem_rep_putw)
- ALTENTRY(ddi_mem_rep_put16)
- movl 4(%esp), %eax
- jmp *ACC_REP_PUTW(%eax)
- SET_SIZE(ddi_rep_put16)
- SET_SIZE(ddi_rep_putw)
- SET_SIZE(ddi_mem_rep_putw)
- SET_SIZE(ddi_mem_rep_put16)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_rep_put32)
ALTENTRY(ddi_rep_putl)
@@ -886,22 +312,6 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_mem_rep_putl)
SET_SIZE(ddi_mem_rep_put32)
-#elif defined(__i386)
-
- ENTRY(ddi_rep_put32)
- ALTENTRY(ddi_rep_putl)
- ALTENTRY(ddi_mem_rep_putl)
- ALTENTRY(ddi_mem_rep_put32)
- movl 4(%esp), %eax
- jmp *ACC_REP_PUTL(%eax)
- SET_SIZE(ddi_rep_put32)
- SET_SIZE(ddi_rep_putl)
- SET_SIZE(ddi_mem_rep_putl)
- SET_SIZE(ddi_mem_rep_put32)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(ddi_rep_put64)
ALTENTRY(ddi_rep_putll)
@@ -914,153 +324,28 @@ ddi_mem_rep_put64(ddi_acc_handle_t handle, uint64_t *host_addr,
SET_SIZE(ddi_mem_rep_putll)
SET_SIZE(ddi_mem_rep_put64)
-#elif defined(__i386)
-
- ENTRY(ddi_rep_put64)
- ALTENTRY(ddi_rep_putll)
- ALTENTRY(ddi_mem_rep_putll)
- ALTENTRY(ddi_mem_rep_put64)
- movl 4(%esp), %eax
- jmp *ACC_REP_PUTLL(%eax)
- SET_SIZE(ddi_rep_put64)
- SET_SIZE(ddi_rep_putll)
- SET_SIZE(ddi_mem_rep_putll)
- SET_SIZE(ddi_mem_rep_put64)
-
-#endif /* __i386 */
-
-#endif /* lint */
-
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-uint8_t
-i_ddi_vaddr_get8(ddi_acc_impl_t *hdlp, uint8_t *addr)
-{
- return (*addr);
-}
-
-/*ARGSUSED*/
-uint16_t
-i_ddi_vaddr_get16(ddi_acc_impl_t *hdlp, uint16_t *addr)
-{
- return (*addr);
-}
-
-/*ARGSUSED*/
-uint32_t
-i_ddi_vaddr_get32(ddi_acc_impl_t *hdlp, uint32_t *addr)
-{
- return (*addr);
-}
-
-/*ARGSUSED*/
-uint64_t
-i_ddi_vaddr_get64(ddi_acc_impl_t *hdlp, uint64_t *addr)
-{
- return (*addr);
-}
-
-#else /* lint */
-
-#if defined(__amd64)
-
ENTRY(i_ddi_vaddr_get8)
movzbq (%rsi), %rax
ret
SET_SIZE(i_ddi_vaddr_get8)
-#elif defined(__i386)
-
- ENTRY(i_ddi_vaddr_get8)
- movl 8(%esp), %eax
- movzbl (%eax), %eax
- ret
- SET_SIZE(i_ddi_vaddr_get8)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
-
ENTRY(i_ddi_vaddr_get16)
movzwq (%rsi), %rax
ret
SET_SIZE(i_ddi_vaddr_get16)
-#elif defined(__i386)
-
- ENTRY(i_ddi_vaddr_get16)
- movl 8(%esp), %eax
- movzwl (%eax), %eax
- ret
- SET_SIZE(i_ddi_vaddr_get16)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_vaddr_get32)
movl (%rsi), %eax
ret
SET_SIZE(i_ddi_vaddr_get32)
-#elif defined(__i386)
-
- ENTRY(i_ddi_vaddr_get32)
- movl 8(%esp), %eax
- movl (%eax), %eax
- ret
- SET_SIZE(i_ddi_vaddr_get32)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_vaddr_get64)
movq (%rsi), %rax
ret
SET_SIZE(i_ddi_vaddr_get64)
-#elif defined(__i386)
-
- ENTRY(i_ddi_vaddr_get64)
- movl 8(%esp), %ecx
- movl (%ecx), %eax
- movl 4(%ecx), %edx
- ret
- SET_SIZE(i_ddi_vaddr_get64)
-
-#endif /* __i386 */
-
-#endif /* lint */
-
-
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-uint8_t
-i_ddi_io_get8(ddi_acc_impl_t *hdlp, uint8_t *addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint16_t
-i_ddi_io_get16(ddi_acc_impl_t *hdlp, uint16_t *addr)
-{
- return (0);
-}
-
-/*ARGSUSED*/
-uint32_t
-i_ddi_io_get32(ddi_acc_impl_t *hdlp, uint32_t *addr)
-{
- return (0);
-}
-
-#else /* lint */
-
-#if defined(__amd64)
ENTRY(i_ddi_io_get8)
movq %rsi, %rdx
@@ -1069,18 +354,6 @@ i_ddi_io_get32(ddi_acc_impl_t *hdlp, uint32_t *addr)
ret
SET_SIZE(i_ddi_io_get8)
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_get8)
- movl 8(%esp), %edx
- inb (%dx)
- movzbl %al, %eax
- ret
- SET_SIZE(i_ddi_io_get8)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_io_get16)
movq %rsi, %rdx
@@ -1089,18 +362,6 @@ i_ddi_io_get32(ddi_acc_impl_t *hdlp, uint32_t *addr)
ret
SET_SIZE(i_ddi_io_get16)
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_get16)
- movl 8(%esp), %edx
- inw (%dx)
- movzwl %ax, %eax
- ret
- SET_SIZE(i_ddi_io_get16)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_io_get32)
movq %rsi, %rdx
@@ -1108,147 +369,29 @@ i_ddi_io_get32(ddi_acc_impl_t *hdlp, uint32_t *addr)
ret
SET_SIZE(i_ddi_io_get32)
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_get32)
- movl 8(%esp), %edx
- inl (%dx)
- ret
- SET_SIZE(i_ddi_io_get32)
-
-#endif /* __i386 */
-
-#endif /* lint */
-
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-void
-i_ddi_vaddr_put8(ddi_acc_impl_t *hdlp, uint8_t *addr, uint8_t value)
-{
- *addr = value;
-}
-
-/*ARGSUSED*/
-void
-i_ddi_vaddr_put16(ddi_acc_impl_t *hdlp, uint16_t *addr, uint16_t value)
-{
- *addr = value;
-}
-
-/*ARGSUSED*/
-void
-i_ddi_vaddr_put32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
-{
- *(uint32_t *)addr = value;
-}
-
-/*ARGSUSED*/
-void
-i_ddi_vaddr_put64(ddi_acc_impl_t *hdlp, uint64_t *addr, uint64_t value)
-{
- *addr = value;
-}
-
-#else /* lint */
-
-#if defined(__amd64)
-
ENTRY(i_ddi_vaddr_put8)
movb %dl, (%rsi)
ret
SET_SIZE(i_ddi_vaddr_put8)
-#elif defined(__i386)
-
- ENTRY(i_ddi_vaddr_put8)
- movl 8(%esp), %eax
- movb 12(%esp), %cl
- movb %cl, (%eax)
- ret
- SET_SIZE(i_ddi_vaddr_put8)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_vaddr_put16)
movw %dx, (%rsi)
ret
SET_SIZE(i_ddi_vaddr_put16)
-#elif defined(__i386)
-
- ENTRY(i_ddi_vaddr_put16)
- movl 8(%esp), %eax
- movl 12(%esp), %ecx
- movw %cx, (%eax)
- ret
- SET_SIZE(i_ddi_vaddr_put16)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_vaddr_put32)
movl %edx, (%rsi)
ret
SET_SIZE(i_ddi_vaddr_put32)
-#elif defined(__i386)
-
- ENTRY(i_ddi_vaddr_put32)
- movl 8(%esp), %eax
- movl 12(%esp), %ecx
- movl %ecx, (%eax)
- ret
- SET_SIZE(i_ddi_vaddr_put32)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_vaddr_put64)
movq %rdx, (%rsi)
ret
SET_SIZE(i_ddi_vaddr_put64)
-#elif defined(__i386)
-
- ENTRY(i_ddi_vaddr_put64)
- movl 8(%esp), %ecx
- movl 12(%esp), %edx
- movl 16(%esp), %eax
- movl %edx, (%ecx)
- movl %eax, 4(%ecx)
- ret
- SET_SIZE(i_ddi_vaddr_put64)
-
-#endif /* __i386 */
-
-#endif /* lint */
-
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-void
-i_ddi_io_put8(ddi_acc_impl_t *hdlp, uint8_t *addr, uint8_t value)
-{}
-
-/*ARGSUSED*/
-void
-i_ddi_io_put16(ddi_acc_impl_t *hdlp, uint16_t *addr, uint16_t value)
-{}
-
-/*ARGSUSED*/
-void
-i_ddi_io_put32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
-{}
-
-#else /* lint */
-
-#if defined(__amd64)
-
ENTRY(i_ddi_io_put8)
movq %rdx, %rax
movq %rsi, %rdx
@@ -1256,18 +399,6 @@ i_ddi_io_put32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
ret
SET_SIZE(i_ddi_io_put8)
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_put8)
- movl 12(%esp), %eax
- movl 8(%esp), %edx
- outb (%dx)
- ret
- SET_SIZE(i_ddi_io_put8)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_io_put16)
movq %rdx, %rax
@@ -1276,18 +407,6 @@ i_ddi_io_put32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
ret
SET_SIZE(i_ddi_io_put16)
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_put16)
- movl 12(%esp), %eax
- movl 8(%esp), %edx
- outw (%dx)
- ret
- SET_SIZE(i_ddi_io_put16)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_io_put32)
movq %rdx, %rax
@@ -1296,43 +415,6 @@ i_ddi_io_put32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
ret
SET_SIZE(i_ddi_io_put32)
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_put32)
- movl 12(%esp), %eax
- movl 8(%esp), %edx
- outl (%dx)
- ret
- SET_SIZE(i_ddi_io_put32)
-
-#endif /* __i386 */
-
-#endif /* lint */
-
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-void
-i_ddi_io_rep_get8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
- uint8_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-i_ddi_io_rep_get16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
- uint16_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-i_ddi_io_rep_get32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
- uint32_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-#else /* lint */
-
-#if defined(__amd64)
-
/*
* Incoming arguments
*
@@ -1370,42 +452,6 @@ gb_ioadv_done:
SET_SIZE(i_ddi_io_rep_get8)
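The flags argument distinguishes a fixed port from an auto-incrementing address range: with DDI_DEV_AUTOINCR the removed 32-bit loop below bumps both the host address and the port on each iteration, while the default case is a plain "rep insb" from one port. A C sketch of the same semantics (a hypothetical helper, assuming an inb() port-read primitive; not the kernel implementation):

	void
	io_rep_get8_sketch(uint8_t *host_addr, uintptr_t port, size_t repcount,
	    uint_t flags)
	{
		size_t i;

		for (i = 0; i < repcount; i++) {
			host_addr[i] = inb(port);
			if (flags == DDI_DEV_AUTOINCR)
				port++;		/* advance the port, too */
		}
	}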
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_rep_get8)
- pushl %edi
-
- movl 12(%esp),%edi / get host_addr
- movl 16(%esp),%edx / get port
- movl 20(%esp),%ecx / get repcount
- cmpl $DDI_DEV_AUTOINCR, 24(%esp)
- je gb_ioadv
-
- rep
- insb
- popl %edi
- ret
-
-gb_ioadv:
- andl %ecx, %ecx
- jz gb_ioadv_done
-gb_ioadv2:
- inb (%dx)
- movb %al,(%edi)
- incl %edi
- incl %edx
- decl %ecx
- jg gb_ioadv2
-
-gb_ioadv_done:
- popl %edi
- ret
-
- SET_SIZE(i_ddi_io_rep_get8)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_io_rep_get16)
@@ -1433,41 +479,6 @@ gw_ioadv_done:
/* AMD Software Optimization Guide - Section 6.2 */
SET_SIZE(i_ddi_io_rep_get16)
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_rep_get16)
- pushl %edi
-
- movl 12(%esp),%edi / get host_addr
- movl 16(%esp),%edx / get port
- movl 20(%esp),%ecx / get repcount
- cmpl $DDI_DEV_AUTOINCR, 24(%esp)
- je gw_ioadv
-
- rep
- insw
- popl %edi
- ret
-
-gw_ioadv:
- andl %ecx, %ecx
- jz gw_ioadv_done
-gw_ioadv2:
- inw (%dx)
- movw %ax,(%edi)
- addl $2, %edi
- addl $2, %edx
- decl %ecx
- jg gw_ioadv2
-
-gw_ioadv_done:
- popl %edi
- ret
- SET_SIZE(i_ddi_io_rep_get16)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_io_rep_get32)
@@ -1496,68 +507,6 @@ gl_ioadv_done:
SET_SIZE(i_ddi_io_rep_get32)
-
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_rep_get32)
- pushl %edi
-
- movl 12(%esp),%edi / get host_addr
- movl 16(%esp),%edx / get port
- movl 20(%esp),%ecx / get repcount
- cmpl $DDI_DEV_AUTOINCR, 24(%esp)
- je gl_ioadv
-
- rep
- insl
- popl %edi
- ret
-
-gl_ioadv:
- andl %ecx, %ecx
- jz gl_ioadv_done
-gl_ioadv2:
- inl (%dx)
- movl %eax,(%edi)
- addl $4, %edi
- addl $4, %edx
- decl %ecx
- jg gl_ioadv2
-
-gl_ioadv_done:
- popl %edi
- ret
-
- SET_SIZE(i_ddi_io_rep_get32)
-
-#endif /* __i386 */
-
-#endif /* lint */
-
-#if defined(lint) || defined(__lint)
-
-/*ARGSUSED*/
-void
-i_ddi_io_rep_put8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
- uint8_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-i_ddi_io_rep_put16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
- uint16_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-/*ARGSUSED*/
-void
-i_ddi_io_rep_put32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
- uint32_t *dev_addr, size_t repcount, uint_t flags)
-{}
-
-#else /* lint */
-
-#if defined(__amd64)
-
/*
* Incoming arguments
*
@@ -1595,42 +544,6 @@ pb_ioadv_done:
/* AMD Software Optimization Guide - Section 6.2 */
SET_SIZE(i_ddi_io_rep_put8)
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_rep_put8)
- pushl %esi
-
- movl 12(%esp),%esi / get host_addr
- movl 16(%esp),%edx / get port
- movl 20(%esp),%ecx / get repcount
- cmpl $DDI_DEV_AUTOINCR, 24(%esp)
- je pb_ioadv
-
- rep
- outsb
- popl %esi
- ret
-
-pb_ioadv:
- andl %ecx, %ecx
- jz pb_ioadv_done
-pb_ioadv2:
- movb (%esi), %al
- outb (%dx)
- incl %esi
- incl %edx
- decl %ecx
- jg pb_ioadv2
-
-pb_ioadv_done:
- popl %esi
- ret
- SET_SIZE(i_ddi_io_rep_put8)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
-
ENTRY(i_ddi_io_rep_put16)
cmpq $DDI_DEV_AUTOINCR, %r8
@@ -1657,41 +570,6 @@ pw_ioadv_done:
/* AMD Software Optimization Guide - Section 6.2 */
SET_SIZE(i_ddi_io_rep_put16)
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_rep_put16)
- pushl %esi
-
- movl 12(%esp),%esi / get host_addr
- movl 16(%esp),%edx / get port
- movl 20(%esp),%ecx / get repcount
- cmpl $DDI_DEV_AUTOINCR, 24(%esp)
- je pw_ioadv
-
- rep
- outsw
- popl %esi
- ret
-
-pw_ioadv:
- andl %ecx, %ecx
- jz pw_ioadv_done
-pw_ioadv2:
- movw (%esi), %ax
- outw (%dx)
- addl $2, %esi
- addl $2, %edx
- decl %ecx
- jg pw_ioadv2
-
-pw_ioadv_done:
- popl %esi
- ret
- SET_SIZE(i_ddi_io_rep_put16)
-
-#endif /* __i386 */
-
-#if defined(__amd64)
ENTRY(i_ddi_io_rep_put32)
@@ -1719,38 +597,3 @@ pl_ioadv_done:
/* AMD Software Optimization Guide - Section 6.2 */
SET_SIZE(i_ddi_io_rep_put32)
-#elif defined(__i386)
-
- ENTRY(i_ddi_io_rep_put32)
- pushl %esi
-
- movl 12(%esp),%esi / get host_addr
- movl 16(%esp),%edx / get port
- movl 20(%esp),%ecx / get repcount
- cmpl $DDI_DEV_AUTOINCR, 24(%esp)
- je pl_ioadv
-
- rep
- outsl
- popl %esi
- ret
-
-pl_ioadv:
- andl %ecx, %ecx
- jz pl_ioadv_done
-pl_ioadv2:
- movl (%esi), %eax
- outl (%dx)
- addl $4, %esi
- addl $4, %edx
- decl %ecx
- jg pl_ioadv2
-
-pl_ioadv_done:
- popl %esi
- ret
- SET_SIZE(i_ddi_io_rep_put32)
-
-#endif /* __i386 */
-
-#endif /* lint */
diff --git a/usr/src/uts/intel/ia32/ml/desctbls_asm.s b/usr/src/uts/intel/ia32/ml/desctbls_asm.s
index 26cea36fff..4528bc07ad 100644
--- a/usr/src/uts/intel/ia32/ml/desctbls_asm.s
+++ b/usr/src/uts/intel/ia32/ml/desctbls_asm.s
@@ -23,6 +23,10 @@
* Use is subject to license terms.
*/
+/*
+ * Copyright 2019 Joyent, Inc.
+ */
+
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
@@ -32,33 +36,7 @@
#include <sys/segments.h>
#include <sys/trap.h>
-#if defined(__lint)
-#include <sys/types.h>
-#include <sys/systm.h>
-#include <sys/thread.h>
-#include <sys/archsystm.h>
-#include <sys/byteorder.h>
-#include <sys/dtrace.h>
-#include <sys/x86_archext.h>
-#else /* __lint */
#include "assym.h"
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-rd_idtr(desctbr_t *idtr)
-{}
-
-/*ARGSUSED*/
-void
-wr_idtr(desctbr_t *idtr)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY_NP(rd_idtr)
sidt (%rdi)
@@ -70,45 +48,6 @@ wr_idtr(desctbr_t *idtr)
ret
SET_SIZE(wr_idtr)
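rd_idtr and wr_idtr are thin wrappers around sidt/lidt; the caller passes a desctbr_t (limit plus base) by reference. A hedged C view of the calling pattern, assuming the dtr_limit/dtr_base layout of desctbr_t from <sys/segments.h>:

	/* Illustration only: reading and re-loading the IDT register. */
	desctbr_t idtr;

	rd_idtr(&idtr);		/* sidt: store current limit/base */
	/* ... inspect or rebuild the table at idtr.dtr_base ... */
	wr_idtr(&idtr);		/* lidt: activate the descriptor table */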
-#elif defined(__i386)
-
- ENTRY_NP(rd_idtr)
- pushl %ebp
- movl %esp, %ebp
- movl 8(%ebp), %edx
- sidt (%edx)
- leave
- ret
- SET_SIZE(rd_idtr)
-
- ENTRY_NP(wr_idtr)
- pushl %ebp
- movl %esp, %ebp
- movl 8(%ebp), %edx
- lidt (%edx)
- leave
- ret
- SET_SIZE(wr_idtr)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-rd_gdtr(desctbr_t *gdtr)
-{}
-
-/*ARGSUSED*/
-void
-wr_gdtr(desctbr_t *gdtr)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(rd_gdtr)
pushq %rbp
movq %rsp, %rbp
@@ -128,47 +67,6 @@ wr_gdtr(desctbr_t *gdtr)
ret
SET_SIZE(wr_gdtr)
-#elif defined(__i386)
-
- ENTRY_NP(rd_gdtr)
- pushl %ebp
- movl %esp, %ebp
- movl 8(%ebp), %edx
- sgdt (%edx)
- leave
- ret
- SET_SIZE(rd_gdtr)
-
- ENTRY_NP(wr_gdtr)
- pushl %ebp
- movl %esp, %ebp
- movl 8(%ebp), %edx
- lgdt (%edx)
- jmp 1f
- nop
-1:
- leave
- ret
- SET_SIZE(wr_gdtr)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__amd64)
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-load_segment_registers(selector_t cs, selector_t fs, selector_t gs,
- selector_t ss)
-{}
-
-selector_t
-get_cs_register()
-{ return (0); }
-
-#else /* __lint */
-
/*
* loads zero selector for ds and es.
*/
@@ -200,70 +98,6 @@ get_cs_register()
ret
SET_SIZE(get_cs_register)
-#endif /* __lint */
-#elif defined(__i386)
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-load_segment_registers(
- selector_t cs, selector_t ds, selector_t es,
- selector_t fs, selector_t gs, selector_t ss)
-{}
-
-selector_t
-get_cs_register()
-{ return ((selector_t) 0); }
-
-#else /* __lint */
-
- ENTRY_NP(load_segment_registers)
- pushl %ebp
- movl %esp, %ebp
-
- pushl 0x8(%ebp)
- pushl $.newcs
- lret
-.newcs:
- movw 0xc(%ebp), %ax
- movw %ax, %ds
- movw 0x10(%ebp), %ax
- movw %ax, %es
- movw 0x14(%ebp), %ax
- movw %ax, %fs
- movw 0x18(%ebp), %ax
- movw %ax, %gs
- movw 0x1c(%ebp), %ax
- movw %ax, %ss
- leave
- ret
- SET_SIZE(load_segment_registers)
-
- ENTRY_NP(get_cs_register)
- movl $0, %eax
- movw %cs, %ax
- ret
- SET_SIZE(get_cs_register)
-
-#endif /* __lint */
-#endif /* __i386 */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-wr_ldtr(selector_t ldtsel)
-{}
-
-selector_t
-rd_ldtr(void)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(wr_ldtr)
movq %rdi, %rax
lldt %ax
@@ -276,47 +110,9 @@ rd_ldtr(void)
ret
SET_SIZE(rd_ldtr)
-#elif defined(__i386)
-
- ENTRY_NP(wr_ldtr)
- movw 4(%esp), %ax
- lldt %ax
- ret
- SET_SIZE(wr_ldtr)
-
- ENTRY_NP(rd_ldtr)
- xorl %eax, %eax
- sldt %ax
- ret
- SET_SIZE(rd_ldtr)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-wr_tsr(selector_t tsssel)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(wr_tsr)
movq %rdi, %rax
ltr %ax
ret
SET_SIZE(wr_tsr)
-#elif defined(__i386)
-
- ENTRY_NP(wr_tsr)
- movw 4(%esp), %ax
- ltr %ax
- ret
- SET_SIZE(wr_tsr)
-
-#endif /* __i386 */
-#endif /* __lint */
diff --git a/usr/src/uts/intel/ia32/ml/exception.s b/usr/src/uts/intel/ia32/ml/exception.s
index b35eab3220..92c410adc0 100644
--- a/usr/src/uts/intel/ia32/ml/exception.s
+++ b/usr/src/uts/intel/ia32/ml/exception.s
@@ -51,8 +51,6 @@
#include <sys/traptrace.h>
#include <sys/machparam.h>
-#if !defined(__lint)
-
#include "assym.h"
/*
@@ -67,7 +65,7 @@
 * it gets saved as if running native.
*/
-#if defined(__xpv) && defined(__amd64)
+#if defined(__xpv)
#define NPTRAP_NOERR(trapno) \
pushq $0; \
@@ -85,7 +83,7 @@
XPV_TRAP_POP; \
pushq $trapno
-#else /* __xpv && __amd64 */
+#else /* __xpv */
#define TRAP_NOERR(trapno) \
push $0; \
@@ -100,11 +98,11 @@
#define TRAP_ERR(trapno) \
push $trapno
-#endif /* __xpv && __amd64 */
+#endif /* __xpv */
/*
* These are the stacks used on cpu0 for taking double faults,
- * NMIs and MCEs (the latter two only on amd64 where we have IST).
+ * NMIs and MCEs.
*
* We define them here instead of in a C file so that we can page-align
* them (gcc won't do that in a .c file).
@@ -134,7 +132,6 @@
ENTRY_NP(dbgtrap)
TRAP_NOERR(T_SGLSTP) /* $1 */
-#if defined(__amd64)
#if !defined(__xpv) /* no sysenter support yet */
/*
* If we get here as a result of single-stepping a sysenter
@@ -193,29 +190,9 @@
movq %rax, %db6
#endif
-#elif defined(__i386)
-
- INTR_PUSH
-#if defined(__xpv)
- pushl $6
- call kdi_dreg_get
- addl $4, %esp
- movl %eax, %esi /* %dr6 -> %esi */
- pushl $0
- pushl $6
- call kdi_dreg_set /* 0 -> %dr6 */
- addl $8, %esp
-#else
- movl %db6, %esi
- xorl %eax, %eax
- movl %eax, %db6
-#endif
-#endif /* __i386 */
-
jmp cmntrap_pushed
SET_SIZE(dbgtrap)
-#if defined(__amd64)
#if !defined(__xpv)
/*
@@ -277,11 +254,8 @@
#define SET_CPU_GSBASE /* noop on the hypervisor */
#endif /* __xpv */
-#endif /* __amd64 */
-#if defined(__amd64)
-
/*
* #NMI
*
@@ -314,43 +288,10 @@
/*NOTREACHED*/
SET_SIZE(nmiint)
-#elif defined(__i386)
-
- /*
- * #NMI
- */
- ENTRY_NP(nmiint)
- TRAP_NOERR(T_NMIFLT) /* $2 */
-
- /*
- * Save all registers and setup segment registers
- * with kernel selectors.
- */
- INTR_PUSH
- INTGATE_INIT_KERNEL_FLAGS
-
- TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
- TRACE_REGS(%edi, %esp, %ebx, %ecx)
- TRACE_STAMP(%edi)
-
- movl %esp, %ebp
-
- pushl %ebp
- call av_dispatch_nmivect
- addl $4, %esp
-
- INTR_POP_USER
- IRET
- SET_SIZE(nmiint)
-
-#endif /* __i386 */
-
/*
* #BP
*/
ENTRY_NP(brktrap)
-
-#if defined(__amd64)
XPV_TRAP_POP
cmpw $KCS_SEL, 8(%rsp)
jne bp_user
@@ -368,7 +309,6 @@
jmp ud_kernel
bp_user:
-#endif /* __amd64 */
NPTRAP_NOERR(T_BPTFLT) /* $3 */
jmp dtrace_trap
@@ -391,8 +331,6 @@ bp_user:
jmp cmntrap
SET_SIZE(boundstrap)
-#if defined(__amd64)
-
ENTRY_NP(invoptrap)
XPV_TRAP_POP
@@ -454,12 +392,12 @@ ud_push:
ud_leave:
/*
- * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
- * followed by a "popq %rbp". This is quite a bit simpler on amd64
- * than it is on i386 -- we can exploit the fact that the %rsp is
- * explicitly saved to effect the pop without having to reshuffle
- * the other data pushed for the trap.
+ * We must emulate a "leave", which is the same as a "movq %rbp,
+ * %rsp" followed by a "popq %rbp". We can exploit the fact
+ * that the %rsp is explicitly saved to effect the pop without
+ * having to reshuffle the other data pushed for the trap.
*/
+
INTR_POP
pushq %rax /* push temp */
movq 8(%rsp), %rax /* load calling RIP */
@@ -515,126 +453,6 @@ ud_user:
jmp cmntrap
SET_SIZE(invoptrap)
-#elif defined(__i386)
-
- /*
- * #UD
- */
- ENTRY_NP(invoptrap)
- /*
- * If we are taking an invalid opcode trap while in the kernel, this
- * is likely an FBT probe point.
- */
- pushl %gs
- cmpw $KGS_SEL, (%esp)
- jne 8f
-
- addl $4, %esp
-#if defined(__xpv)
- movb $0, 6(%esp) /* clear saved upcall_mask from %cs */
-#endif /* __xpv */
- pusha
- pushl %eax /* push %eax -- may be return value */
- pushl %esp /* push stack pointer */
- addl $48, (%esp) /* adjust to incoming args */
- pushl 40(%esp) /* push calling EIP */
- call dtrace_invop
- ALTENTRY(dtrace_invop_callsite)
- addl $12, %esp
- cmpl $DTRACE_INVOP_PUSHL_EBP, %eax
- je 1f
- cmpl $DTRACE_INVOP_POPL_EBP, %eax
- je 2f
- cmpl $DTRACE_INVOP_LEAVE, %eax
- je 3f
- cmpl $DTRACE_INVOP_NOP, %eax
- je 4f
- jmp 7f
-1:
- /*
- * We must emulate a "pushl %ebp". To do this, we pull the stack
- * down 4 bytes, and then store the base pointer.
- */
- popa
- subl $4, %esp /* make room for %ebp */
- pushl %eax /* push temp */
- movl 8(%esp), %eax /* load calling EIP */
- incl %eax /* increment over LOCK prefix */
- movl %eax, 4(%esp) /* store calling EIP */
- movl 12(%esp), %eax /* load calling CS */
- movl %eax, 8(%esp) /* store calling CS */
- movl 16(%esp), %eax /* load calling EFLAGS */
- movl %eax, 12(%esp) /* store calling EFLAGS */
- movl %ebp, 16(%esp) /* push %ebp */
- popl %eax /* pop off temp */
- jmp _emul_done
-2:
- /*
- * We must emulate a "popl %ebp". To do this, we do the opposite of
- * the above: we remove the %ebp from the stack, and squeeze up the
- * saved state from the trap.
- */
- popa
- pushl %eax /* push temp */
- movl 16(%esp), %ebp /* pop %ebp */
- movl 12(%esp), %eax /* load calling EFLAGS */
- movl %eax, 16(%esp) /* store calling EFLAGS */
- movl 8(%esp), %eax /* load calling CS */
- movl %eax, 12(%esp) /* store calling CS */
- movl 4(%esp), %eax /* load calling EIP */
- incl %eax /* increment over LOCK prefix */
- movl %eax, 8(%esp) /* store calling EIP */
- popl %eax /* pop off temp */
- addl $4, %esp /* adjust stack pointer */
- jmp _emul_done
-3:
- /*
- * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
- * followed by a "popl %ebp". This looks similar to the above, but
- * requires two temporaries: one for the new base pointer, and one
- * for the staging register.
- */
- popa
- pushl %eax /* push temp */
- pushl %ebx /* push temp */
- movl %ebp, %ebx /* set temp to old %ebp */
- movl (%ebx), %ebp /* pop %ebp */
- movl 16(%esp), %eax /* load calling EFLAGS */
- movl %eax, (%ebx) /* store calling EFLAGS */
- movl 12(%esp), %eax /* load calling CS */
- movl %eax, -4(%ebx) /* store calling CS */
- movl 8(%esp), %eax /* load calling EIP */
- incl %eax /* increment over LOCK prefix */
- movl %eax, -8(%ebx) /* store calling EIP */
- movl %ebx, -4(%esp) /* temporarily store new %esp */
- popl %ebx /* pop off temp */
- popl %eax /* pop off temp */
- movl -12(%esp), %esp /* set stack pointer */
- subl $8, %esp /* adjust for three pushes, one pop */
- jmp _emul_done
-4:
- /*
- * We must emulate a "nop". This is obviously not hard: we need only
- * advance the %eip by one.
- */
- popa
- incl (%esp)
-_emul_done:
- IRET /* return from interrupt */
-7:
- popa
- pushl $0
- pushl $T_ILLINST /* $6 */
- jmp cmntrap
-8:
- addl $4, %esp
- pushl $0
- pushl $T_ILLINST /* $6 */
- jmp cmntrap
- SET_SIZE(invoptrap)
-
-#endif /* __i386 */
-
/*
* #NM
*/
@@ -646,7 +464,6 @@ _emul_done:
SET_SIZE(ndptrap)
#if !defined(__xpv)
-#if defined(__amd64)
/*
* #DF
@@ -699,129 +516,6 @@ _emul_done:
SET_SIZE(syserrtrap)
-#elif defined(__i386)
-
- /*
- * #DF
- */
- ENTRY_NP(syserrtrap)
- cli /* disable interrupts */
-
- /*
- * We share this handler with kmdb (if kmdb is loaded). As such, we
- * may have reached this point after encountering a #df in kmdb. If
- * that happens, we'll still be on kmdb's IDT. We need to switch back
- * to this CPU's IDT before proceeding. Furthermore, if we did arrive
- * here from kmdb, kmdb is probably in a very sickly state, and
- * shouldn't be entered from the panic flow. We'll suppress that
- * entry by setting nopanicdebug.
- */
-
- subl $DESCTBR_SIZE, %esp
- movl %gs:CPU_IDT, %eax
- sidt (%esp)
- cmpl DTR_BASE(%esp), %eax
- je 1f
-
- movl %eax, DTR_BASE(%esp)
- movw $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
- lidt (%esp)
-
- movl $1, nopanicdebug
-
-1: addl $DESCTBR_SIZE, %esp
-
- /*
- * Check the CPL in the TSS to see what mode
- * (user or kernel) we took the fault in. At this
- * point we are running in the context of the double
- * fault task (dftss) but the CPU's task points to
- * the previous task (ktss) where the process context
- * has been saved as the result of the task switch.
- */
- movl %gs:CPU_TSS, %eax /* get the TSS */
- movl TSS_SS(%eax), %ebx /* save the fault SS */
- movl TSS_ESP(%eax), %edx /* save the fault ESP */
- testw $CPL_MASK, TSS_CS(%eax) /* user mode ? */
- jz make_frame
- movw TSS_SS0(%eax), %ss /* get on the kernel stack */
- movl TSS_ESP0(%eax), %esp
-
- /*
- * Clear the NT flag to avoid a task switch when the process
- * finally pops the EFL off the stack via an iret. Clear
- * the TF flag since that is what the processor does for
- * a normal exception. Clear the IE flag so that interrupts
- * remain disabled.
- */
- movl TSS_EFL(%eax), %ecx
- andl $_BITNOT(PS_NT|PS_T|PS_IE), %ecx
- pushl %ecx
- popfl /* restore the EFL */
- movw TSS_LDT(%eax), %cx /* restore the LDT */
- lldt %cx
-
- /*
- * Restore process segment selectors.
- */
- movw TSS_DS(%eax), %ds
- movw TSS_ES(%eax), %es
- movw TSS_FS(%eax), %fs
- movw TSS_GS(%eax), %gs
-
- /*
- * Restore task segment selectors.
- */
- movl $KDS_SEL, TSS_DS(%eax)
- movl $KDS_SEL, TSS_ES(%eax)
- movl $KDS_SEL, TSS_SS(%eax)
- movl $KFS_SEL, TSS_FS(%eax)
- movl $KGS_SEL, TSS_GS(%eax)
-
- /*
- * Clear the TS bit, the busy bits in both task
- * descriptors, and switch tasks.
- */
- clts
- leal gdt0, %ecx
- movl DFTSS_SEL+4(%ecx), %esi
- andl $_BITNOT(0x200), %esi
- movl %esi, DFTSS_SEL+4(%ecx)
- movl KTSS_SEL+4(%ecx), %esi
- andl $_BITNOT(0x200), %esi
- movl %esi, KTSS_SEL+4(%ecx)
- movw $KTSS_SEL, %cx
- ltr %cx
-
- /*
- * Restore part of the process registers.
- */
- movl TSS_EBP(%eax), %ebp
- movl TSS_ECX(%eax), %ecx
- movl TSS_ESI(%eax), %esi
- movl TSS_EDI(%eax), %edi
-
-make_frame:
- /*
- * Make a trap frame. Leave the error code (0) on
- * the stack since the first word on a trap stack is
- * unused anyway.
- */
- pushl %ebx / fault SS
- pushl %edx / fault ESP
- pushl TSS_EFL(%eax) / fault EFL
- pushl TSS_CS(%eax) / fault CS
- pushl TSS_EIP(%eax) / fault EIP
- pushl $0 / error code
- pushl $T_DBLFLT / trap number 8
- movl TSS_EBX(%eax), %ebx / restore EBX
- movl TSS_EDX(%eax), %edx / restore EDX
- movl TSS_EAX(%eax), %eax / restore EAX
- sti / enable interrupts
- jmp cmntrap
- SET_SIZE(syserrtrap)
-
-#endif /* __i386 */
#endif /* !__xpv */
/*
@@ -837,9 +531,7 @@ make_frame:
*/
ENTRY_NP(segnptrap)
TRAP_ERR(T_SEGFLT) /* $11 already have error code on stack */
-#if defined(__amd64)
SET_CPU_GSBASE
-#endif
jmp cmntrap
SET_SIZE(segnptrap)
@@ -848,9 +540,7 @@ make_frame:
*/
ENTRY_NP(stktrap)
TRAP_ERR(T_STKFLT) /* $12 already have error code on stack */
-#if defined(__amd64)
SET_CPU_GSBASE
-#endif
jmp cmntrap
SET_SIZE(stktrap)
@@ -859,9 +549,7 @@ make_frame:
*/
ENTRY_NP(gptrap)
TRAP_ERR(T_GPFLT) /* $13 already have error code on stack */
-#if defined(__amd64)
SET_CPU_GSBASE
-#endif
jmp cmntrap
SET_SIZE(gptrap)
@@ -873,65 +561,17 @@ make_frame:
INTR_PUSH
#if defined(__xpv)
-#if defined(__amd64)
movq %gs:CPU_VCPU_INFO, %r15
movq VCPU_INFO_ARCH_CR2(%r15), %r15 /* vcpu[].arch.cr2 */
-#elif defined(__i386)
- movl %gs:CPU_VCPU_INFO, %esi
- movl VCPU_INFO_ARCH_CR2(%esi), %esi /* vcpu[].arch.cr2 */
-#endif /* __i386 */
#else /* __xpv */
-#if defined(__amd64)
movq %cr2, %r15
-#elif defined(__i386)
- movl %cr2, %esi
-#endif /* __i386 */
#endif /* __xpv */
jmp cmntrap_pushed
SET_SIZE(pftrap)
-#if !defined(__amd64)
-
- .globl idt0_default_r
-
- /*
- * #PF pentium bug workaround
- */
- ENTRY_NP(pentium_pftrap)
- pushl %eax
- movl %cr2, %eax
- andl $MMU_STD_PAGEMASK, %eax
-
- cmpl %eax, %cs:idt0_default_r+2 /* fixme */
-
- je check_for_user_address
-user_mode:
- popl %eax
- pushl $T_PGFLT /* $14 */
- jmp cmntrap
-check_for_user_address:
- /*
- * Before we assume that we have an unmapped trap on our hands,
- * check to see if this is a fault from user mode. If it is,
- * we'll kick back into the page fault handler.
- */
- movl 4(%esp), %eax /* error code */
- andl $PF_ERR_USER, %eax
- jnz user_mode
-
- /*
- * We now know that this is the invalid opcode trap.
- */
- popl %eax
- addl $4, %esp /* pop error code */
- jmp invoptrap
- SET_SIZE(pentium_pftrap)
-
-#endif /* !__amd64 */
-
ENTRY_NP(resvtrap)
TRAP_NOERR(T_RESVTRAP) /* (reserved) */
jmp cmntrap
@@ -958,8 +598,6 @@ check_for_user_address:
*/
.globl cmi_mca_trap /* see uts/i86pc/os/cmi.c */
-#if defined(__amd64)
-
ENTRY_NP(mcetrap)
TRAP_NOERR(T_MCE) /* $18 */
@@ -980,30 +618,6 @@ check_for_user_address:
jmp _sys_rtt
SET_SIZE(mcetrap)
-#else
-
- ENTRY_NP(mcetrap)
- TRAP_NOERR(T_MCE) /* $18 */
-
- INTR_PUSH
- INTGATE_INIT_KERNEL_FLAGS
-
- TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
- TRACE_REGS(%edi, %esp, %ebx, %ecx)
- TRACE_STAMP(%edi)
-
- movl %esp, %ebp
-
- movl %esp, %ecx
- pushl %ecx /* arg0 = struct regs *rp */
- call cmi_mca_trap /* cmi_mca_trap(rp) */
- addl $4, %esp /* pop arg0 */
-
- jmp _sys_rtt
- SET_SIZE(mcetrap)
-
-#endif
-
/*
* #XF
*/
@@ -1019,8 +633,6 @@ check_for_user_address:
.globl fasttable
-#if defined(__amd64)
-
ENTRY_NP(fasttrap)
cmpl $T_LASTFAST, %eax
ja 1f
@@ -1051,36 +663,11 @@ check_for_user_address:
jmp gptrap
SET_SIZE(fasttrap)
-#elif defined(__i386)
-
- ENTRY_NP(fasttrap)
- cmpl $T_LASTFAST, %eax
- ja 1f
- jmp *%cs:fasttable(, %eax, CLONGSIZE)
-1:
- /*
- * Fast syscall number was illegal. Make it look
- * as if the INT failed. Modify %eip to point before the
- * INT, push the expected error code and fake a GP fault.
- *
- * XXX Why make the error code be offset into idt + 1?
- * Instead we should push a real (soft?) error code
- * on the stack and #gp handler could know about fasttraps?
- */
- subl $2, (%esp) /* XXX int insn 2-bytes */
- pushl $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
- jmp gptrap
- SET_SIZE(fasttrap)
-
-#endif /* __i386 */
-
ENTRY_NP(dtrace_ret)
TRAP_NOERR(T_DTRACE_RET)
jmp dtrace_trap
SET_SIZE(dtrace_ret)
-#if defined(__amd64)
-
/*
* RFLAGS 24 bytes up the stack from %rsp.
* XXX a constant would be nicer.
@@ -1093,15 +680,6 @@ check_for_user_address:
/*NOTREACHED*/
SET_SIZE(fast_null)
-#elif defined(__i386)
-
- ENTRY_NP(fast_null)
- orw $PS_C, 8(%esp) /* set carry bit in user flags */
- IRET
- SET_SIZE(fast_null)
-
-#endif /* __i386 */
-
/*
* Interrupts start at 32
*/
@@ -1337,4 +915,3 @@ check_for_user_address:
MKIVCT(254)
MKIVCT(255)
-#endif /* __lint */
diff --git a/usr/src/uts/intel/ia32/ml/float.s b/usr/src/uts/intel/ia32/ml/float.s
index 0a242e0475..b3c4643707 100644
--- a/usr/src/uts/intel/ia32/ml/float.s
+++ b/usr/src/uts/intel/ia32/ml/float.s
@@ -42,20 +42,7 @@
#include <sys/privregs.h>
#include <sys/x86_archext.h>
-#if defined(__lint)
-#include <sys/types.h>
-#include <sys/fp.h>
-#else
#include "assym.h"
-#endif
-
-#if defined(__lint)
-
-uint_t
-fpu_initial_probe(void)
-{ return (0); }
-
-#else /* __lint */
/*
* Returns zero if x87 "chip" is present(!)
@@ -68,48 +55,16 @@ fpu_initial_probe(void)
ret
SET_SIZE(fpu_initial_probe)
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-fxsave_insn(struct fxsave_state *fx)
-{}
-
-#else /* __lint */
-
ENTRY_NP(fxsave_insn)
fxsaveq (%rdi)
ret
SET_SIZE(fxsave_insn)
-#endif /* __lint */
-
/*
* One of these routines is called from any lwp with floating
* point context as part of the prolog of a context switch.
*/
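Which of the three save routines runs is decided once, by hardware capability, and installed as the context-save operation; fpinit below makes the same kind of choice by comparing fp_save_mech against FP_XSAVE. A minimal C sketch of that selection (the function-pointer variable is illustrative; fp_save_mech and the FP_* constants are from sys/fp.h):

	/* Sketch: choose the context-save routine by hardware capability. */
	void (*fpsave_op)(void *);

	if (fp_save_mech == FP_XSAVE)
		fpsave_op = xsave_ctxt;		/* or xsaveopt_ctxt if supported */
	else
		fpsave_op = fpxsave_ctxt;	/* FXSAVE-only hardware */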
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-xsave_ctxt(void *arg)
-{}
-
-/*ARGSUSED*/
-void
-xsaveopt_ctxt(void *arg)
-{}
-
-/*ARGSUSED*/
-void
-fpxsave_ctxt(void *arg)
-{}
-
-#else /* __lint */
-
/*
* These three functions define the Intel "xsave" handling for CPUs with
* different features. Newer AMD CPUs can also use these functions. See the
@@ -224,32 +179,6 @@ fpxsave_ctxt(void *arg)
.4byte 0x0
.4byte 0x0
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-fpsave(struct fnsave_state *f)
-{}
-
-/*ARGSUSED*/
-void
-fpxsave(struct fxsave_state *f)
-{}
-
-/*ARGSUSED*/
-void
-xsave(struct xsave_state *f, uint64_t m)
-{}
-
-/*ARGSUSED*/
-void
-xsaveopt(struct xsave_state *f, uint64_t m)
-{}
-
-#else /* __lint */
ENTRY_NP(fpxsave)
CLTS
@@ -283,27 +212,11 @@ xsaveopt(struct xsave_state *f, uint64_t m)
ret
SET_SIZE(xsaveopt)
-#endif /* __lint */
-
/*
* These functions are used when restoring the FPU as part of the epilogue of a
* context switch.
*/
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-fpxrestore_ctxt(void *arg)
-{}
-
-/*ARGSUSED*/
-void
-xrestore_ctxt(void *arg)
-{}
-
-#else /* __lint */
-
ENTRY(fpxrestore_ctxt)
cmpl $_CONST(FPU_EN|FPU_VALID), FPU_CTX_FPU_FLAGS(%rdi)
jne 1f
@@ -328,22 +241,6 @@ xrestore_ctxt(void *arg)
ret
SET_SIZE(xrestore_ctxt)
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-fpxrestore(struct fxsave_state *f)
-{}
-
-/*ARGSUSED*/
-void
-xrestore(struct xsave_state *f, uint64_t m)
-{}
-
-#else /* __lint */
ENTRY_NP(fpxrestore)
CLTS
@@ -360,39 +257,19 @@ xrestore(struct xsave_state *f, uint64_t m)
ret
SET_SIZE(xrestore)
-#endif /* __lint */
-
/*
* Disable the floating point unit.
*/
-#if defined(__lint)
-
-void
-fpdisable(void)
-{}
-
-#else /* __lint */
-
ENTRY_NP(fpdisable)
STTS(%rdi) /* set TS bit in %cr0 (disable FPU) */
ret
SET_SIZE(fpdisable)
-#endif /* __lint */
-
/*
* Initialize the fpu hardware.
*/
-#if defined(__lint)
-
-void
-fpinit(void)
-{}
-
-#else /* __lint */
-
ENTRY_NP(fpinit)
CLTS
cmpl $FP_XSAVE, fp_save_mech
@@ -414,25 +291,11 @@ fpinit(void)
ret
SET_SIZE(fpinit)
-#endif /* __lint */
-
/*
* Clears FPU exception state.
* Returns the FP status word.
*/
-#if defined(__lint)
-
-uint32_t
-fperr_reset(void)
-{ return (0); }
-
-uint32_t
-fpxerr_reset(void)
-{ return (0); }
-
-#else /* __lint */
-
ENTRY_NP(fperr_reset)
CLTS
xorl %eax, %eax
@@ -454,18 +317,6 @@ fpxerr_reset(void)
ret
SET_SIZE(fpxerr_reset)
-#endif /* __lint */
-
-#if defined(__lint)
-
-uint32_t
-fpgetcwsw(void)
-{
- return (0);
-}
-
-#else /* __lint */
-
ENTRY_NP(fpgetcwsw)
pushq %rbp
movq %rsp, %rbp
@@ -478,22 +329,10 @@ fpgetcwsw(void)
ret
SET_SIZE(fpgetcwsw)
-#endif /* __lint */
-
/*
* Returns the MXCSR register.
*/
-#if defined(__lint)
-
-uint32_t
-fpgetmxcsr(void)
-{
- return (0);
-}
-
-#else /* __lint */
-
ENTRY_NP(fpgetmxcsr)
pushq %rbp
movq %rsp, %rbp
@@ -505,4 +344,3 @@ fpgetmxcsr(void)
ret
SET_SIZE(fpgetmxcsr)
-#endif /* __lint */
diff --git a/usr/src/uts/intel/ia32/ml/hypersubr.s b/usr/src/uts/intel/ia32/ml/hypersubr.s
index fb70bf1818..e6378d8518 100644
--- a/usr/src/uts/intel/ia32/ml/hypersubr.s
+++ b/usr/src/uts/intel/ia32/ml/hypersubr.s
@@ -37,88 +37,18 @@
/*
* Hypervisor "system calls"
*
- * i386
- * %eax == call number
- * args in registers (%ebx, %ecx, %edx, %esi, %edi)
- *
* amd64
* %rax == call number
* args in registers (%rdi, %rsi, %rdx, %r10, %r8, %r9)
*
- * Note that for amd64 we use %r10 instead of %rcx for passing 4th argument
- * as in C calling convention since the "syscall" instruction clobbers %rcx.
+ * Note that we use %r10 instead of %rcx for the 4th argument, as the
+ * "syscall" instruction clobbers %rcx (which the C calling convention uses).
*
* (These calls can be done more efficiently as gcc-style inlines, but
* for simplicity and help with initial debugging, we use these primitives
* to build the hypervisor calls up from C wrappers.)
*/
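As the comment says, the C-callable primitives below are the building blocks for per-call wrappers. A hypothetical wrapper might look like this (the hypercall name and number are illustrative, not a real Xen interface definition):

	/* Hypothetical example of a C wrapper built on the primitives below. */
	int
	HYPERVISOR_example_op(int cmd, void *arg)
	{
		return (__hypercall2_int(__HYPERVISOR_example_op,
		    (ulong_t)cmd, (ulong_t)arg));
	}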
-#if defined(__lint)
-
-/*ARGSUSED*/
-long
-__hypercall0(int callnum)
-{ return (0); }
-
-/*ARGSUSED*/
-long
-__hypercall1(int callnum, ulong_t a1)
-{ return (0); }
-
-/*ARGSUSED*/
-long
-__hypercall2(int callnum, ulong_t a1, ulong_t a2)
-{ return (0); }
-
-/*ARGSUSED*/
-long
-__hypercall3(int callnum, ulong_t a1, ulong_t a2, ulong_t a3)
-{ return (0); }
-
-/*ARGSUSED*/
-long
-__hypercall4(int callnum, ulong_t a1, ulong_t a2, ulong_t a3, ulong_t a4)
-{ return (0); }
-
-/*ARGSUSED*/
-long
-__hypercall5(int callnum,
- ulong_t a1, ulong_t a2, ulong_t a3, ulong_t a4, ulong_t a5)
-{ return (0); }
-
-/*ARGSUSED*/
-int
-__hypercall0_int(int callnum)
-{ return (0); }
-
-/*ARGSUSED*/
-int
-__hypercall1_int(int callnum, ulong_t a1)
-{ return (0); }
-
-/*ARGSUSED*/
-int
-__hypercall2_int(int callnum, ulong_t a1, ulong_t a2)
-{ return (0); }
-
-/*ARGSUSED*/
-int
-__hypercall3_int(int callnum, ulong_t a1, ulong_t a2, ulong_t a3)
-{ return (0); }
-
-/*ARGSUSED*/
-int
-__hypercall4_int(int callnum, ulong_t a1, ulong_t a2, ulong_t a3, ulong_t a4)
-{ return (0); }
-
-/*ARGSUSED*/
-int
-__hypercall5_int(int callnum,
- ulong_t a1, ulong_t a2, ulong_t a3, ulong_t a4, ulong_t a5)
-{ return (0); }
-
-#else /* __lint */
-
/*
* XXPV grr - assembler can't deal with an instruction in a quoted string
*/
@@ -164,30 +94,17 @@ hypercall_shared_info_page:
hypercall_page:
.skip HYPERCALL_PAGESIZE
.size hypercall_page, HYPERCALL_PAGESIZE
-#if defined(__amd64)
#define TRAP_INSTR \
shll $5, %eax; \
addq $hypercall_page, %rax; \
INDIRECT_JMP_REG(rax);
-#else
-#define TRAP_INSTR \
- shll $5, %eax; \
- addl $hypercall_page, %eax; \
- call *%eax
-#endif
#else /* !_xpv */
-#if defined(__amd64)
#define TRAP_INSTR syscall
-#elif defined(__i386)
-#define TRAP_INSTR int $0x82
-#endif
#endif /* !__xpv */
-#if defined(__amd64)
-
ENTRY_NP(__hypercall0)
ALTENTRY(__hypercall0_int)
movl %edi, %eax
@@ -245,81 +162,3 @@ hypercall_page:
ret
SET_SIZE(__hypercall5)
-#elif defined(__i386)
-
- ENTRY_NP(__hypercall0)
- ALTENTRY(__hypercall0_int)
- movl 4(%esp), %eax
- TRAP_INSTR
- ret
- SET_SIZE(__hypercall0)
-
- ENTRY_NP(__hypercall1)
- ALTENTRY(__hypercall1_int)
- pushl %ebx
- movl 8(%esp), %eax
- movl 12(%esp), %ebx
- TRAP_INSTR
- popl %ebx
- ret
- SET_SIZE(__hypercall1)
-
- ENTRY_NP(__hypercall2)
- ALTENTRY(__hypercall2_int)
- pushl %ebx
- movl 8(%esp), %eax
- movl 12(%esp), %ebx
- movl 16(%esp), %ecx
- TRAP_INSTR
- popl %ebx
- ret
- SET_SIZE(__hypercall2)
-
- ENTRY_NP(__hypercall3)
- ALTENTRY(__hypercall3_int)
- pushl %ebx
- movl 8(%esp), %eax
- movl 12(%esp), %ebx
- movl 16(%esp), %ecx
- movl 20(%esp), %edx
- TRAP_INSTR
- popl %ebx
- ret
- SET_SIZE(__hypercall3)
-
- ENTRY_NP(__hypercall4)
- ALTENTRY(__hypercall4_int)
- pushl %ebx
- pushl %esi
- movl 12(%esp), %eax
- movl 16(%esp), %ebx
- movl 20(%esp), %ecx
- movl 24(%esp), %edx
- movl 28(%esp), %esi
- TRAP_INSTR
- popl %esi
- popl %ebx
- ret
- SET_SIZE(__hypercall4)
-
- ENTRY_NP(__hypercall5)
- ALTENTRY(__hypercall5_int)
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 16(%esp), %eax
- movl 20(%esp), %ebx
- movl 24(%esp), %ecx
- movl 28(%esp), %edx
- movl 32(%esp), %esi
- movl 36(%esp), %edi
- TRAP_INSTR
- popl %edi
- popl %esi
- popl %ebx
- ret
- SET_SIZE(__hypercall5)
-
-#endif /* __i386 */
-
-#endif /* lint */
diff --git a/usr/src/uts/intel/ia32/ml/i86_subr.s b/usr/src/uts/intel/ia32/ml/i86_subr.s
index 3297fa398c..213ab84c8f 100644
--- a/usr/src/uts/intel/ia32/ml/i86_subr.s
+++ b/usr/src/uts/intel/ia32/ml/i86_subr.s
@@ -57,17 +57,7 @@
#include <sys/psw.h>
#include <sys/x86_archext.h>
-#if defined(__lint)
-#include <sys/types.h>
-#include <sys/systm.h>
-#include <sys/thread.h>
-#include <sys/archsystm.h>
-#include <sys/byteorder.h>
-#include <sys/dtrace.h>
-#include <sys/ftrace.h>
-#else /* __lint */
#include "assym.h"
-#endif /* __lint */
#include <sys/dditypes.h>
/*
@@ -80,21 +70,6 @@
* uts/intel/ia32/ml/copy.s.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-on_fault(label_t *ljb)
-{ return (0); }
-
-void
-no_fault(void)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(on_fault)
movq %gs:CPU_THREAD, %rsi
leaq catch_fault(%rip), %rdx
@@ -122,52 +97,11 @@ catch_fault:
ret
SET_SIZE(no_fault)
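on_fault/no_fault give kernel code a bounded window in which a bad dereference unwinds through the saved label_t instead of panicking. The conventional pattern (a sketch, not code from this commit):

	/* Typical on_fault() window around a risky access (sketch). */
	label_t ljb;

	if (on_fault(&ljb)) {
		/* control arrives here, via longjmp, after a fault */
		no_fault();
		return (EFAULT);
	}
	val = *user_ptr;	/* access that may fault */
	no_fault();		/* close the window */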
-#elif defined(__i386)
-
- ENTRY(on_fault)
- movl %gs:CPU_THREAD, %edx
- movl 4(%esp), %eax /* jumpbuf address */
- leal catch_fault, %ecx
- movl %eax, T_ONFAULT(%edx) /* jumpbuf in t_onfault */
- movl %ecx, T_LOFAULT(%edx) /* catch_fault in t_lofault */
- jmp setjmp /* let setjmp do the rest */
-
-catch_fault:
- movl %gs:CPU_THREAD, %edx
- xorl %eax, %eax
- movl T_ONFAULT(%edx), %ecx /* address of save area */
- movl %eax, T_ONFAULT(%edx) /* turn off onfault */
- movl %eax, T_LOFAULT(%edx) /* turn off lofault */
- pushl %ecx
- call longjmp /* let longjmp do the rest */
- SET_SIZE(on_fault)
-
- ENTRY(no_fault)
- movl %gs:CPU_THREAD, %edx
- xorl %eax, %eax
- movl %eax, T_ONFAULT(%edx) /* turn off onfault */
- movl %eax, T_LOFAULT(%edx) /* turn off lofault */
- ret
- SET_SIZE(no_fault)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Default trampoline code for on_trap() (see <sys/ontrap.h>). We just
* do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
*/
-#if defined(lint)
-
-void
-on_trap_trampoline(void)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(on_trap_trampoline)
movq %gs:CPU_THREAD, %rsi
movq T_ONTRAP(%rsi), %rdi
@@ -175,34 +109,11 @@ on_trap_trampoline(void)
jmp longjmp
SET_SIZE(on_trap_trampoline)
-#elif defined(__i386)
-
- ENTRY(on_trap_trampoline)
- movl %gs:CPU_THREAD, %eax
- movl T_ONTRAP(%eax), %eax
- addl $OT_JMPBUF, %eax
- pushl %eax
- call longjmp
- SET_SIZE(on_trap_trampoline)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Push a new element on to the t_ontrap stack. Refer to <sys/ontrap.h> for
* more information about the on_trap() mechanism. If the on_trap_data is the
* same as the topmost stack element, we just modify that element.
*/
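on_trap generalizes the same idea to specific trap types; the caller states what it will tolerate via the prot mask. A sketch of the conventional use, assuming the OT_DATA_ACCESS protection flag and no_trap() from <sys/ontrap.h>:

	/* Sketch of an on_trap() protected region. */
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS)) {
		no_trap();
		return (DDI_FAILURE);
	}
	/* ... probe a register that may fault ... */
	no_trap();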
-#if defined(lint)
-
-/*ARGSUSED*/
-int
-on_trap(on_trap_data_t *otp, uint_t prot)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(on_trap)
movw %si, OT_PROT(%rdi) /* ot_prot = prot */
@@ -224,59 +135,15 @@ on_trap(on_trap_data_t *otp, uint_t prot)
jmp setjmp
SET_SIZE(on_trap)
-#elif defined(__i386)
-
- ENTRY(on_trap)
- movl 4(%esp), %eax /* %eax = otp */
- movl 8(%esp), %edx /* %edx = prot */
-
- movw %dx, OT_PROT(%eax) /* ot_prot = prot */
- movw $0, OT_TRAP(%eax) /* ot_trap = 0 */
- leal on_trap_trampoline, %edx /* %edx = &on_trap_trampoline */
- movl %edx, OT_TRAMPOLINE(%eax) /* ot_trampoline = %edx */
- movl $0, OT_HANDLE(%eax) /* ot_handle = NULL */
- movl $0, OT_PAD1(%eax) /* ot_pad1 = NULL */
- movl %gs:CPU_THREAD, %edx /* %edx = curthread */
- movl T_ONTRAP(%edx), %ecx /* %ecx = curthread->t_ontrap */
- cmpl %eax, %ecx /* if (otp == %ecx) */
- je 0f /* don't modify t_ontrap */
-
- movl %ecx, OT_PREV(%eax) /* ot_prev = t_ontrap */
- movl %eax, T_ONTRAP(%edx) /* curthread->t_ontrap = otp */
-
-0: addl $OT_JMPBUF, %eax /* %eax = &ot_jmpbuf */
- movl %eax, 4(%esp) /* put %eax back on the stack */
- jmp setjmp /* let setjmp do the rest */
- SET_SIZE(on_trap)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Setjmp and longjmp implement non-local gotos using state vectors
 * of type label_t.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-setjmp(label_t *lp)
-{ return (0); }
-
-/* ARGSUSED */
-void
-longjmp(label_t *lp)
-{}
-
-#else /* __lint */
-
#if LABEL_PC != 0
#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
#endif /* LABEL_PC != 0 */
-#if defined(__amd64)
-
ENTRY(setjmp)
movq %rsp, LABEL_SP(%rdi)
movq %rbp, LABEL_RBP(%rdi)
@@ -306,37 +173,6 @@ longjmp(label_t *lp)
ret
SET_SIZE(longjmp)
-#elif defined(__i386)
-
- ENTRY(setjmp)
- movl 4(%esp), %edx /* address of save area */
- movl %ebp, LABEL_EBP(%edx)
- movl %ebx, LABEL_EBX(%edx)
- movl %esi, LABEL_ESI(%edx)
- movl %edi, LABEL_EDI(%edx)
- movl %esp, 4(%edx)
- movl (%esp), %ecx /* %eip (return address) */
- movl %ecx, (%edx) /* LABEL_PC is 0 */
- subl %eax, %eax /* return 0 */
- ret
- SET_SIZE(setjmp)
-
- ENTRY(longjmp)
- movl 4(%esp), %edx /* address of save area */
- movl LABEL_EBP(%edx), %ebp
- movl LABEL_EBX(%edx), %ebx
- movl LABEL_ESI(%edx), %esi
- movl LABEL_EDI(%edx), %edi
- movl 4(%edx), %esp
- movl (%edx), %ecx /* %eip (return addr); LABEL_PC is 0 */
- movl $1, %eax
- addl $4, %esp /* pop ret adr */
- jmp *%ecx /* indirect */
- SET_SIZE(longjmp)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* if a() calls b() calls caller(),
* caller() returns return address in a().
@@ -344,171 +180,44 @@ longjmp(label_t *lp)
 * (Note: We assume a() and b() are C routines which do the normal entry/exit
 *  sequence.)
*/
-#if defined(__lint)
-
-caddr_t
-caller(void)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(caller)
movq 8(%rbp), %rax /* b()'s return pc, in a() */
ret
SET_SIZE(caller)
-#elif defined(__i386)
-
- ENTRY(caller)
- movl 4(%ebp), %eax /* b()'s return pc, in a() */
- ret
- SET_SIZE(caller)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* if a() calls callee(), callee() returns the
* return address in a();
*/
-#if defined(__lint)
-
-caddr_t
-callee(void)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(callee)
movq (%rsp), %rax /* callee()'s return pc, in a() */
ret
SET_SIZE(callee)
-#elif defined(__i386)
-
- ENTRY(callee)
- movl (%esp), %eax /* callee()'s return pc, in a() */
- ret
- SET_SIZE(callee)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* return the current frame pointer
*/
-#if defined(__lint)
-
-greg_t
-getfp(void)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(getfp)
movq %rbp, %rax
ret
SET_SIZE(getfp)
-#elif defined(__i386)
-
- ENTRY(getfp)
- movl %ebp, %eax
- ret
- SET_SIZE(getfp)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Invalidate a single page table entry in the TLB
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-mmu_invlpg(caddr_t m)
-{}
-
-#else /* __lint */
-
ENTRY(mmu_invlpg)
invlpg (%rdi)
ret
SET_SIZE(mmu_invlpg)
-#endif /* __lint */
-
/*
* Get/Set the value of various control registers
*/
-#if defined(__lint)
-
-ulong_t
-getcr0(void)
-{ return (0); }
-
-/* ARGSUSED */
-void
-setcr0(ulong_t value)
-{}
-
-ulong_t
-getcr2(void)
-{ return (0); }
-
-ulong_t
-getcr3(void)
-{ return (0); }
-
-#if !defined(__xpv)
-/* ARGSUSED */
-void
-setcr3(ulong_t val)
-{}
-
-void
-reload_cr3(void)
-{}
-#endif
-
-ulong_t
-getcr4(void)
-{ return (0); }
-
-/* ARGSUSED */
-void
-setcr4(ulong_t val)
-{}
-
-#if defined(__amd64)
-
-ulong_t
-getcr8(void)
-{ return (0); }
-
-/* ARGSUSED */
-void
-setcr8(ulong_t val)
-{}
-
-#endif /* __amd64 */
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(getcr0)
movq %cr0, %rax
ret
@@ -569,93 +278,6 @@ setcr8(ulong_t val)
ret
SET_SIZE(setcr8)
-#elif defined(__i386)
-
- ENTRY(getcr0)
- movl %cr0, %eax
- ret
- SET_SIZE(getcr0)
-
- ENTRY(setcr0)
- movl 4(%esp), %eax
- movl %eax, %cr0
- ret
- SET_SIZE(setcr0)
-
- /*
- * "lock mov %cr0" is used on processors which indicate it is
- * supported via CPUID. Normally the 32 bit TPR is accessed via
- * the local APIC.
- */
- ENTRY(getcr8)
- lock
- movl %cr0, %eax
- ret
- SET_SIZE(getcr8)
-
- ENTRY(setcr8)
- movl 4(%esp), %eax
- lock
- movl %eax, %cr0
- ret
- SET_SIZE(setcr8)
-
- ENTRY(getcr2)
-#if defined(__xpv)
- movl %gs:CPU_VCPU_INFO, %eax
- movl VCPU_INFO_ARCH_CR2(%eax), %eax
-#else
- movl %cr2, %eax
-#endif
- ret
- SET_SIZE(getcr2)
-
- ENTRY(getcr3)
- movl %cr3, %eax
- ret
- SET_SIZE(getcr3)
-
-#if !defined(__xpv)
-
- ENTRY(setcr3)
- movl 4(%esp), %eax
- movl %eax, %cr3
- ret
- SET_SIZE(setcr3)
-
- ENTRY(reload_cr3)
- movl %cr3, %eax
- movl %eax, %cr3
- ret
- SET_SIZE(reload_cr3)
-
-#endif /* __xpv */
-
- ENTRY(getcr4)
- movl %cr4, %eax
- ret
- SET_SIZE(getcr4)
-
- ENTRY(setcr4)
- movl 4(%esp), %eax
- movl %eax, %cr4
- ret
- SET_SIZE(setcr4)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-uint32_t
-__cpuid_insn(struct cpuid_regs *regs)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(__cpuid_insn)
movq %rbx, %r8
movq %rcx, %r9
@@ -675,44 +297,6 @@ __cpuid_insn(struct cpuid_regs *regs)
ret
SET_SIZE(__cpuid_insn)
-#elif defined(__i386)
-
- ENTRY(__cpuid_insn)
- pushl %ebp
- movl 0x8(%esp), %ebp /* %ebp = regs */
- pushl %ebx
- pushl %ecx
- pushl %edx
- movl (%ebp), %eax /* %eax = regs->cp_eax */
- movl 0x4(%ebp), %ebx /* %ebx = regs->cp_ebx */
- movl 0x8(%ebp), %ecx /* %ecx = regs->cp_ecx */
- movl 0xc(%ebp), %edx /* %edx = regs->cp_edx */
- cpuid
- movl %eax, (%ebp) /* regs->cp_eax = %eax */
- movl %ebx, 0x4(%ebp) /* regs->cp_ebx = %ebx */
- movl %ecx, 0x8(%ebp) /* regs->cp_ecx = %ecx */
- movl %edx, 0xc(%ebp) /* regs->cp_edx = %edx */
- popl %edx
- popl %ecx
- popl %ebx
- popl %ebp
- ret
- SET_SIZE(__cpuid_insn)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(i86_monitor)
pushq %rbp
movq %rsp, %rbp
@@ -725,34 +309,6 @@ i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
ret
SET_SIZE(i86_monitor)
-#elif defined(__i386)
-
-ENTRY_NP(i86_monitor)
- pushl %ebp
- movl %esp, %ebp
- movl 0x8(%ebp),%eax /* addr */
- movl 0xc(%ebp),%ecx /* extensions */
- movl 0x10(%ebp),%edx /* hints */
- clflush (%eax)
- .byte 0x0f, 0x01, 0xc8 /* monitor */
- leave
- ret
- SET_SIZE(i86_monitor)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-i86_mwait(uint32_t data, uint32_t extensions)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(i86_mwait)
pushq %rbp
call x86_md_clear
@@ -764,39 +320,12 @@ i86_mwait(uint32_t data, uint32_t extensions)
ret
SET_SIZE(i86_mwait)
-#elif defined(__i386)
-
- ENTRY_NP(i86_mwait)
- pushl %ebp
- movl %esp, %ebp
- movl 0x8(%ebp),%eax /* data */
- movl 0xc(%ebp),%ecx /* extensions */
- .byte 0x0f, 0x01, 0xc9 /* mwait */
- leave
- ret
- SET_SIZE(i86_mwait)
-
-#endif /* __i386 */
-#endif /* __lint */
-
#if defined(__xpv)
/*
* Defined in C
*/
#else
-#if defined(__lint)
-
-hrtime_t
-tsc_read(void)
-{
- return (0);
-}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(tsc_read)
movq %rbx, %r11
movl $0, %eax
@@ -841,92 +370,19 @@ _tsc_lfence_start:
_tsc_lfence_end:
SET_SIZE(tsc_read)
-#else /* __i386 */
-
- ENTRY_NP(tsc_read)
- pushl %ebx
- movl $0, %eax
- cpuid
- rdtsc
- popl %ebx
- ret
- .globl _tsc_mfence_start
-_tsc_mfence_start:
- mfence
- rdtsc
- ret
- .globl _tsc_mfence_end
-_tsc_mfence_end:
- .globl _tscp_start
-_tscp_start:
- .byte 0x0f, 0x01, 0xf9 /* rdtscp instruction */
- ret
- .globl _tscp_end
-_tscp_end:
- .globl _no_rdtsc_start
-_no_rdtsc_start:
- xorl %edx, %edx
- xorl %eax, %eax
- ret
- .globl _no_rdtsc_end
-_no_rdtsc_end:
- .globl _tsc_lfence_start
-_tsc_lfence_start:
- lfence
- rdtsc
- ret
- .globl _tsc_lfence_end
-_tsc_lfence_end:
- SET_SIZE(tsc_read)
-
-#endif /* __i386 */
-
-#endif /* __lint */
-
#endif /* __xpv */
-#ifdef __lint
-/*
- * Do not use this function for obtaining a clock tick. This
- * is called by callers who do not need to have a guaranteed
- * correct tick value. The proper routine to use is tsc_read().
- */
-u_longlong_t
-randtick(void)
-{
- return (0);
-}
-#else
-#if defined(__amd64)
ENTRY_NP(randtick)
rdtsc
shlq $32, %rdx
orq %rdx, %rax
ret
SET_SIZE(randtick)
-#else
- ENTRY_NP(randtick)
- rdtsc
- ret
- SET_SIZE(randtick)
-#endif /* __i386 */
-#endif /* __lint */
/*
* Insert entryp after predp in a doubly linked list.
*/
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-_insque(caddr_t entryp, caddr_t predp)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(_insque)
movq (%rsi), %rax /* predp->forw */
movq %rsi, CPTRSIZE(%rdi) /* entryp->back = predp */
@@ -936,37 +392,10 @@ _insque(caddr_t entryp, caddr_t predp)
ret
SET_SIZE(_insque)
-#elif defined(__i386)
-
- ENTRY(_insque)
- movl 8(%esp), %edx
- movl 4(%esp), %ecx
- movl (%edx), %eax /* predp->forw */
- movl %edx, CPTRSIZE(%ecx) /* entryp->back = predp */
- movl %eax, (%ecx) /* entryp->forw = predp->forw */
- movl %ecx, (%edx) /* predp->forw = entryp */
- movl %ecx, CPTRSIZE(%eax) /* predp->forw->back = entryp */
- ret
- SET_SIZE(_insque)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Remove entryp from a doubly linked list
*/
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-_remque(caddr_t entryp)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(_remque)
movq (%rdi), %rax /* entry->forw */
movq CPTRSIZE(%rdi), %rdx /* entry->back */
@@ -975,36 +404,11 @@ _remque(caddr_t entryp)
ret
SET_SIZE(_remque)
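As a cross-check on the offsets used above (forward link at offset 0, back link at CPTRSIZE), a hedged C rendering of both routines; the struct and field names here are assumptions for illustration:

	struct qelem {
		struct qelem	*q_forw;	/* offset 0 */
		struct qelem	*q_back;	/* offset CPTRSIZE */
	};

	void
	insque_c(struct qelem *entryp, struct qelem *predp)
	{
		entryp->q_forw = predp->q_forw;
		entryp->q_back = predp;
		predp->q_forw->q_back = entryp;	/* old successor's back link */
		predp->q_forw = entryp;
	}

	void
	remque_c(struct qelem *entryp)
	{
		entryp->q_back->q_forw = entryp->q_forw;
		entryp->q_forw->q_back = entryp->q_back;
	}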
-#elif defined(__i386)
-
- ENTRY(_remque)
- movl 4(%esp), %ecx
- movl (%ecx), %eax /* entry->forw */
- movl CPTRSIZE(%ecx), %edx /* entry->back */
- movl %eax, (%edx) /* entry->back->forw = entry->forw */
- movl %edx, CPTRSIZE(%eax) /* entry->forw->back = entry->back */
- ret
- SET_SIZE(_remque)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Returns the number of
 * non-null bytes in the string argument.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-size_t
-strlen(const char *str)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
/*
* This is close to a simple transliteration of a C version of this
* routine. We should either just -make- this be a C version, or
@@ -1050,58 +454,12 @@ str_valid:
ret
SET_SIZE(strlen)
-#elif defined(__i386)
-
- ENTRY(strlen)
-#ifdef DEBUG
- movl postbootkernelbase, %eax
- cmpl %eax, 4(%esp)
- jae str_valid
- pushl %ebp
- movl %esp, %ebp
- pushl $.str_panic_msg
- call panic
-#endif /* DEBUG */
-
-str_valid:
- movl 4(%esp), %eax /* %eax = string address */
- testl $3, %eax /* if %eax not word aligned */
- jnz .not_word_aligned /* goto .not_word_aligned */
- .align 4
-.word_aligned:
- movl (%eax), %edx /* move 1 word from (%eax) to %edx */
- movl $0x7f7f7f7f, %ecx
- andl %edx, %ecx /* %ecx = %edx & 0x7f7f7f7f */
- addl $4, %eax /* next word */
- addl $0x7f7f7f7f, %ecx /* %ecx += 0x7f7f7f7f */
- orl %edx, %ecx /* %ecx |= %edx */
- andl $0x80808080, %ecx /* %ecx &= 0x80808080 */
- cmpl $0x80808080, %ecx /* if no null byte in this word */
- je .word_aligned /* goto .word_aligned */
- subl $4, %eax /* post-incremented */
-.not_word_aligned:
- cmpb $0, (%eax) /* if a byte in (%eax) is null */
- je .null_found /* goto .null_found */
- incl %eax /* next byte */
- testl $3, %eax /* if %eax not word aligned */
- jnz .not_word_aligned /* goto .not_word_aligned */
- jmp .word_aligned /* goto .word_aligned */
- .align 4
-.null_found:
- subl 4(%esp), %eax /* %eax -= string address */
- ret
- SET_SIZE(strlen)
-
-#endif /* __i386 */
-
#ifdef DEBUG
.text
.str_panic_msg:
.string "strlen: argument below kernelbase"
#endif /* DEBUG */
-#endif /* __lint */
-
/*
* Berkeley 4.3 introduced symbolically named interrupt levels
 * as a way to deal with priority in a machine-independent fashion.
@@ -1140,25 +498,6 @@ str_valid:
* spl0() Used to lower priority to 0.
*/
-#if defined(__lint)
-
-int spl0(void) { return (0); }
-int spl6(void) { return (0); }
-int spl7(void) { return (0); }
-int spl8(void) { return (0); }
-int splhigh(void) { return (0); }
-int splhi(void) { return (0); }
-int splzs(void) { return (0); }
-
-/* ARGSUSED */
-void
-splx(int level)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
#define SETPRI(level) \
movl $/**/level, %edi; /* new priority */ \
jmp do_splx /* redirect to do_splx */
@@ -1167,22 +506,6 @@ splx(int level)
movl $/**/level, %edi; /* new priority */ \
jmp splr /* redirect to splr */
-#elif defined(__i386)
-
-#define SETPRI(level) \
- pushl $/**/level; /* new priority */ \
- call do_splx; /* invoke common splx code */ \
- addl $4, %esp; /* unstack arg */ \
- ret
-
-#define RAISE(level) \
- pushl $/**/level; /* new priority */ \
- call splr; /* invoke common splr code */ \
- addl $4, %esp; /* unstack args */ \
- ret
-
-#endif /* __i386 */
-
/* locks out all interrupts, including memory errors */
ENTRY(spl8)
SETPRI(15)
@@ -1221,70 +544,14 @@ splx(int level)
jmp do_splx /* redirect to common splx code */
SET_SIZE(splx)
-#endif /* __lint */
-
-#if defined(__i386)
-
-/*
- * Read and write the %gs register
- */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-uint16_t
-getgs(void)
-{ return (0); }
-
-/*ARGSUSED*/
-void
-setgs(uint16_t sel)
-{}
-
-#else /* __lint */
-
- ENTRY(getgs)
- clr %eax
- movw %gs, %ax
- ret
- SET_SIZE(getgs)
-
- ENTRY(setgs)
- movw 4(%esp), %gs
- ret
- SET_SIZE(setgs)
-
-#endif /* __lint */
-#endif /* __i386 */
-
-#if defined(__lint)
-
-void
-pc_reset(void)
-{}
-
-void
-efi_reset(void)
-{}
-
-#else /* __lint */
-
ENTRY(wait_500ms)
-#if defined(__amd64)
pushq %rbx
-#elif defined(__i386)
- push %ebx
-#endif
movl $50000, %ebx
1:
call tenmicrosec
decl %ebx
jnz 1b
-#if defined(__amd64)
popq %rbx
-#elif defined(__i386)
- pop %ebx
-#endif
ret
SET_SIZE(wait_500ms)
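(The delay is simple arithmetic: assuming tenmicrosec() busy-waits for ten microseconds as its name suggests, 50000 iterations x 10 us = 500 ms.)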
@@ -1297,11 +564,7 @@ efi_reset(void)
ENTRY(pc_reset)
-#if defined(__i386)
- testl $RESET_METHOD_KBC, pc_reset_methods
-#elif defined(__amd64)
testl $RESET_METHOD_KBC, pc_reset_methods(%rip)
-#endif
jz 1f
/
@@ -1321,11 +584,7 @@ efi_reset(void)
call wait_500ms
1:
-#if defined(__i386)
- testl $RESET_METHOD_PORT92, pc_reset_methods
-#elif defined(__amd64)
testl $RESET_METHOD_PORT92, pc_reset_methods(%rip)
-#endif
jz 3f
/
@@ -1347,11 +606,7 @@ efi_reset(void)
call wait_500ms
3:
-#if defined(__i386)
- testl $RESET_METHOD_PCI, pc_reset_methods
-#elif defined(__amd64)
testl $RESET_METHOD_PCI, pc_reset_methods(%rip)
-#endif
jz 4f
/ Try the PCI (soft) reset vector (should work on all modern systems,
@@ -1377,15 +632,9 @@ efi_reset(void)
/ Also, use triple fault for EFI firmware
/
ENTRY(efi_reset)
-#if defined(__amd64)
pushq $0x0
pushq $0x0 / IDT base of 0, limit of 0 + 2 unused bytes
lidt (%rsp)
-#elif defined(__i386)
- pushl $0x0
- pushl $0x0 / IDT base of 0, limit of 0 + 2 unused bytes
- lidt (%esp)
-#endif
int $0x0 / Trigger interrupt, generate triple-fault
cli
@@ -1394,23 +643,10 @@ efi_reset(void)
SET_SIZE(efi_reset)
SET_SIZE(pc_reset)
-#endif /* __lint */
-
/*
* C callable in and out routines
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-outl(int port_address, uint32_t val)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(outl)
movw %di, %dx
movl %esi, %eax
@@ -1418,32 +654,6 @@ outl(int port_address, uint32_t val)
ret
SET_SIZE(outl)
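A hedged usage sketch for these accessors: reading a 32-bit PCI configuration register via the standard mechanism #1 ports 0xcf8/0xcfc (the helper name is hypothetical):

	uint32_t
	pci_cfg_read32(int bus, int dev, int func, int off)
	{
		/* select bus/device/function/register in the address port */
		outl(0xcf8, 0x80000000u | (bus << 16) | (dev << 11) |
		    (func << 8) | (off & 0xfc));
		return (inl(0xcfc));	/* inl() is defined below */
	}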
-#elif defined(__i386)
-
- .set PORT, 4
- .set VAL, 8
-
- ENTRY(outl)
- movw PORT(%esp), %dx
- movl VAL(%esp), %eax
- outl (%dx)
- ret
- SET_SIZE(outl)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-outw(int port_address, uint16_t val)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(outw)
movw %di, %dx
movw %si, %ax
@@ -1451,29 +661,6 @@ outw(int port_address, uint16_t val)
ret
SET_SIZE(outw)
-#elif defined(__i386)
-
- ENTRY(outw)
- movw PORT(%esp), %dx
- movw VAL(%esp), %ax
- D16 outl (%dx)
- ret
- SET_SIZE(outw)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-outb(int port_address, uint8_t val)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(outb)
movw %di, %dx
movb %sil, %al
@@ -1481,29 +668,6 @@ outb(int port_address, uint8_t val)
ret
SET_SIZE(outb)
-#elif defined(__i386)
-
- ENTRY(outb)
- movw PORT(%esp), %dx
- movb VAL(%esp), %al
- outb (%dx)
- ret
- SET_SIZE(outb)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/* ARGSUSED */
-uint32_t
-inl(int port_address)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(inl)
xorl %eax, %eax
movw %di, %dx
@@ -1511,28 +675,6 @@ inl(int port_address)
ret
SET_SIZE(inl)
-#elif defined(__i386)
-
- ENTRY(inl)
- movw PORT(%esp), %dx
- inl (%dx)
- ret
- SET_SIZE(inl)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/* ARGSUSED */
-uint16_t
-inw(int port_address)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(inw)
xorl %eax, %eax
movw %di, %dx
@@ -1540,29 +682,6 @@ inw(int port_address)
ret
SET_SIZE(inw)
-#elif defined(__i386)
-
- ENTRY(inw)
- subl %eax, %eax
- movw PORT(%esp), %dx
- D16 inl (%dx)
- ret
- SET_SIZE(inw)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-/* ARGSUSED */
-uint8_t
-inb(int port_address)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(inb)
xorl %eax, %eax
@@ -1571,29 +690,6 @@ inb(int port_address)
ret
SET_SIZE(inb)
-#elif defined(__i386)
-
- ENTRY(inb)
- subl %eax, %eax
- movw PORT(%esp), %dx
- inb (%dx)
- ret
- SET_SIZE(inb)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-repoutsw(int port, uint16_t *addr, int cnt)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(repoutsw)
movl %edx, %ecx
@@ -1603,48 +699,6 @@ repoutsw(int port, uint16_t *addr, int cnt)
ret
SET_SIZE(repoutsw)
-#elif defined(__i386)
-
- /*
- * The arguments and saved registers are on the stack in the
- * following order:
- * | cnt | +16
- * | *addr | +12
- * | port | +8
- * | eip | +4
- * | esi | <-- %esp
- * If additional values are pushed onto the stack, make sure
- * to adjust the following constants accordingly.
- */
- .set PORT, 8
- .set ADDR, 12
- .set COUNT, 16
-
- ENTRY(repoutsw)
- pushl %esi
- movl PORT(%esp), %edx
- movl ADDR(%esp), %esi
- movl COUNT(%esp), %ecx
- rep
- D16 outsl
- popl %esi
- ret
- SET_SIZE(repoutsw)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-repinsw(int port_addr, uint16_t *addr, int cnt)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(repinsw)
movl %edx, %ecx
@@ -1654,33 +708,6 @@ repinsw(int port_addr, uint16_t *addr, int cnt)
ret
SET_SIZE(repinsw)
-#elif defined(__i386)
-
- ENTRY(repinsw)
- pushl %edi
- movl PORT(%esp), %edx
- movl ADDR(%esp), %edi
- movl COUNT(%esp), %ecx
- rep
- D16 insl
- popl %edi
- ret
- SET_SIZE(repinsw)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-repinsb(int port, uint8_t *addr, int count)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(repinsb)
movl %edx, %ecx
@@ -1691,52 +718,11 @@ repinsb(int port, uint8_t *addr, int count)
ret
SET_SIZE(repinsb)
-#elif defined(__i386)
-
- /*
- * The arguments and saved registers are on the stack in the
- * following order:
- * | cnt | +16
- * | *addr | +12
- * | port | +8
- * | eip | +4
- * | esi | <-- %esp
- * If additional values are pushed onto the stack, make sure
- * to adjust the following constants accordingly.
- */
- .set IO_PORT, 8
- .set IO_ADDR, 12
- .set IO_COUNT, 16
-
- ENTRY(repinsb)
- pushl %edi
- movl IO_ADDR(%esp), %edi
- movl IO_COUNT(%esp), %ecx
- movl IO_PORT(%esp), %edx
- rep
- insb
- popl %edi
- ret
- SET_SIZE(repinsb)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Input a stream of 32-bit words.
* NOTE: count is a DWORD count.
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-repinsd(int port, uint32_t *addr, int count)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(repinsd)
movl %edx, %ecx
@@ -1747,36 +733,10 @@ repinsd(int port, uint32_t *addr, int count)
ret
SET_SIZE(repinsd)
-#elif defined(__i386)
-
- ENTRY(repinsd)
- pushl %edi
- movl IO_ADDR(%esp), %edi
- movl IO_COUNT(%esp), %ecx
- movl IO_PORT(%esp), %edx
- rep
- insl
- popl %edi
- ret
- SET_SIZE(repinsd)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Output a stream of bytes
* NOTE: count is a byte count
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-repoutsb(int port, uint8_t *addr, int count)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(repoutsb)
movl %edx, %ecx
@@ -1786,36 +746,10 @@ repoutsb(int port, uint8_t *addr, int count)
ret
SET_SIZE(repoutsb)
-#elif defined(__i386)
-
- ENTRY(repoutsb)
- pushl %esi
- movl IO_ADDR(%esp), %esi
- movl IO_COUNT(%esp), %ecx
- movl IO_PORT(%esp), %edx
- rep
- outsb
- popl %esi
- ret
- SET_SIZE(repoutsb)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Output a stream of 32-bit words
* NOTE: count is a DWORD count
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-repoutsd(int port, uint32_t *addr, int count)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(repoutsd)
movl %edx, %ecx
@@ -1825,22 +759,6 @@ repoutsd(int port, uint32_t *addr, int count)
ret
SET_SIZE(repoutsd)
-#elif defined(__i386)
-
- ENTRY(repoutsd)
- pushl %esi
- movl IO_ADDR(%esp), %esi
- movl IO_COUNT(%esp), %ecx
- movl IO_PORT(%esp), %edx
- rep
- outsl
- popl %esi
- ret
- SET_SIZE(repoutsd)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* void int3(void)
* void int18(void)
@@ -1848,26 +766,6 @@ repoutsd(int port, uint32_t *addr, int count)
* void int_cmci(void)
*/
-#if defined(__lint)
-
-void
-int3(void)
-{}
-
-void
-int18(void)
-{}
-
-void
-int20(void)
-{}
-
-void
-int_cmci(void)
-{}
-
-#else /* __lint */
-
ENTRY(int3)
int $T_BPTFLT
ret
@@ -1894,19 +792,6 @@ int_cmci(void)
ret
SET_SIZE(int_cmci)
-#endif /* __lint */
-
-#if defined(__lint)
-
-/* ARGSUSED */
-int
-scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(scanc)
/* rdi == size */
/* rsi == cp */
@@ -1927,55 +812,11 @@ scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
ret
SET_SIZE(scanc)
-#elif defined(__i386)
-
- ENTRY(scanc)
- pushl %edi
- pushl %esi
- movb 24(%esp), %cl /* mask = %cl */
- movl 16(%esp), %esi /* cp = %esi */
- movl 20(%esp), %edx /* table = %edx */
- movl %esi, %edi
- addl 12(%esp), %edi /* end = &cp[size]; */
-.scanloop:
- cmpl %edi, %esi /* while (cp < end */
- jnb .scandone
- movzbl (%esi), %eax /* %al = *cp */
- incl %esi /* cp++ */
- movb (%edx, %eax), %al /* %al = table[*cp] */
- testb %al, %cl
- jz .scanloop /* && (table[*cp] & mask) == 0) */
- dec %esi /* post-incremented */
-.scandone:
- movl %edi, %eax
- subl %esi, %eax /* return (end - cp) */
- popl %esi
- popl %edi
- ret
- SET_SIZE(scanc)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Replacement functions for ones that are normally inlined.
* In addition to the copy in i86.il, they are defined here just in case.
*/
-#if defined(__lint)
-
-ulong_t
-intr_clear(void)
-{ return (0); }
-
-ulong_t
-clear_int_flag(void)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(intr_clear)
ENTRY(clear_int_flag)
pushfq
@@ -2002,98 +843,17 @@ clear_int_flag(void)
SET_SIZE(clear_int_flag)
SET_SIZE(intr_clear)
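The usual caller-side pairing with intr_restore(), which appears later in this file; a minimal sketch:

	void
	example(void)
	{
		ulong_t s = intr_clear();	/* mask interrupts, save old flags */
		/* ... short critical section ... */
		intr_restore(s);		/* restore the previous PS_IE state */
	}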
-#elif defined(__i386)
-
- ENTRY(intr_clear)
- ENTRY(clear_int_flag)
- pushfl
- popl %eax
-#if defined(__xpv)
- leal xpv_panicking, %edx
- movl (%edx), %edx
- cmpl $0, %edx
- jne 2f
- CLIRET(%edx, %cl) /* returns event mask in %cl */
- /*
- * Synthesize the PS_IE bit from the event mask bit
- */
- andl $_BITNOT(PS_IE), %eax
- testb $1, %cl
- jnz 1f
- orl $PS_IE, %eax
-1:
- ret
-2:
-#endif
- CLI(%edx)
- ret
- SET_SIZE(clear_int_flag)
- SET_SIZE(intr_clear)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-struct cpu *
-curcpup(void)
-{ return 0; }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(curcpup)
movq %gs:CPU_SELF, %rax
ret
SET_SIZE(curcpup)
-#elif defined(__i386)
-
- ENTRY(curcpup)
- movl %gs:CPU_SELF, %eax
- ret
- SET_SIZE(curcpup)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/* htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
 * These functions reverse the byte order of the input parameter and return
* the result. This is to convert the byte order from host byte order
* (little endian) to network byte order (big endian), or vice versa.
*/
-#if defined(__lint)
-
-uint64_t
-htonll(uint64_t i)
-{ return (i); }
-
-uint64_t
-ntohll(uint64_t i)
-{ return (i); }
-
-uint32_t
-htonl(uint32_t i)
-{ return (i); }
-
-uint32_t
-ntohl(uint32_t i)
-{ return (i); }
-
-uint16_t
-htons(uint16_t i)
-{ return (i); }
-
-uint16_t
-ntohs(uint16_t i)
-{ return (i); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(htonll)
ALTENTRY(ntohll)
movq %rdi, %rax
@@ -2121,54 +881,6 @@ ntohs(uint16_t i)
SET_SIZE(ntohs)
SET_SIZE(htons)
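For reference, these conversions are plain byte swaps; a hedged C equivalent using GCC builtins (the *_c names are hypothetical, not kernel interfaces):

	uint64_t htonll_c(uint64_t i) { return (__builtin_bswap64(i)); }
	uint32_t htonl_c(uint32_t i) { return (__builtin_bswap32(i)); }
	uint16_t htons_c(uint16_t i) { return (__builtin_bswap16(i)); }
	/* the ntoh* routines perform the identical swap in the other direction */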
-#elif defined(__i386)
-
- ENTRY(htonll)
- ALTENTRY(ntohll)
- movl 4(%esp), %edx
- movl 8(%esp), %eax
- bswap %edx
- bswap %eax
- ret
- SET_SIZE(ntohll)
- SET_SIZE(htonll)
-
- ENTRY(htonl)
- ALTENTRY(ntohl)
- movl 4(%esp), %eax
- bswap %eax
- ret
- SET_SIZE(ntohl)
- SET_SIZE(htonl)
-
- ENTRY(htons)
- ALTENTRY(ntohs)
- movl 4(%esp), %eax
- bswap %eax
- shrl $16, %eax
- ret
- SET_SIZE(ntohs)
- SET_SIZE(htons)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-intr_restore(ulong_t i)
-{ return; }
-
-/* ARGSUSED */
-void
-restore_int_flag(ulong_t i)
-{ return; }
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(intr_restore)
ENTRY(restore_int_flag)
@@ -2193,72 +905,16 @@ restore_int_flag(ulong_t i)
SET_SIZE(restore_int_flag)
SET_SIZE(intr_restore)
-#elif defined(__i386)
-
- ENTRY(intr_restore)
- ENTRY(restore_int_flag)
- testl $PS_IE, 4(%esp)
- jz 1f
-#if defined(__xpv)
- leal xpv_panicking, %edx
- movl (%edx), %edx
- cmpl $0, %edx
- jne 1f
- /*
- * Since we're -really- running unprivileged, our attempt
- * to change the state of the IF bit will be ignored.
- * The virtual IF bit is tweaked by CLI and STI.
- */
- IE_TO_EVENT_MASK(%edx, 4(%esp))
-#else
- sti
-#endif
-1:
- ret
- SET_SIZE(restore_int_flag)
- SET_SIZE(intr_restore)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-void
-sti(void)
-{}
-
-void
-cli(void)
-{}
-
-#else /* __lint */
-
ENTRY(sti)
STI
ret
SET_SIZE(sti)
ENTRY(cli)
-#if defined(__amd64)
CLI(%rax)
-#elif defined(__i386)
- CLI(%eax)
-#endif /* __i386 */
ret
SET_SIZE(cli)
-#endif /* __lint */
-
-#if defined(__lint)
-
-dtrace_icookie_t
-dtrace_interrupt_disable(void)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(dtrace_interrupt_disable)
pushfq
popq %rax
@@ -2282,45 +938,6 @@ dtrace_interrupt_disable(void)
ret
SET_SIZE(dtrace_interrupt_disable)
-#elif defined(__i386)
-
- ENTRY(dtrace_interrupt_disable)
- pushfl
- popl %eax
-#if defined(__xpv)
- leal xpv_panicking, %edx
- movl (%edx), %edx
- cmpl $0, %edx
- jne .dtrace_interrupt_disable_done
- CLIRET(%edx, %cl) /* returns event mask in %cl */
- /*
- * Synthesize the PS_IE bit from the event mask bit
- */
- andl $_BITNOT(PS_IE), %eax
- testb $1, %cl
- jnz .dtrace_interrupt_disable_done
- orl $PS_IE, %eax
-#else
- CLI(%edx)
-#endif
-.dtrace_interrupt_disable_done:
- ret
- SET_SIZE(dtrace_interrupt_disable)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-dtrace_interrupt_enable(dtrace_icookie_t cookie)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(dtrace_interrupt_enable)
pushq %rdi
popfq
@@ -2340,43 +957,6 @@ dtrace_interrupt_enable(dtrace_icookie_t cookie)
ret
SET_SIZE(dtrace_interrupt_enable)
-#elif defined(__i386)
-
- ENTRY(dtrace_interrupt_enable)
- movl 4(%esp), %eax
- pushl %eax
- popfl
-#if defined(__xpv)
- leal xpv_panicking, %edx
- movl (%edx), %edx
- cmpl $0, %edx
- jne .dtrace_interrupt_enable_done
- /*
- * Since we're -really- running unprivileged, our attempt
- * to change the state of the IF bit will be ignored. The
- * virtual IF bit is tweaked by CLI and STI.
- */
- IE_TO_EVENT_MASK(%edx, %eax)
-#endif
-.dtrace_interrupt_enable_done:
- ret
- SET_SIZE(dtrace_interrupt_enable)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-
-#if defined(lint)
-
-void
-dtrace_membar_producer(void)
-{}
-
-void
-dtrace_membar_consumer(void)
-{}
-
-#else /* __lint */
ENTRY(dtrace_membar_producer)
rep; ret /* use 2 byte return instruction when branch target */
@@ -2388,70 +968,15 @@ dtrace_membar_consumer(void)
/* AMD Software Optimization Guide - Section 6.2 */
SET_SIZE(dtrace_membar_consumer)
-#endif /* __lint */
-
-#if defined(__lint)
-
-kthread_id_t
-threadp(void)
-{ return ((kthread_id_t)0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(threadp)
movq %gs:CPU_THREAD, %rax
ret
SET_SIZE(threadp)
-#elif defined(__i386)
-
- ENTRY(threadp)
- movl %gs:CPU_THREAD, %eax
- ret
- SET_SIZE(threadp)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* Checksum routine for Internet Protocol Headers
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-unsigned int
-ip_ocsum(
- ushort_t *address, /* ptr to 1st message buffer */
- int halfword_count, /* length of data */
- unsigned int sum) /* partial checksum */
-{
- int i;
- unsigned int psum = 0; /* partial sum */
-
- for (i = 0; i < halfword_count; i++, address++) {
- psum += *address;
- }
-
- while ((psum >> 16) != 0) {
- psum = (psum & 0xffff) + (psum >> 16);
- }
-
- psum += sum;
-
- while ((psum >> 16) != 0) {
- psum = (psum & 0xffff) + (psum >> 16);
- }
-
- return (psum);
-}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(ip_ocsum)
pushq %rbp
movq %rsp, %rbp
@@ -2570,139 +1095,10 @@ ip_ocsum(
.quad .only48, .only52, .only56, .only60
SET_SIZE(ip_ocsum)
-#elif defined(__i386)
-
- ENTRY(ip_ocsum)
- pushl %ebp
- movl %esp, %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 12(%ebp), %ecx /* count of half words */
- movl 16(%ebp), %edx /* partial checksum */
- movl 8(%ebp), %esi
- xorl %eax, %eax
- testl %ecx, %ecx
- jz .ip_ocsum_done
-
- testl $3, %esi
- jnz .ip_csum_notaligned
-.ip_csum_aligned:
-.next_iter:
- subl $32, %ecx
- jl .less_than_32
-
- addl 0(%esi), %edx
-.only60:
- adcl 4(%esi), %eax
-.only56:
- adcl 8(%esi), %edx
-.only52:
- adcl 12(%esi), %eax
-.only48:
- adcl 16(%esi), %edx
-.only44:
- adcl 20(%esi), %eax
-.only40:
- adcl 24(%esi), %edx
-.only36:
- adcl 28(%esi), %eax
-.only32:
- adcl 32(%esi), %edx
-.only28:
- adcl 36(%esi), %eax
-.only24:
- adcl 40(%esi), %edx
-.only20:
- adcl 44(%esi), %eax
-.only16:
- adcl 48(%esi), %edx
-.only12:
- adcl 52(%esi), %eax
-.only8:
- adcl 56(%esi), %edx
-.only4:
- adcl 60(%esi), %eax /* We could be adding -1 and -1 with a carry */
-.only0:
- adcl $0, %eax /* we could be adding -1 in eax with a carry */
- adcl $0, %eax
-
- addl $64, %esi
- andl %ecx, %ecx
- jnz .next_iter
-
-.ip_ocsum_done:
- addl %eax, %edx
- adcl $0, %edx
- movl %edx, %eax /* form a 16 bit checksum by */
- shrl $16, %eax /* adding two halves of 32 bit checksum */
- addw %dx, %ax
- adcw $0, %ax
- andl $0xffff, %eax
- popl %edi /* restore registers */
- popl %esi
- popl %ebx
- leave
- ret
-
-.ip_csum_notaligned:
- xorl %edi, %edi
- movw (%esi), %di
- addl %edi, %edx
- adcl $0, %edx
- addl $2, %esi
- decl %ecx
- jmp .ip_csum_aligned
-
-.less_than_32:
- addl $32, %ecx
- testl $1, %ecx
- jz .size_aligned
- andl $0xfe, %ecx
- movzwl (%esi, %ecx, 2), %edi
- addl %edi, %edx
- adcl $0, %edx
-.size_aligned:
- movl %ecx, %edi
- shrl $1, %ecx
- shl $1, %edi
- subl $64, %edi
- addl %edi, %esi
- movl $.ip_ocsum_jmptbl, %edi
- lea (%edi, %ecx, 4), %edi
- xorl %ecx, %ecx
- clc
- jmp *(%edi)
- SET_SIZE(ip_ocsum)
-
- .data
- .align 4
-
-.ip_ocsum_jmptbl:
- .long .only0, .only4, .only8, .only12, .only16, .only20
- .long .only24, .only28, .only32, .only36, .only40, .only44
- .long .only48, .only52, .only56, .only60
-
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* multiply two long numbers and yield a u_longlong_t result, callable from C.
* Provided to manipulate hrtime_t values.
*/
-#if defined(__lint)
-
-/* result = a * b; */
-
-/* ARGSUSED */
-unsigned long long
-mul32(uint_t a, uint_t b)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(mul32)
xorl %edx, %edx /* XX64 joe, paranoia? */
@@ -2713,47 +1109,6 @@ mul32(uint_t a, uint_t b)
ret
SET_SIZE(mul32)
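Equivalently in C (a sketch; mul32_c is a hypothetical name, with the signature taken from the declaration this change removes):

	unsigned long long
	mul32_c(uint_t a, uint_t b)
	{
		/* mull computes the full 32x32 -> 64-bit unsigned product */
		return ((unsigned long long)a * b);
	}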
-#elif defined(__i386)
-
- ENTRY(mul32)
- movl 8(%esp), %eax
- movl 4(%esp), %ecx
- mull %ecx
- ret
- SET_SIZE(mul32)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(notused)
-#if defined(__lint)
-/* ARGSUSED */
-void
-load_pte64(uint64_t *pte, uint64_t pte_value)
-{}
-#else /* __lint */
- .globl load_pte64
-load_pte64:
- movl 4(%esp), %eax
- movl 8(%esp), %ecx
- movl 12(%esp), %edx
- movl %edx, 4(%eax)
- movl %ecx, (%eax)
- ret
-#endif /* __lint */
-#endif /* notused */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-scan_memory(caddr_t addr, size_t size)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(scan_memory)
shrq $3, %rsi /* convert %rsi from byte to quadword count */
jz .scanm_done
@@ -2765,37 +1120,6 @@ scan_memory(caddr_t addr, size_t size)
/* AMD Software Optimization Guide - Section 6.2 */
SET_SIZE(scan_memory)
-#elif defined(__i386)
-
- ENTRY(scan_memory)
- pushl %ecx
- pushl %esi
- movl 16(%esp), %ecx /* move 2nd arg into rep control register */
- shrl $2, %ecx /* convert from byte count to word count */
- jz .scanm_done
- movl 12(%esp), %esi /* move 1st arg into lodsw control register */
- .byte 0xf3 /* rep prefix. lame assembler. sigh. */
- lodsl
-.scanm_done:
- popl %esi
- popl %ecx
- ret
- SET_SIZE(scan_memory)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-/*ARGSUSED */
-int
-lowbit(ulong_t i)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(lowbit)
movl $-1, %eax
@@ -2805,37 +1129,6 @@ lowbit(ulong_t i)
ret
SET_SIZE(lowbit)
-#elif defined(__i386)
-
- ENTRY(lowbit)
- bsfl 4(%esp), %eax
- jz 0f
- incl %eax
- ret
-0:
- xorl %eax, %eax
- ret
- SET_SIZE(lowbit)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-int
-highbit(ulong_t i)
-{ return (0); }
-
-/*ARGSUSED*/
-int
-highbit64(uint64_t i)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(highbit)
ALTENTRY(highbit64)
movl $-1, %eax
@@ -2846,70 +1139,8 @@ highbit64(uint64_t i)
SET_SIZE(highbit64)
SET_SIZE(highbit)
-#elif defined(__i386)
-
- ENTRY(highbit)
- bsrl 4(%esp), %eax
- jz 0f
- incl %eax
- ret
-0:
- xorl %eax, %eax
- ret
- SET_SIZE(highbit)
-
- ENTRY(highbit64)
- bsrl 8(%esp), %eax
- jz highbit
- addl $33, %eax
- ret
- SET_SIZE(highbit64)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-uint64_t
-rdmsr(uint_t r)
-{ return (0); }
-
-/*ARGSUSED*/
-void
-wrmsr(uint_t r, const uint64_t val)
-{}
-
-/*ARGSUSED*/
-uint64_t
-xrdmsr(uint_t r)
-{ return (0); }
-
-/*ARGSUSED*/
-void
-xwrmsr(uint_t r, const uint64_t val)
-{}
-
-void
-invalidate_cache(void)
-{}
-
-/*ARGSUSED*/
-uint64_t
-get_xcr(uint_t r)
-{ return (0); }
-
-/*ARGSUSED*/
-void
-set_xcr(uint_t r, const uint64_t val)
-{}
-
-#else /* __lint */
-
#define XMSR_ACCESS_VAL $0x9c5a203a
-#if defined(__amd64)
-
ENTRY(rdmsr)
movl %edi, %ecx
rdmsr
@@ -2971,84 +1202,11 @@ set_xcr(uint_t r, const uint64_t val)
ret
SET_SIZE(set_xcr)
-#elif defined(__i386)
-
- ENTRY(rdmsr)
- movl 4(%esp), %ecx
- rdmsr
- ret
- SET_SIZE(rdmsr)
-
- ENTRY(wrmsr)
- movl 4(%esp), %ecx
- movl 8(%esp), %eax
- movl 12(%esp), %edx
- wrmsr
- ret
- SET_SIZE(wrmsr)
-
- ENTRY(xrdmsr)
- pushl %ebp
- movl %esp, %ebp
- movl 8(%esp), %ecx
- pushl %edi
- movl XMSR_ACCESS_VAL, %edi /* this value is needed to access MSR */
- rdmsr
- popl %edi
- leave
- ret
- SET_SIZE(xrdmsr)
-
- ENTRY(xwrmsr)
- pushl %ebp
- movl %esp, %ebp
- movl 8(%esp), %ecx
- movl 12(%esp), %eax
- movl 16(%esp), %edx
- pushl %edi
- movl XMSR_ACCESS_VAL, %edi /* this value is needed to access MSR */
- wrmsr
- popl %edi
- leave
- ret
- SET_SIZE(xwrmsr)
-
- ENTRY(get_xcr)
- movl 4(%esp), %ecx
- #xgetbv
- .byte 0x0f,0x01,0xd0
- ret
- SET_SIZE(get_xcr)
-
- ENTRY(set_xcr)
- movl 4(%esp), %ecx
- movl 8(%esp), %eax
- movl 12(%esp), %edx
- #xsetbv
- .byte 0x0f,0x01,0xd1
- ret
- SET_SIZE(set_xcr)
-
-#endif /* __i386 */
-
ENTRY(invalidate_cache)
wbinvd
ret
SET_SIZE(invalidate_cache)
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-getcregs(struct cregs *crp)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(getcregs)
#if defined(__xpv)
/*
@@ -3111,67 +1269,6 @@ getcregs(struct cregs *crp)
#undef GETMSR
-#elif defined(__i386)
-
- ENTRY_NP(getcregs)
-#if defined(__xpv)
- /*
- * Only a few of the hardware control registers or descriptor tables
- * are directly accessible to us, so just zero the structure.
- *
- * XXPV Perhaps it would be helpful for the hypervisor to return
- * virtualized versions of these for post-mortem use.
- * (Need to reevaluate - perhaps it already does!)
- */
- movl 4(%esp), %edx
- pushl $CREGSZ
- pushl %edx
- call bzero
- addl $8, %esp
- movl 4(%esp), %edx
-
- /*
- * Dump what limited information we can
- */
- movl %cr0, %eax
- movl %eax, CREG_CR0(%edx) /* cr0 */
- movl %cr2, %eax
- movl %eax, CREG_CR2(%edx) /* cr2 */
- movl %cr3, %eax
- movl %eax, CREG_CR3(%edx) /* cr3 */
- movl %cr4, %eax
- movl %eax, CREG_CR4(%edx) /* cr4 */
-
-#else /* __xpv */
-
- movl 4(%esp), %edx
- movw $0, CREG_GDT+6(%edx)
- movw $0, CREG_IDT+6(%edx)
- sgdt CREG_GDT(%edx) /* gdt */
- sidt CREG_IDT(%edx) /* idt */
- sldt CREG_LDT(%edx) /* ldt */
- str CREG_TASKR(%edx) /* task */
- movl %cr0, %eax
- movl %eax, CREG_CR0(%edx) /* cr0 */
- movl %cr2, %eax
- movl %eax, CREG_CR2(%edx) /* cr2 */
- movl %cr3, %eax
- movl %eax, CREG_CR3(%edx) /* cr3 */
- bt $X86FSET_LARGEPAGE, x86_featureset
- jnc .nocr4
- movl %cr4, %eax
- movl %eax, CREG_CR4(%edx) /* cr4 */
- jmp .skip
-.nocr4:
- movl $0, CREG_CR4(%edx)
-.skip:
-#endif
- ret
- SET_SIZE(getcregs)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* A panic trigger is a word which is updated atomically and can only be set
@@ -3181,21 +1278,6 @@ getcregs(struct cregs *crp)
* has its own version of this function to allow it to panic correctly from
* probe context.
*/
-#if defined(__lint)
-
-/*ARGSUSED*/
-int
-panic_trigger(int *tp)
-{ return (0); }
-
-/*ARGSUSED*/
-int
-dtrace_panic_trigger(int *tp)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY_NP(panic_trigger)
xorl %eax, %eax
@@ -3223,37 +1305,6 @@ dtrace_panic_trigger(int *tp)
ret
SET_SIZE(dtrace_panic_trigger)
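The trigger protocol amounts to an atomic exchange of the poison value 0xdefacedd (visible in the 32-bit code removed below); a hedged C sketch using a GCC builtin, illustrative only, since the kernel keeps these in assembly so dtrace_panic_trigger() stays safe from probe context:

	int
	panic_trigger_c(volatile int *tp)
	{
		/* atomically swap in the poison; only the first caller sees 0 */
		return (__sync_lock_test_and_set(tp, 0xdefacedd) == 0);
	}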
-#elif defined(__i386)
-
- ENTRY_NP(panic_trigger)
- movl 4(%esp), %edx / %edx = address of trigger
- movl $0xdefacedd, %eax / %eax = 0xdefacedd
- lock / assert lock
- xchgl %eax, (%edx) / exchange %eax and the trigger
- cmpl $0, %eax / if (%eax == 0x0)
- je 0f / return (1);
- movl $0, %eax / else
- ret / return (0);
-0: movl $1, %eax
- ret
- SET_SIZE(panic_trigger)
-
- ENTRY_NP(dtrace_panic_trigger)
- movl 4(%esp), %edx / %edx = address of trigger
- movl $0xdefacedd, %eax / %eax = 0xdefacedd
- lock / assert lock
- xchgl %eax, (%edx) / exchange %eax and the trigger
- cmpl $0, %eax / if (%eax == 0x0)
- je 0f / return (1);
- movl $0, %eax / else
- ret / return (0);
-0: movl $1, %eax
- ret
- SET_SIZE(dtrace_panic_trigger)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* The panic() and cmn_err() functions invoke vpanic() as a common entry point
* into the panic code implemented in panicsys(). vpanic() is responsible
@@ -3268,21 +1319,6 @@ dtrace_panic_trigger(int *tp)
* sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
* branches back into vpanic().
*/
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-vpanic(const char *format, va_list alist)
-{}
-
-/*ARGSUSED*/
-void
-dtrace_vpanic(const char *format, va_list alist)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY_NP(vpanic) /* Initial stack layout: */
@@ -3433,159 +1469,6 @@ vpanic_common:
SET_SIZE(dtrace_vpanic)
-#elif defined(__i386)
-
- ENTRY_NP(vpanic) / Initial stack layout:
-
- pushl %ebp / | %eip | 20
- movl %esp, %ebp / | %ebp | 16
- pushl %eax / | %eax | 12
- pushl %ebx / | %ebx | 8
- pushl %ecx / | %ecx | 4
- pushl %edx / | %edx | 0
-
- movl %esp, %ebx / %ebx = current stack pointer
-
- lea panic_quiesce, %eax / %eax = &panic_quiesce
- pushl %eax / push &panic_quiesce
- call panic_trigger / %eax = panic_trigger()
- addl $4, %esp / reset stack pointer
-
-vpanic_common:
- cmpl $0, %eax / if (%eax == 0)
- je 0f / goto 0f;
-
- /*
- * If panic_trigger() was successful, we are the first to initiate a
- * panic: we now switch to the reserved panic_stack before continuing.
- */
- lea panic_stack, %esp / %esp = panic_stack
- addl $PANICSTKSIZE, %esp / %esp += PANICSTKSIZE
-
-0: subl $REGSIZE, %esp / allocate struct regs
-
- /*
- * Now that we've got everything set up, store the register values as
- * they were when we entered vpanic() to the designated location in
- * the regs structure we allocated on the stack.
- */
-#if !defined(__GNUC_AS__)
- movw %gs, %edx
- movl %edx, REGOFF_GS(%esp)
- movw %fs, %edx
- movl %edx, REGOFF_FS(%esp)
- movw %es, %edx
- movl %edx, REGOFF_ES(%esp)
- movw %ds, %edx
- movl %edx, REGOFF_DS(%esp)
-#else /* __GNUC_AS__ */
- mov %gs, %edx
- mov %edx, REGOFF_GS(%esp)
- mov %fs, %edx
- mov %edx, REGOFF_FS(%esp)
- mov %es, %edx
- mov %edx, REGOFF_ES(%esp)
- mov %ds, %edx
- mov %edx, REGOFF_DS(%esp)
-#endif /* __GNUC_AS__ */
- movl %edi, REGOFF_EDI(%esp)
- movl %esi, REGOFF_ESI(%esp)
- movl 16(%ebx), %ecx
- movl %ecx, REGOFF_EBP(%esp)
- movl %ebx, %ecx
- addl $20, %ecx
- movl %ecx, REGOFF_ESP(%esp)
- movl 8(%ebx), %ecx
- movl %ecx, REGOFF_EBX(%esp)
- movl 0(%ebx), %ecx
- movl %ecx, REGOFF_EDX(%esp)
- movl 4(%ebx), %ecx
- movl %ecx, REGOFF_ECX(%esp)
- movl 12(%ebx), %ecx
- movl %ecx, REGOFF_EAX(%esp)
- movl $0, REGOFF_TRAPNO(%esp)
- movl $0, REGOFF_ERR(%esp)
- lea vpanic, %ecx
- movl %ecx, REGOFF_EIP(%esp)
-#if !defined(__GNUC_AS__)
- movw %cs, %edx
-#else /* __GNUC_AS__ */
- mov %cs, %edx
-#endif /* __GNUC_AS__ */
- movl %edx, REGOFF_CS(%esp)
- pushfl
- popl %ecx
-#if defined(__xpv)
- /*
- * Synthesize the PS_IE bit from the event mask bit
- */
- CURTHREAD(%edx)
- KPREEMPT_DISABLE(%edx)
- EVENT_MASK_TO_IE(%edx, %ecx)
- CURTHREAD(%edx)
- KPREEMPT_ENABLE_NOKP(%edx)
-#endif
- movl %ecx, REGOFF_EFL(%esp)
- movl $0, REGOFF_UESP(%esp)
-#if !defined(__GNUC_AS__)
- movw %ss, %edx
-#else /* __GNUC_AS__ */
- mov %ss, %edx
-#endif /* __GNUC_AS__ */
- movl %edx, REGOFF_SS(%esp)
-
- movl %esp, %ecx / %ecx = &regs
- pushl %eax / push on_panic_stack
- pushl %ecx / push &regs
- movl 12(%ebp), %ecx / %ecx = alist
- pushl %ecx / push alist
- movl 8(%ebp), %ecx / %ecx = format
- pushl %ecx / push format
- call panicsys / panicsys();
- addl $16, %esp / pop arguments
-
- addl $REGSIZE, %esp
- popl %edx
- popl %ecx
- popl %ebx
- popl %eax
- leave
- ret
- SET_SIZE(vpanic)
-
- ENTRY_NP(dtrace_vpanic) / Initial stack layout:
-
- pushl %ebp / | %eip | 20
- movl %esp, %ebp / | %ebp | 16
- pushl %eax / | %eax | 12
- pushl %ebx / | %ebx | 8
- pushl %ecx / | %ecx | 4
- pushl %edx / | %edx | 0
-
- movl %esp, %ebx / %ebx = current stack pointer
-
- lea panic_quiesce, %eax / %eax = &panic_quiesce
- pushl %eax / push &panic_quiesce
- call dtrace_panic_trigger / %eax = dtrace_panic_trigger()
- addl $4, %esp / reset stack pointer
- jmp vpanic_common / jump back to common code
-
- SET_SIZE(dtrace_vpanic)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-void
-hres_tick(void)
-{}
-
-int64_t timedelta;
-hrtime_t hrtime_base;
-
-#else /* __lint */
-
DGDEF3(timedelta, 8, 8)
.long 0, 0
@@ -3599,8 +1482,6 @@ hrtime_t hrtime_base;
DGDEF3(adj_shift, 4, 4)
.long ADJ_SHIFT
-#if defined(__amd64)
-
ENTRY_NP(hres_tick)
pushq %rbp
movq %rsp, %rbp
@@ -3653,212 +1534,6 @@ hrtime_t hrtime_base;
ret
SET_SIZE(hres_tick)
-#elif defined(__i386)
-
- ENTRY_NP(hres_tick)
- pushl %ebp
- movl %esp, %ebp
- pushl %esi
- pushl %ebx
-
- /*
- * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
- * hres_last_tick can only be modified while holding CLOCK_LOCK).
- * At worst, performing this now instead of under CLOCK_LOCK may
- * introduce some jitter in pc_gethrestime().
- */
- call *gethrtimef
- movl %eax, %ebx
- movl %edx, %esi
-
- movl $hres_lock, %eax
- movl $-1, %edx
-.CL1:
- xchgb %dl, (%eax)
- testb %dl, %dl
- jz .CL3 / got it
-.CL2:
- cmpb $0, (%eax) / possible to get lock?
- pause
- jne .CL2
- jmp .CL1 / yes, try again
-.CL3:
- /*
- * compute the interval since last time hres_tick was called
- * and adjust hrtime_base and hrestime accordingly
- * hrtime_base is an 8 byte value (in nsec), hrestime is
- * timestruc_t (sec, nsec)
- */
-
- lea hres_last_tick, %eax
-
- movl %ebx, %edx
- movl %esi, %ecx
-
- subl (%eax), %edx
- sbbl 4(%eax), %ecx
-
- addl %edx, hrtime_base / add interval to hrtime_base
- adcl %ecx, hrtime_base+4
-
- addl %edx, hrestime+4 / add interval to hrestime.tv_nsec
-
- /
- / Now that we have CLOCK_LOCK, we can update hres_last_tick.
- /
- movl %ebx, (%eax)
- movl %esi, 4(%eax)
-
- / get hrestime at this moment. used as base for pc_gethrestime
- /
- / Apply adjustment, if any
- /
- / #define HRES_ADJ (NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
- / (max_hres_adj)
- /
- / void
- / adj_hrestime()
- / {
- / long long adj;
- /
- / if (hrestime_adj == 0)
- / adj = 0;
- / else if (hrestime_adj > 0) {
- / if (hrestime_adj < HRES_ADJ)
- / adj = hrestime_adj;
- / else
- / adj = HRES_ADJ;
- / }
- / else {
- / if (hrestime_adj < -(HRES_ADJ))
- / adj = -(HRES_ADJ);
- / else
- / adj = hrestime_adj;
- / }
- /
- / timedelta -= adj;
- / hrestime_adj = timedelta;
- / hrestime.tv_nsec += adj;
- /
- / while (hrestime.tv_nsec >= NANOSEC) {
- / one_sec++;
- / hrestime.tv_sec++;
- / hrestime.tv_nsec -= NANOSEC;
- / }
- / }
-__adj_hrestime:
- movl hrestime_adj, %esi / if (hrestime_adj == 0)
- movl hrestime_adj+4, %edx
- andl %esi, %esi
- jne .CL4 / no
- andl %edx, %edx
- jne .CL4 / no
- subl %ecx, %ecx / yes, adj = 0;
- subl %edx, %edx
- jmp .CL5
-.CL4:
- subl %ecx, %ecx
- subl %eax, %eax
- subl %esi, %ecx
- sbbl %edx, %eax
- andl %eax, %eax / if (hrestime_adj > 0)
- jge .CL6
-
- / In the following comments, HRES_ADJ is used, while in the code
- / max_hres_adj is used.
- /
- / The test for "hrestime_adj < HRES_ADJ" is complicated because
- / hrestime_adj is 64-bits, while HRES_ADJ is 32-bits. We rely
- / on the logical equivalence of:
- /
- / !(hrestime_adj < HRES_ADJ)
- /
- / and the two step sequence:
- /
- / (HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
- /
- / which computes whether or not the least significant 32-bits
- / of hrestime_adj is greater than HRES_ADJ, followed by:
- /
- / Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
- /
- / which generates a carry whenever step 1 is true or the most
- / significant long of the longlong hrestime_adj is non-zero.
-
- movl max_hres_adj, %ecx / hrestime_adj is positive
- subl %esi, %ecx
- movl %edx, %eax
- adcl $-1, %eax
- jnc .CL7
- movl max_hres_adj, %ecx / adj = HRES_ADJ;
- subl %edx, %edx
- jmp .CL5
-
- / The following computation is similar to the one above.
- /
- / The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
- / hrestime_adj is 64-bits, while HRES_ADJ is 32-bits. We rely
- / on the logical equivalence of:
- /
- / (hrestime_adj > -HRES_ADJ)
- /
- / and the two step sequence:
- /
- / (HRES_ADJ + lsw(hrestime_adj)) generates a Carry
- /
- / which means the least significant 32-bits of hrestime_adj is
- / greater than -HRES_ADJ, followed by:
- /
- / Previous Carry + 0 + msw(hrestime_adj) generates a Carry
- /
- / which generates a carry only when step 1 is true and the most
- / significant long of the longlong hrestime_adj is -1.
-
-.CL6: / hrestime_adj is negative
- movl %esi, %ecx
- addl max_hres_adj, %ecx
- movl %edx, %eax
- adcl $0, %eax
- jc .CL7
- xor %ecx, %ecx
- subl max_hres_adj, %ecx / adj = -(HRES_ADJ);
- movl $-1, %edx
- jmp .CL5
-.CL7:
- movl %esi, %ecx / adj = hrestime_adj;
-.CL5:
- movl timedelta, %esi
- subl %ecx, %esi
- movl timedelta+4, %eax
- sbbl %edx, %eax
- movl %esi, timedelta
- movl %eax, timedelta+4 / timedelta -= adj;
- movl %esi, hrestime_adj
- movl %eax, hrestime_adj+4 / hrestime_adj = timedelta;
- addl hrestime+4, %ecx
-
- movl %ecx, %eax / eax = tv_nsec
-1:
- cmpl $NANOSEC, %eax / if ((unsigned long)tv_nsec >= NANOSEC)
- jb .CL8 / no
- incl one_sec / yes, one_sec++;
- incl hrestime / hrestime.tv_sec++;
- addl $-NANOSEC, %eax / tv_nsec -= NANOSEC
- jmp 1b / check for more seconds
-
-.CL8:
- movl %eax, hrestime+4 / store final into hrestime.tv_nsec
- incl hres_lock / release the hres_lock
-
- popl %ebx
- popl %esi
- leave
- ret
- SET_SIZE(hres_tick)
-
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* void prefetch_smap_w(void *)
*
@@ -3866,52 +1541,21 @@ __adj_hrestime:
* Not implemented for ia32. Stub for compatibility.
*/
-#if defined(__lint)
-
-/*ARGSUSED*/
-void prefetch_smap_w(void *smp)
-{}
-
-#else /* __lint */
-
ENTRY(prefetch_smap_w)
rep; ret /* use 2 byte return instruction when branch target */
/* AMD Software Optimization Guide - Section 6.2 */
SET_SIZE(prefetch_smap_w)
-#endif /* __lint */
-
/*
* prefetch_page_r(page_t *)
* issue prefetch instructions for a page_t
*/
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-prefetch_page_r(void *pp)
-{}
-
-#else /* __lint */
ENTRY(prefetch_page_r)
rep; ret /* use 2 byte return instruction when branch target */
/* AMD Software Optimization Guide - Section 6.2 */
SET_SIZE(prefetch_page_r)
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-int
-bcmp(const void *s1, const void *s2, size_t count)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(bcmp)
pushq %rbp
movq %rsp, %rbp
@@ -3936,155 +1580,18 @@ bcmp(const void *s1, const void *s2, size_t count)
ret
SET_SIZE(bcmp)
-#elif defined(__i386)
-
-#define ARG_S1 8
-#define ARG_S2 12
-#define ARG_LENGTH 16
-
- ENTRY(bcmp)
- pushl %ebp
- movl %esp, %ebp / create new stack frame
-#ifdef DEBUG
- cmpl $0, ARG_LENGTH(%ebp)
- je 1f
- movl postbootkernelbase, %eax
- cmpl %eax, ARG_S1(%ebp)
- jb 0f
- cmpl %eax, ARG_S2(%ebp)
- jnb 1f
-0: pushl $.bcmp_panic_msg
- call panic
-1:
-#endif /* DEBUG */
-
- pushl %edi / save register variable
- movl ARG_S1(%ebp), %eax / %eax = address of string 1
- movl ARG_S2(%ebp), %ecx / %ecx = address of string 2
- cmpl %eax, %ecx / if the same string
- je .equal / goto .equal
- movl ARG_LENGTH(%ebp), %edi / %edi = length in bytes
- cmpl $4, %edi / if %edi < 4
- jb .byte_check / goto .byte_check
- .align 4
-.word_loop:
- movl (%ecx), %edx / move 1 word from (%ecx) to %edx
- leal -4(%edi), %edi / %edi -= 4
- cmpl (%eax), %edx / compare 1 word from (%eax) with %edx
- jne .word_not_equal / if not equal, goto .word_not_equal
- leal 4(%ecx), %ecx / %ecx += 4 (next word)
- leal 4(%eax), %eax / %eax += 4 (next word)
- cmpl $4, %edi / if %edi >= 4
- jae .word_loop / goto .word_loop
-.byte_check:
- cmpl $0, %edi / if %edi == 0
- je .equal / goto .equal
- jmp .byte_loop / goto .byte_loop (checks in bytes)
-.word_not_equal:
- leal 4(%edi), %edi / %edi += 4 (post-decremented)
- .align 4
-.byte_loop:
- movb (%ecx), %dl / move 1 byte from (%ecx) to %dl
- cmpb %dl, (%eax) / compare %dl with 1 byte from (%eax)
- jne .not_equal / if not equal, goto .not_equal
- incl %ecx / %ecx++ (next byte)
- incl %eax / %eax++ (next byte)
- decl %edi / %edi--
- jnz .byte_loop / if not zero, goto .byte_loop
-.equal:
- xorl %eax, %eax / %eax = 0
- popl %edi / restore register variable
- leave / restore old stack frame
- ret / return (NULL)
- .align 4
-.not_equal:
- movl $1, %eax / return 1
- popl %edi / restore register variable
- leave / restore old stack frame
- ret / return (NULL)
- SET_SIZE(bcmp)
-
-#endif /* __i386 */
-
#ifdef DEBUG
.text
.bcmp_panic_msg:
.string "bcmp: arguments below kernelbase"
#endif /* DEBUG */
-#endif /* __lint */
-
-#if defined(__lint)
-
-uint_t
-bsrw_insn(uint16_t mask)
-{
- uint_t index = sizeof (mask) * NBBY - 1;
-
- while ((mask & (1 << index)) == 0)
- index--;
- return (index);
-}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(bsrw_insn)
xorl %eax, %eax
bsrw %di, %ax
ret
SET_SIZE(bsrw_insn)
-#elif defined(__i386)
-
- ENTRY_NP(bsrw_insn)
- movw 4(%esp), %cx
- xorl %eax, %eax
- bsrw %cx, %ax
- ret
- SET_SIZE(bsrw_insn)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-uint_t
-atomic_btr32(uint32_t *pending, uint_t pil)
-{
- return (*pending &= ~(1 << pil));
-}
-
-#else /* __lint */
-
-#if defined(__i386)
-
- ENTRY_NP(atomic_btr32)
- movl 4(%esp), %ecx
- movl 8(%esp), %edx
- xorl %eax, %eax
- lock
- btrl %edx, (%ecx)
- setc %al
- ret
- SET_SIZE(atomic_btr32)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
- uint_t arg2)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(switch_sp_and_call)
pushq %rbp
movq %rsp, %rbp /* set up stack frame */
@@ -4097,33 +1604,6 @@ switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
ret
SET_SIZE(switch_sp_and_call)
-#elif defined(__i386)
-
- ENTRY_NP(switch_sp_and_call)
- pushl %ebp
- mov %esp, %ebp /* set up stack frame */
- movl 8(%ebp), %esp /* switch stack pointer */
- pushl 20(%ebp) /* push func arg 2 */
- pushl 16(%ebp) /* push func arg 1 */
- call *12(%ebp) /* call function */
- addl $8, %esp /* pop arguments */
- leave /* restore stack */
- ret
- SET_SIZE(switch_sp_and_call)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-void
-kmdb_enter(void)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY_NP(kmdb_enter)
pushq %rbp
movq %rsp, %rbp
@@ -4145,60 +1625,11 @@ kmdb_enter(void)
ret
SET_SIZE(kmdb_enter)
-#elif defined(__i386)
-
- ENTRY_NP(kmdb_enter)
- pushl %ebp
- movl %esp, %ebp
-
- /*
- * Save flags, do a 'cli' then return the saved flags
- */
- call intr_clear
-
- int $T_DBGENTR
-
- /*
- * Restore the saved flags
- */
- pushl %eax
- call intr_restore
- addl $4, %esp
-
- leave
- ret
- SET_SIZE(kmdb_enter)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-void
-return_instr(void)
-{}
-
-#else /* __lint */
-
ENTRY_NP(return_instr)
	rep;	ret	/* use 2 byte return instruction when branch target */
/* AMD Software Optimization Guide - Section 6.2 */
SET_SIZE(return_instr)
-#endif /* __lint */
-
-#if defined(__lint)
-
-ulong_t
-getflags(void)
-{
- return (0);
-}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(getflags)
pushfq
popq %rax
@@ -4219,42 +1650,6 @@ getflags(void)
ret
SET_SIZE(getflags)
-#elif defined(__i386)
-
- ENTRY(getflags)
- pushfl
- popl %eax
-#if defined(__xpv)
- CURTHREAD(%ecx)
- KPREEMPT_DISABLE(%ecx)
- /*
- * Synthesize the PS_IE bit from the event mask bit
- */
- CURVCPU(%edx)
- andl $_BITNOT(PS_IE), %eax
- XEN_TEST_UPCALL_MASK(%edx)
- jnz 1f
- orl $PS_IE, %eax
-1:
- KPREEMPT_ENABLE_NOKP(%ecx)
-#endif
- ret
- SET_SIZE(getflags)
-
-#endif /* __i386 */
-
-#endif /* __lint */
-
-#if defined(__lint)
-
-ftrace_icookie_t
-ftrace_interrupt_disable(void)
-{ return (0); }
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(ftrace_interrupt_disable)
pushfq
popq %rax
@@ -4262,93 +1657,22 @@ ftrace_interrupt_disable(void)
ret
SET_SIZE(ftrace_interrupt_disable)
-#elif defined(__i386)
-
- ENTRY(ftrace_interrupt_disable)
- pushfl
- popl %eax
- CLI(%edx)
- ret
- SET_SIZE(ftrace_interrupt_disable)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-ftrace_interrupt_enable(ftrace_icookie_t cookie)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(ftrace_interrupt_enable)
pushq %rdi
popfq
ret
SET_SIZE(ftrace_interrupt_enable)
-#elif defined(__i386)
-
- ENTRY(ftrace_interrupt_enable)
- movl 4(%esp), %eax
- pushl %eax
- popfl
- ret
- SET_SIZE(ftrace_interrupt_enable)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined (__lint)
-
-/*ARGSUSED*/
-void
-clflush_insn(caddr_t addr)
-{}
-
-#else /* __lint */
-
-#if defined (__amd64)
ENTRY(clflush_insn)
clflush (%rdi)
ret
SET_SIZE(clflush_insn)
-#elif defined (__i386)
- ENTRY(clflush_insn)
- movl 4(%esp), %eax
- clflush (%eax)
- ret
- SET_SIZE(clflush_insn)
-
-#endif /* __i386 */
-#endif /* __lint */
-#if defined (__lint)
-/*ARGSUSED*/
-void
-mfence_insn(void)
-{}
-
-#else /* __lint */
-
-#if defined (__amd64)
- ENTRY(mfence_insn)
- mfence
- ret
- SET_SIZE(mfence_insn)
-#elif defined (__i386)
ENTRY(mfence_insn)
mfence
ret
SET_SIZE(mfence_insn)
-#endif /* __i386 */
-#endif /* __lint */
-
/*
* VMware implements an I/O port that programs can query to detect if software
* is running in a VMware hypervisor. This hypervisor port behaves differently
@@ -4358,16 +1682,6 @@ mfence_insn(void)
* References: http://kb.vmware.com/kb/1009458
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-vmware_port(int cmd, uint32_t *regs) { return; }
-
-#else
-
-#if defined(__amd64)
-
ENTRY(vmware_port)
pushq %rbx
movl $VMWARE_HVMAGIC, %eax
@@ -4383,25 +1697,3 @@ vmware_port(int cmd, uint32_t *regs) { return; }
ret
SET_SIZE(vmware_port)
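A hedged sketch of a caller; the command constant and the expectation that %ebx (regs[1]) echoes the magic are assumptions drawn from the KB article cited above:

	int
	on_vmware(void)
	{
		uint32_t regs[4];

		vmware_port(VMWARE_PORT_CMD_GETVERSION, regs);	/* cmd name assumed */
		return (regs[1] == VMWARE_HVMAGIC);
	}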
-#elif defined(__i386)
-
- ENTRY(vmware_port)
- pushl %ebx
- pushl %esi
- movl $VMWARE_HVMAGIC, %eax
- movl $0xffffffff, %ebx
- movl 12(%esp), %ecx
- movl $VMWARE_HVPORT, %edx
- inl (%dx)
- movl 16(%esp), %esi
- movl %eax, (%esi)
- movl %ebx, 4(%esi)
- movl %ecx, 8(%esi)
- movl %edx, 12(%esi)
- popl %esi
- popl %ebx
- ret
- SET_SIZE(vmware_port)
-
-#endif /* __i386 */
-#endif /* __lint */
diff --git a/usr/src/uts/intel/ia32/ml/ia32.il b/usr/src/uts/intel/ia32/ml/ia32.il
deleted file mode 100644
index 8ced7d69a6..0000000000
--- a/usr/src/uts/intel/ia32/ml/ia32.il
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-/
-/ Inline functions for i386 kernels.
-/ Shared between all x86 platform variants.
-/
-
-/
-/ return current thread pointer
-/
-/ NOTE: the "0x10" should be replaced by the computed value of the
-/ offset of "cpu_thread" from the beginning of the struct cpu.
-/ Including "assym.h" does not work, however, since that stuff
-/ is PSM-specific and is only visible to the 'unix' build anyway.
-/ Same with current cpu pointer, where "0xc" should be replaced
-/ by the computed value of the offset of "cpu_self".
-/ Ugh -- what a disaster.
-/
- .inline threadp,0
- movl %gs:0x10, %eax
- .end
-
-/
-/ return current cpu pointer
-/
- .inline curcpup,0
- movl %gs:0xc, %eax
- .end
-
-/
-/ return caller
-/
- .inline caller,0
- movl 4(%ebp), %eax
- .end
-
-/
-/ convert ipl to spl. This is the identity function for i86
-/
- .inline ipltospl,0
- movl (%esp), %eax
- .end
-
-/
-/ Networking byte order functions (too bad, Intel has the wrong byte order)
-/
- .inline htonll,4
- movl (%esp), %edx
- movl 4(%esp), %eax
- bswap %edx
- bswap %eax
- .end
-
- .inline ntohll,4
- movl (%esp), %edx
- movl 4(%esp), %eax
- bswap %edx
- bswap %eax
- .end
-
- .inline htonl,4
- movl (%esp), %eax
- bswap %eax
- .end
-
- .inline ntohl,4
- movl (%esp), %eax
- bswap %eax
- .end
-
- .inline htons,4
- movl (%esp), %eax
- bswap %eax
- shrl $16, %eax
- .end
-
- .inline ntohs,4
- movl (%esp), %eax
- bswap %eax
- shrl $16, %eax
- .end
-
-/*
- * multiply two long numbers and yield a u_longlong_t result
- * Provided to manipulate hrtime_t values.
- */
- .inline mul32, 8
- movl 4(%esp), %eax
- movl (%esp), %ecx
- mull %ecx
- .end
-
-/*
- * Unlock hres_lock and increment the count value. (See clock.h)
- */
- .inline unlock_hres_lock, 0
- lock
- incl hres_lock
- .end
-
- .inline atomic_orb,8
- movl (%esp), %eax
- movl 4(%esp), %edx
- lock
- orb %dl,(%eax)
- .end
-
- .inline atomic_andb,8
- movl (%esp), %eax
- movl 4(%esp), %edx
- lock
- andb %dl,(%eax)
- .end
-
-/*
- * atomic inc/dec operations.
- * void atomic_inc16(uint16_t *addr) { ++*addr; }
- * void atomic_dec16(uint16_t *addr) { --*addr; }
- */
- .inline atomic_inc16,4
- movl (%esp), %eax
- lock
- incw (%eax)
- .end
-
- .inline atomic_dec16,4
- movl (%esp), %eax
- lock
- decw (%eax)
- .end
-
-/*
- * Call the pause instruction. To the Pentium 4 Xeon processor, it acts as
- * a hint that the code sequence is a busy spin-wait loop. Without a pause
- * instruction in these loops, the P4 Xeon processor may suffer a severe
- * penalty when exiting the loop because the processor detects a possible
- * memory violation. Inserting the pause instruction significantly reduces
- * the likelihood of a memory order violation, improving performance.
- * The pause instruction is a NOP on all other IA-32 processors.
- */
- .inline ht_pause, 0
- rep / our compiler doesn't support "pause" yet,
- nop / so we're using "F3 90" opcode directly
- .end
-
-/*
- * prefetch 64 bytes
- *
- * prefetch is an SSE extension which is not supported on older 32-bit
- * processors, so define this as a no-op for now.
- */
-
- .inline prefetch_read_many,4
-/ movl (%esp), %eax
-/ prefetcht0 (%eax)
-/ prefetcht0 32(%eax)
- .end
-
- .inline prefetch_read_once,4
-/ movl (%esp), %eax
-/ prefetchnta (%eax)
-/ prefetchnta 32(%eax)
- .end
-
- .inline prefetch_write_many,4
-/ movl (%esp), %eax
-/ prefetcht0 (%eax)
-/ prefetcht0 32(%eax)
- .end
-
- .inline prefetch_write_once,4
-/ movl (%esp), %eax
-/ prefetcht0 (%eax)
-/ prefetcht0 32(%eax)
- .end
-
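
Editor's note: once SSE prefetch can be assumed, the commented-out bodies above amount to the following GCC-builtin sketch (hypothetical; the builtin's third argument selects the t0-style high-locality hint, and 64 bytes are covered as two 32-byte lines):

    static inline void
    prefetch_read_many_sketch(void *addr)
    {
            __builtin_prefetch(addr, 0, 3);                 /* prefetcht0 (addr) */
            __builtin_prefetch((char *)addr + 32, 0, 3);    /* prefetcht0 32(addr) */
    }
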
diff --git a/usr/src/uts/intel/ia32/ml/lock_prim.s b/usr/src/uts/intel/ia32/ml/lock_prim.s
index 363595ad5a..4267561bf7 100644
--- a/usr/src/uts/intel/ia32/ml/lock_prim.s
+++ b/usr/src/uts/intel/ia32/ml/lock_prim.s
@@ -21,17 +21,13 @@
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ */
+
+/*
* Copyright 2019 Joyent, Inc.
*/
-#if defined(lint) || defined(__lint)
-#include <sys/types.h>
-#include <sys/thread.h>
-#include <sys/cpuvar.h>
-#include <vm/page.h>
-#else /* __lint */
#include "assym.h"
-#endif /* __lint */
#include <sys/mutex_impl.h>
#include <sys/asm_linkage.h>
@@ -48,28 +44,8 @@
* ulock_try() is for a lock in the user address space.
*/
-#if defined(lint) || defined(__lint)
-
-/* ARGSUSED */
-int
-lock_try(lock_t *lp)
-{ return (0); }
-
-/* ARGSUSED */
-int
-lock_spin_try(lock_t *lp)
-{ return (0); }
-
-/* ARGSUSED */
-int
-ulock_try(lock_t *lp)
-{ return (0); }
-
-#else /* __lint */
.globl kernelbase
-#if defined(__amd64)
-
ENTRY(lock_try)
movb $-1, %dl
movzbq %dl, %rax
@@ -117,57 +93,6 @@ ulock_pass:
ret
SET_SIZE(ulock_try)
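
Editor's note: the surviving amd64 code and the deleted i386 code below implement the same contract. A C11 sketch of it (type and function names are mine; the 0xff mirrors the $-1 the asm exchanges in):

    #include <stdatomic.h>

    typedef atomic_uchar byte_lock_sk;      /* stand-in for lock_t */

    static inline int
    lock_try_sketch(byte_lock_sk *lp)
    {
            /* xchgb: store 0xff unconditionally, succeed iff it was clear */
            return (atomic_exchange(lp, 0xff) == 0);
    }
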
-#else
-
- ENTRY(lock_try)
- movl $1,%edx
- movl 4(%esp),%ecx /* ecx = lock addr */
- xorl %eax,%eax
- xchgb %dl, (%ecx) /* using dl will avoid partial */
- testb %dl,%dl /* stalls on P6 ? */
- setz %al
-.lock_try_lockstat_patch_point:
- ret
- movl %gs:CPU_THREAD, %edx /* edx = thread addr */
- testl %eax, %eax
- jz 0f
- movl $LS_LOCK_TRY_ACQUIRE, %eax
- jmp lockstat_wrapper
-0:
- ret
- SET_SIZE(lock_try)
-
- ENTRY(lock_spin_try)
- movl $-1,%edx
- movl 4(%esp),%ecx /* ecx = lock addr */
- xorl %eax,%eax
- xchgb %dl, (%ecx) /* using dl will avoid partial */
- testb %dl,%dl /* stalls on P6 ? */
- setz %al
- ret
- SET_SIZE(lock_spin_try)
-
- ENTRY(ulock_try)
-#ifdef DEBUG
- movl kernelbase, %eax
- cmpl %eax, 4(%esp) /* test uaddr < kernelbase */
- jb ulock_pass /* uaddr < kernelbase, proceed */
-
- pushl $.ulock_panic_msg
- call panic
-
-#endif /* DEBUG */
-
-ulock_pass:
- movl $1,%eax
- movl 4(%esp),%ecx
- xchgb %al, (%ecx)
- xorb $1, %al
- ret
- SET_SIZE(ulock_try)
-
-#endif /* !__amd64 */
-
#ifdef DEBUG
.data
.ulock_panic_msg:
@@ -175,29 +100,11 @@ ulock_pass:
.text
#endif /* DEBUG */
-#endif /* __lint */
-
/*
* lock_clear(lp)
* - unlock lock without changing interrupt priority level.
*/
-#if defined(lint) || defined(__lint)
-
-/* ARGSUSED */
-void
-lock_clear(lock_t *lp)
-{}
-
-/* ARGSUSED */
-void
-ulock_clear(lock_t *lp)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(lock_clear)
movb $0, (%rdi)
.lock_clear_lockstat_patch_point:
@@ -226,38 +133,6 @@ ulock_clr:
ret
SET_SIZE(ulock_clear)
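
Editor's note: releasing the byte lock is just a zero store; in C11 terms (a sketch, not the kernel's actual helper):

    #include <stdatomic.h>

    static inline void
    lock_clear_sketch(atomic_uchar *lp)
    {
            /* "movb $0, (lock)" with release semantics */
            atomic_store_explicit(lp, 0, memory_order_release);
    }
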
-#else
-
- ENTRY(lock_clear)
- movl 4(%esp), %eax
- movb $0, (%eax)
-.lock_clear_lockstat_patch_point:
- ret
- movl %gs:CPU_THREAD, %edx /* edx = thread addr */
- movl %eax, %ecx /* ecx = lock pointer */
- movl $LS_LOCK_CLEAR_RELEASE, %eax
- jmp lockstat_wrapper
- SET_SIZE(lock_clear)
-
- ENTRY(ulock_clear)
-#ifdef DEBUG
- movl kernelbase, %ecx
- cmpl %ecx, 4(%esp) /* test uaddr < kernelbase */
- jb ulock_clr /* uaddr < kernelbase, proceed */
-
- pushl $.ulock_clear_msg
- call panic
-#endif
-
-ulock_clr:
- movl 4(%esp),%eax
- xorl %ecx,%ecx
- movb %cl, (%eax)
- ret
- SET_SIZE(ulock_clear)
-
-#endif /* !__amd64 */
-
#ifdef DEBUG
.data
.ulock_clear_msg:
@@ -266,24 +141,11 @@ ulock_clr:
#endif /* DEBUG */
-#endif /* __lint */
-
/*
* lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
 * Sets pil to new_pil, grabs lp, stores old pil in *old_pil.
*/
-#if defined(lint) || defined(__lint)
-
-/* ARGSUSED */
-void
-lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(lock_set_spl)
pushq %rbp
movq %rsp, %rbp
@@ -315,88 +177,21 @@ lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
jmp lock_set_spl_spin
SET_SIZE(lock_set_spl)
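
Editor's note: a C sketch of the fast path just shown: raise the PIL, try the byte lock, and fall back to the C slow path on contention. splr() and lock_set_spl_spin() are the real routines; the spin prototype is inferred from the argument pushes in the deleted i386 code below:

    #include <stdatomic.h>

    extern int splr(int new_pil);           /* raise PIL, return previous */
    extern void lock_set_spl_spin(atomic_uchar *lp, int new_pil,
        unsigned short *old_pil, int pil);  /* prototype inferred */

    static void
    lock_set_spl_sketch(atomic_uchar *lp, int new_pil, unsigned short *old_pil)
    {
            int pil = splr(new_pil);                /* raise PIL first */

            if (atomic_exchange(lp, 0xff) == 0) {   /* got the byte lock */
                    *old_pil = (unsigned short)pil;
                    return;
            }
            lock_set_spl_spin(lp, new_pil, old_pil, pil);   /* hard case */
    }
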
-#else
-
- ENTRY(lock_set_spl)
- movl 8(%esp), %eax /* get priority level */
- pushl %eax
- call splr /* raise priority level */
- movl 8(%esp), %ecx /* ecx = lock addr */
- movl $-1, %edx
- addl $4, %esp
- xchgb %dl, (%ecx) /* try to set lock */
- testb %dl, %dl /* did we get the lock? ... */
- movl 12(%esp), %edx /* edx = olp pil addr (ZF unaffected) */
- jnz .lss_miss /* ... no, go to C for the hard case */
- movw %ax, (%edx) /* store old pil */
-.lock_set_spl_lockstat_patch_point:
- ret
- movl %gs:CPU_THREAD, %edx /* edx = thread addr*/
- movl $LS_LOCK_SET_SPL_ACQUIRE, %eax
- jmp lockstat_wrapper
-.lss_miss:
- pushl %eax /* original pil */
- pushl %edx /* old_pil addr */
- pushl 16(%esp) /* new_pil */
- pushl %ecx /* lock addr */
- call lock_set_spl_spin
- addl $16, %esp
- ret
- SET_SIZE(lock_set_spl)
-
-#endif /* !__amd64 */
-
-#endif /* __lint */
-
/*
* void
* lock_init(lp)
*/
-#if defined(__lint)
-
-/* ARGSUSED */
-void
-lock_init(lock_t *lp)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(lock_init)
movb $0, (%rdi)
ret
SET_SIZE(lock_init)
-#else
-
- ENTRY(lock_init)
- movl 4(%esp), %eax
- movb $0, (%eax)
- ret
- SET_SIZE(lock_init)
-
-#endif /* !__amd64 */
-
-#endif /* __lint */
-
/*
* void
* lock_set(lp)
*/
-#if defined(lint) || defined(__lint)
-
-/* ARGSUSED */
-void
-lock_set(lock_t *lp)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(lock_set)
movb $-1, %dl
xchgb %dl, (%rdi) /* try to set lock */
@@ -410,40 +205,10 @@ lock_set(lock_t *lp)
jmp lockstat_wrapper
SET_SIZE(lock_set)
-#else
-
- ENTRY(lock_set)
- movl 4(%esp), %ecx /* ecx = lock addr */
- movl $-1, %edx
- xchgb %dl, (%ecx) /* try to set lock */
- testb %dl, %dl /* did we get it? */
- jnz lock_set_spin /* no, go to C for the hard case */
-.lock_set_lockstat_patch_point:
- ret
- movl %gs:CPU_THREAD, %edx /* edx = thread addr */
- movl $LS_LOCK_SET_ACQUIRE, %eax
- jmp lockstat_wrapper
- SET_SIZE(lock_set)
-
-#endif /* !__amd64 */
-
-#endif /* __lint */
-
/*
* lock_clear_splx(lp, s)
*/
-#if defined(lint) || defined(__lint)
-
-/* ARGSUSED */
-void
-lock_clear_splx(lock_t *lp, int s)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(lock_clear_splx)
movb $0, (%rdi) /* clear lock */
.lock_clear_splx_lockstat_patch_point:
@@ -465,32 +230,6 @@ lock_clear_splx(lock_t *lp, int s)
jmp lockstat_wrapper
SET_SIZE(lock_clear_splx)
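
Editor's note: the companion primitive works in the reverse order: drop the lock, then restore the caller's saved priority level. A sketch:

    #include <stdatomic.h>

    extern void splx(int s);        /* restore a saved priority level */

    static void
    lock_clear_splx_sketch(atomic_uchar *lp, int s)
    {
            atomic_store_explicit(lp, 0, memory_order_release);     /* drop lock */
            splx(s);                                                /* restore PIL */
    }
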
-#else
-
- ENTRY(lock_clear_splx)
- movl 4(%esp), %eax /* eax = lock addr */
- movb $0, (%eax) /* clear lock */
-.lock_clear_splx_lockstat_patch_point:
- jmp 0f
-0:
- movl 8(%esp), %edx /* edx = desired pil */
- movl %edx, 4(%esp) /* set spl arg up for splx */
-	jmp	splx			/* let splx do its thing */
-.lock_clear_splx_lockstat:
- movl 8(%esp), %edx /* edx = desired pil */
- pushl %ebp /* set up stack frame */
- movl %esp, %ebp
- pushl %edx
- call splx
- leave /* unwind stack */
- movl 4(%esp), %ecx /* ecx = lock pointer */
- movl %gs:CPU_THREAD, %edx /* edx = thread addr */
- movl $LS_LOCK_CLEAR_SPLX_RELEASE, %eax
- jmp lockstat_wrapper
- SET_SIZE(lock_clear_splx)
-
-#endif /* !__amd64 */
-
#if defined(__GNUC_AS__)
#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL \
(.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2)
@@ -505,8 +244,6 @@ lock_clear_splx(lock_t *lp, int s)
[.lock_clear_splx_lockstat_patch_point + 1]
#endif
-#endif /* __lint */
-
/*
* mutex_enter() and mutex_exit().
*
@@ -530,31 +267,6 @@ lock_clear_splx(lock_t *lp, int s)
* Note that we don't need to test lockstat_event_mask here -- we won't
* patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
*/
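
Editor's note: a C11 sketch of the adaptive fast path this comment describes: compare-and-swap the current thread pointer into an unheld (zero) owner word, and punt anything else to mutex_vector_enter(). The types and the curthread accessor are stand-ins:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct { _Atomic(uintptr_t) m_owner; } amutex_sk;

    extern void mutex_vector_enter(amutex_sk *lp);  /* C slow path */
    extern uintptr_t curthread_sk(void);            /* %gs:CPU_THREAD stand-in */

    static void
    mutex_enter_sketch(amutex_sk *lp)
    {
            uintptr_t unheld = 0;

            /* cmpxchg owner: 0 -> curthread, anything else is the hard case */
            if (!atomic_compare_exchange_strong(&lp->m_owner, &unheld,
                curthread_sk()))
                    mutex_vector_enter(lp);
    }
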
-#if defined(lint) || defined(__lint)
-
-/* ARGSUSED */
-void
-mutex_enter(kmutex_t *lp)
-{}
-
-/* ARGSUSED */
-int
-mutex_tryenter(kmutex_t *lp)
-{ return (0); }
-
-/* ARGSUSED */
-int
-mutex_adaptive_tryenter(mutex_impl_t *lp)
-{ return (0); }
-
-/* ARGSUSED */
-void
-mutex_exit(kmutex_t *lp)
-{}
-
-#else
-
-#if defined(__amd64)
ENTRY_NP(mutex_enter)
movq %gs:CPU_THREAD, %rdx /* rdx = thread ptr */
@@ -717,181 +429,6 @@ mutex_exit_critical_size:
.quad .mutex_exit_critical_end - mutex_exit_critical_start
SET_SIZE(mutex_exit_critical_size)
-#else
-
- ENTRY_NP(mutex_enter)
- movl %gs:CPU_THREAD, %edx /* edx = thread ptr */
- movl 4(%esp), %ecx /* ecx = lock ptr */
- xorl %eax, %eax /* eax = 0 (unheld adaptive) */
- lock
- cmpxchgl %edx, (%ecx)
- jnz mutex_vector_enter
-#if defined(OPTERON_WORKAROUND_6323525)
-.mutex_enter_lockstat_patch_point:
-.mutex_enter_6323525_patch_point:
- ret /* nop space for lfence */
- nop
- nop
-.mutex_enter_lockstat_6323525_patch_point: /* new patch point if lfence */
- nop
-#else /* OPTERON_WORKAROUND_6323525 */
-.mutex_enter_lockstat_patch_point:
- ret
-#endif /* OPTERON_WORKAROUND_6323525 */
- movl $LS_MUTEX_ENTER_ACQUIRE, %eax
- ALTENTRY(lockstat_wrapper) /* expects edx=thread, ecx=lock, */
- /* eax=lockstat event */
- pushl %ebp /* buy a frame */
- movl %esp, %ebp
- incb T_LOCKSTAT(%edx) /* curthread->t_lockstat++ */
- pushl %edx /* save thread pointer */
- movl $lockstat_probemap, %edx
- movl (%edx, %eax, DTRACE_IDSIZE), %eax
- testl %eax, %eax /* check for non-zero probe */
- jz 1f
- pushl %ecx /* push lock */
- pushl %eax /* push probe ID */
- call *lockstat_probe
- addl $8, %esp
-1:
- popl %edx /* restore thread pointer */
- decb T_LOCKSTAT(%edx) /* curthread->t_lockstat-- */
- movl $1, %eax /* return success if tryenter */
- popl %ebp /* pop off frame */
- ret
- SET_SIZE(lockstat_wrapper)
- SET_SIZE(mutex_enter)
-
- ENTRY(lockstat_wrapper_arg) /* expects edx=thread, ecx=lock, */
- /* eax=lockstat event, pushed arg */
- incb T_LOCKSTAT(%edx) /* curthread->t_lockstat++ */
- pushl %edx /* save thread pointer */
- movl $lockstat_probemap, %edx
- movl (%edx, %eax, DTRACE_IDSIZE), %eax
- testl %eax, %eax /* check for non-zero probe */
- jz 1f
- pushl %ebp /* save %ebp */
- pushl 8(%esp) /* push arg1 */
- movl %ebp, 12(%esp) /* fake up the stack frame */
- movl %esp, %ebp /* fake up base pointer */
- addl $12, %ebp /* adjust faked base pointer */
- pushl %ecx /* push lock */
- pushl %eax /* push probe ID */
- call *lockstat_probe
- addl $12, %esp /* adjust for arguments */
- popl %ebp /* pop frame */
-1:
- popl %edx /* restore thread pointer */
- decb T_LOCKSTAT(%edx) /* curthread->t_lockstat-- */
- movl $1, %eax /* return success if tryenter */
- addl $4, %esp /* pop argument */
- ret
- SET_SIZE(lockstat_wrapper_arg)
-
-
- ENTRY(mutex_tryenter)
- movl %gs:CPU_THREAD, %edx /* edx = thread ptr */
- movl 4(%esp), %ecx /* ecx = lock ptr */
- xorl %eax, %eax /* eax = 0 (unheld adaptive) */
- lock
- cmpxchgl %edx, (%ecx)
- jnz mutex_vector_tryenter
- movl %ecx, %eax
-#if defined(OPTERON_WORKAROUND_6323525)
-.mutex_tryenter_lockstat_patch_point:
-.mutex_tryenter_6323525_patch_point:
- ret /* nop space for lfence */
- nop
- nop
-.mutex_tryenter_lockstat_6323525_patch_point: /* new patch point if lfence */
- nop
-#else /* OPTERON_WORKAROUND_6323525 */
-.mutex_tryenter_lockstat_patch_point:
- ret
-#endif /* OPTERON_WORKAROUND_6323525 */
- movl $LS_MUTEX_ENTER_ACQUIRE, %eax
- jmp lockstat_wrapper
- SET_SIZE(mutex_tryenter)
-
- ENTRY(mutex_adaptive_tryenter)
- movl %gs:CPU_THREAD, %edx /* edx = thread ptr */
- movl 4(%esp), %ecx /* ecx = lock ptr */
- xorl %eax, %eax /* eax = 0 (unheld adaptive) */
- lock
- cmpxchgl %edx, (%ecx)
- jnz 0f
- movl %ecx, %eax
-#if defined(OPTERON_WORKAROUND_6323525)
-.mutex_atryenter_6323525_patch_point:
- ret /* nop space for lfence */
- nop
- nop
- nop
-#else /* OPTERON_WORKAROUND_6323525 */
- ret
-#endif /* OPTERON_WORKAROUND_6323525 */
-0:
- xorl %eax, %eax
- ret
- SET_SIZE(mutex_adaptive_tryenter)
-
- .globl mutex_owner_running_critical_start
-
- ENTRY(mutex_owner_running)
-mutex_owner_running_critical_start:
- movl 4(%esp), %eax /* get owner field */
- movl (%eax), %eax
- andl $MUTEX_THREAD, %eax /* remove waiters bit */
- cmpl $0, %eax /* if free, skip */
- je 1f /* go return 0 */
- movl T_CPU(%eax), %ecx /* get owner->t_cpu */
- movl CPU_THREAD(%ecx), %edx /* get t_cpu->cpu_thread */
-.mutex_owner_running_critical_end:
- cmpl %eax, %edx /* owner == running thread? */
- je 2f /* yes, go return cpu */
-1:
- xorl %eax, %eax /* return 0 */
- ret
-2:
- movl %ecx, %eax /* return cpu */
- ret
-
- SET_SIZE(mutex_owner_running)
-
- .globl mutex_owner_running_critical_size
- .type mutex_owner_running_critical_size, @object
- .align CPTRSIZE
-mutex_owner_running_critical_size:
- .long .mutex_owner_running_critical_end - mutex_owner_running_critical_start
- SET_SIZE(mutex_owner_running_critical_size)
-
- .globl mutex_exit_critical_start
-
- ENTRY(mutex_exit)
-mutex_exit_critical_start: /* If interrupted, restart here */
- movl %gs:CPU_THREAD, %edx
- movl 4(%esp), %ecx
- cmpl %edx, (%ecx)
- jne mutex_vector_exit /* wrong type or wrong owner */
- movl $0, (%ecx) /* clear owner AND lock */
-.mutex_exit_critical_end:
-.mutex_exit_lockstat_patch_point:
- ret
- movl $LS_MUTEX_EXIT_RELEASE, %eax
- jmp lockstat_wrapper
- SET_SIZE(mutex_exit)
-
- .globl mutex_exit_critical_size
- .type mutex_exit_critical_size, @object
- .align CPTRSIZE
-mutex_exit_critical_size:
- .long .mutex_exit_critical_end - mutex_exit_critical_start
- SET_SIZE(mutex_exit_critical_size)
-
-#endif /* !__amd64 */
-
-#endif /* __lint */
-
/*
* rw_enter() and rw_exit().
*
@@ -900,21 +437,6 @@ mutex_exit_critical_size:
* and rw_exit (no waiters or not the last reader). If anything complicated
* is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
*/
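
Editor's note: a sketch of the reader fast path described above: bump the hold count with a compare-and-swap unless a writer holds or wants the lock. Flag names follow the asm; the values and layout here are illustrative only:

    #include <stdatomic.h>
    #include <stdint.h>

    #define RW_WRITE_LOCKED_SK  0x1
    #define RW_WRITE_WANTED_SK  0x2
    #define RW_READ_LOCK_SK     0x4     /* increment for one reader hold */

    typedef struct { _Atomic(uintptr_t) rw_wwwh; } krwlock_sk;

    extern void rw_enter_sleep(krwlock_sk *lp, int rw);

    static void
    rw_enter_read_sketch(krwlock_sk *lp)
    {
            uintptr_t old = atomic_load(&lp->rw_wwwh);

            /* writer activity, or a lost race, punts to the C slow path */
            if ((old & (RW_WRITE_LOCKED_SK | RW_WRITE_WANTED_SK)) != 0 ||
                !atomic_compare_exchange_strong(&lp->rw_wwwh, &old,
                old + RW_READ_LOCK_SK))
                    rw_enter_sleep(lp, 0 /* RW_READER */);
    }
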
-#if defined(lint) || defined(__lint)
-
-/* ARGSUSED */
-void
-rw_enter(krwlock_t *lp, krw_t rw)
-{}
-
-/* ARGSUSED */
-void
-rw_exit(krwlock_t *lp)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(rw_enter)
cmpl $RW_WRITER, %esi
@@ -1000,103 +522,7 @@ rw_exit(krwlock_t *lp)
jmp lockstat_wrapper_arg
SET_SIZE(rw_exit)
-#else
-
- ENTRY(rw_enter)
- movl 4(%esp), %ecx /* ecx = lock ptr */
- cmpl $RW_WRITER, 8(%esp)
- je .rw_write_enter
- movl (%ecx), %eax /* eax = old rw_wwwh value */
- testl $RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
- jnz rw_enter_sleep
- leal RW_READ_LOCK(%eax), %edx /* edx = new rw_wwwh value */
- lock
- cmpxchgl %edx, (%ecx) /* try to grab read lock */
- jnz rw_enter_sleep
-.rw_read_enter_lockstat_patch_point:
- ret
- movl %gs:CPU_THREAD, %edx /* edx = thread ptr */
- movl $LS_RW_ENTER_ACQUIRE, %eax
- pushl $RW_READER
- jmp lockstat_wrapper_arg
-.rw_write_enter:
- movl %gs:CPU_THREAD, %edx
- orl $RW_WRITE_LOCKED, %edx /* edx = write-locked value */
- xorl %eax, %eax /* eax = unheld value */
- lock
- cmpxchgl %edx, (%ecx) /* try to grab write lock */
- jnz rw_enter_sleep
-
-#if defined(OPTERON_WORKAROUND_6323525)
-.rw_write_enter_lockstat_patch_point:
-.rw_write_enter_6323525_patch_point:
- ret
- nop
- nop
-.rw_write_enter_lockstat_6323525_patch_point:
- nop
-#else /* OPTERON_WORKAROUND_6323525 */
-.rw_write_enter_lockstat_patch_point:
- ret
-#endif /* OPTERON_WORKAROUND_6323525 */
-
- movl %gs:CPU_THREAD, %edx /* edx = thread ptr */
- movl $LS_RW_ENTER_ACQUIRE, %eax
- pushl $RW_WRITER
- jmp lockstat_wrapper_arg
- SET_SIZE(rw_enter)
-
- ENTRY(rw_exit)
- movl 4(%esp), %ecx /* ecx = lock ptr */
- movl (%ecx), %eax /* eax = old rw_wwwh value */
- cmpl $RW_READ_LOCK, %eax /* single-reader, no waiters? */
- jne .rw_not_single_reader
- xorl %edx, %edx /* edx = new value (unheld) */
-.rw_read_exit:
- lock
- cmpxchgl %edx, (%ecx) /* try to drop read lock */
- jnz rw_exit_wakeup
-.rw_read_exit_lockstat_patch_point:
- ret
- movl $LS_RW_EXIT_RELEASE, %eax
- pushl $RW_READER
- jmp lockstat_wrapper_arg
-.rw_not_single_reader:
- testl $RW_WRITE_LOCKED, %eax /* write-locked or write-wanted? */
- jnz .rw_write_exit
- leal -RW_READ_LOCK(%eax), %edx /* edx = new value */
- cmpl $RW_READ_LOCK, %edx
- jge .rw_read_exit /* not last reader, safe to drop */
- jmp rw_exit_wakeup /* last reader with waiters */
-.rw_write_exit:
- movl %gs:CPU_THREAD, %eax /* eax = thread ptr */
- xorl %edx, %edx /* edx = new value (unheld) */
- orl $RW_WRITE_LOCKED, %eax /* eax = write-locked value */
- lock
- cmpxchgl %edx, (%ecx) /* try to drop read lock */
- jnz rw_exit_wakeup
-.rw_write_exit_lockstat_patch_point:
- ret
- movl %gs:CPU_THREAD, %edx /* edx = thread ptr */
- movl $LS_RW_EXIT_RELEASE, %eax
- pushl $RW_WRITER
- jmp lockstat_wrapper_arg
- SET_SIZE(rw_exit)
-
-#endif /* !__amd64 */
-
-#endif /* __lint */
-
#if defined(OPTERON_WORKAROUND_6323525)
-#if defined(lint) || defined(__lint)
-
-int workaround_6323525_patched;
-
-void
-patch_workaround_6323525(void)
-{}
-
-#else /* lint */
/*
* If it is necessary to patch the lock enter routines with the lfence
@@ -1107,8 +533,6 @@ patch_workaround_6323525(void)
DGDEF3(workaround_6323525_patched, 4, 4)
.long 0
-#if defined(__amd64)
-
#define HOT_MUTEX_PATCH(srcaddr, dstaddr, size) \
movq $size, %rbx; \
movq $dstaddr, %r13; \
@@ -1172,69 +596,9 @@ _lfence_insn:
SET_SIZE(patch_workaround_6323525)
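
Editor's note: both the surviving amd64 macro and the deleted i386 one below boil down to patching one byte at a time through hot_patch_kernel_text(). A sketch of that loop (the prototype is assumed from the call sites):

    #include <stddef.h>
    #include <stdint.h>

    extern void hot_patch_kernel_text(char *addr, uint32_t new_byte,
        unsigned size);     /* real routine; prototype assumed */

    static void
    hot_mutex_patch_sketch(char *dst, const uint8_t *src, size_t size)
    {
            /* the asm walks backwards from the end; order is immaterial */
            while (size-- > 0)
                    hot_patch_kernel_text(dst + size, src[size], 1);
    }
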
-#else /* __amd64 */
-
-#define HOT_MUTEX_PATCH(srcaddr, dstaddr, size) \
- movl $size, %ebx; \
- movl $srcaddr, %esi; \
- addl %ebx, %esi; \
- movl $dstaddr, %edi; \
- addl %ebx, %edi; \
-0: \
- decl %esi; \
- decl %edi; \
- pushl $1; \
- movzbl (%esi), %eax; \
- pushl %eax; \
- pushl %edi; \
- call hot_patch_kernel_text; \
- addl $12, %esp; \
- decl %ebx; \
- testl %ebx, %ebx; \
- jg 0b;
-
-
- /* see comments above */
- ENTRY_NP(patch_workaround_6323525)
- pushl %ebp
- movl %esp, %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
-
- movl $1, workaround_6323525_patched
-
- HOT_MUTEX_PATCH(_lfence_insn, .mutex_enter_6323525_patch_point, 4)
- HOT_MUTEX_PATCH(_lfence_insn, .mutex_tryenter_6323525_patch_point, 4)
- HOT_MUTEX_PATCH(_lfence_insn, .mutex_atryenter_6323525_patch_point, 4)
- HOT_MUTEX_PATCH(_lfence_insn, .rw_write_enter_6323525_patch_point, 4)
-
- popl %edi
- popl %esi
- popl %ebx
- movl %ebp, %esp
- popl %ebp
- ret
-_lfence_insn:
- .byte 0xf, 0xae, 0xe8 / [lfence instruction]
- ret
- SET_SIZE(patch_workaround_6323525)
-
-#endif /* !__amd64 */
-#endif /* !lint */
#endif /* OPTERON_WORKAROUND_6323525 */
-#if defined(lint) || defined(__lint)
-
-void
-lockstat_hot_patch(void)
-{}
-
-#else
-
-#if defined(__amd64)
-
#define HOT_PATCH(addr, event, active_instr, normal_instr, len) \
movq $normal_instr, %rsi; \
movq $active_instr, %rdi; \
@@ -1248,29 +612,9 @@ lockstat_hot_patch(void)
movq $addr, %rdi; \
call hot_patch_kernel_text
-#else
-
-#define HOT_PATCH(addr, event, active_instr, normal_instr, len) \
- movl $normal_instr, %ecx; \
- movl $active_instr, %edx; \
- movl $lockstat_probemap, %eax; \
- movl _MUL(event, DTRACE_IDSIZE)(%eax), %eax; \
- testl %eax, %eax; \
- jz . + 4; \
- movl %edx, %ecx; \
- pushl $len; \
- pushl %ecx; \
- pushl $addr; \
- call hot_patch_kernel_text; \
- addl $12, %esp;
-
-#endif /* !__amd64 */
-
ENTRY(lockstat_hot_patch)
-#if defined(__amd64)
pushq %rbp /* align stack properly */
movq %rsp, %rbp
-#endif /* __amd64 */
#if defined(OPTERON_WORKAROUND_6323525)
cmpl $0, workaround_6323525_patched
@@ -1318,42 +662,10 @@ lockstat_hot_patch(void)
HOT_PATCH(LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT,
LS_LOCK_CLEAR_SPLX_RELEASE,
LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL, 0, 1);
-#if defined(__amd64)
leave /* unwind stack */
-#endif /* __amd64 */
ret
SET_SIZE(lockstat_hot_patch)
-#endif /* __lint */
-
-#if defined(lint) || defined(__lint)
-
-/* XX64 membar_*() should be inlines */
-
-void
-membar_sync(void)
-{}
-
-void
-membar_enter(void)
-{}
-
-void
-membar_exit(void)
-{}
-
-void
-membar_producer(void)
-{}
-
-void
-membar_consumer(void)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(membar_enter)
ALTENTRY(membar_exit)
ALTENTRY(membar_sync)
@@ -1373,43 +685,6 @@ membar_consumer(void)
ret
SET_SIZE(membar_consumer)
-#else
-
- ENTRY(membar_enter)
- ALTENTRY(membar_exit)
- ALTENTRY(membar_sync)
- lock
- xorl $0, (%esp)
- ret
- SET_SIZE(membar_sync)
- SET_SIZE(membar_exit)
- SET_SIZE(membar_enter)
-
-/*
- * On machines that support sfence and lfence, these
- * memory barriers can be more precisely implemented
- * without causing the whole world to stop
- */
- ENTRY(membar_producer)
- .globl _patch_sfence_ret
-_patch_sfence_ret: /* c.f. membar #StoreStore */
- lock
- xorl $0, (%esp)
- ret
- SET_SIZE(membar_producer)
-
- ENTRY(membar_consumer)
- .globl _patch_lfence_ret
-_patch_lfence_ret: /* c.f. membar #LoadLoad */
- lock
- xorl $0, (%esp)
- ret
- SET_SIZE(membar_consumer)
-
-#endif /* !__amd64 */
-
-#endif /* __lint */
-
/*
* thread_onproc()
* Set thread in onproc state for the specified CPU.
@@ -1417,18 +692,6 @@ _patch_lfence_ret: /* c.f. membar #LoadLoad */
* Since the new lock isn't held, the store ordering is important.
* If not done in assembler, the compiler could reorder the stores.
*/
-#if defined(lint) || defined(__lint)
-
-void
-thread_onproc(kthread_id_t t, cpu_t *cp)
-{
- t->t_state = TS_ONPROC;
- t->t_lockp = &cp->cpu_thread_lock;
-}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(thread_onproc)
addq $CPU_THREAD_LOCK, %rsi /* pointer to disp_lock while running */
@@ -1437,36 +700,11 @@ thread_onproc(kthread_id_t t, cpu_t *cp)
ret
SET_SIZE(thread_onproc)
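
Editor's note: in C11 the required ordering could be expressed with a release store instead of hand-written assembly; a sketch with stand-in types (the real TS_ONPROC value and struct layouts differ):

    #include <stdatomic.h>

    typedef struct { atomic_uchar dl_lock; } disp_lock_sk;
    typedef struct { disp_lock_sk cpu_thread_lock; } cpu_sk;
    typedef struct {
            volatile int t_state;
            _Atomic(disp_lock_sk *) t_lockp;
    } kthread_sk;

    #define TS_ONPROC_SK    0x04    /* illustrative value */

    static void
    thread_onproc_sketch(kthread_sk *t, cpu_sk *cp)
    {
            t->t_state = TS_ONPROC_SK;
            /* release ordering keeps the t_state store from sinking below */
            atomic_store_explicit(&t->t_lockp, &cp->cpu_thread_lock,
                memory_order_release);
    }
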
-#else
-
- ENTRY(thread_onproc)
- movl 4(%esp), %eax
- movl 8(%esp), %ecx
- addl $CPU_THREAD_LOCK, %ecx /* pointer to disp_lock while running */
- movl $ONPROC_THREAD, T_STATE(%eax) /* set state to TS_ONPROC */
- movl %ecx, T_LOCKP(%eax) /* store new lock pointer */
- ret
- SET_SIZE(thread_onproc)
-
-#endif /* !__amd64 */
-
-#endif /* __lint */
-
/*
* mutex_delay_default(void)
 * Spins for a few hundred processor cycles and returns to caller.
*/
-#if defined(lint) || defined(__lint)
-
-void
-mutex_delay_default(void)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(mutex_delay_default)
movq $92,%r11
0: decq %r11
@@ -1474,20 +712,3 @@ mutex_delay_default(void)
ret
SET_SIZE(mutex_delay_default)
-#else
-
- ENTRY(mutex_delay_default)
- push %ebp
- movl %esp,%ebp
- andl $-16,%esp
- push %ebx
- movl $93,%ebx
-0: decl %ebx
- jg 0b
- pop %ebx
- leave
- ret
- SET_SIZE(mutex_delay_default)
-
-#endif /* !__amd64 */
-#endif /* __lint */
diff --git a/usr/src/uts/intel/ia32/ml/modstubs.s b/usr/src/uts/intel/ia32/ml/modstubs.s
index 3900b37d2c..59598c47e0 100644
--- a/usr/src/uts/intel/ia32/ml/modstubs.s
+++ b/usr/src/uts/intel/ia32/ml/modstubs.s
@@ -26,12 +26,6 @@
#include <sys/asm_linkage.h>
-#if defined(__lint)
-
-char stubs_base[1], stubs_end[1];
-
-#else /* __lint */
-
#include "assym.h"
/*
@@ -1322,4 +1316,3 @@ fcnname/**/_info: \
ENTRY_NP(stubs_end)
nop
-#endif /* lint */
diff --git a/usr/src/uts/intel/ia32/ml/ovbcopy.s b/usr/src/uts/intel/ia32/ml/ovbcopy.s
index 6774f392e5..0687e67e4b 100644
--- a/usr/src/uts/intel/ia32/ml/ovbcopy.s
+++ b/usr/src/uts/intel/ia32/ml/ovbcopy.s
@@ -3,7 +3,9 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
+/*
+ * Copyright 2019 Joyent, Inc.
+ */
/*-
* Copyright (c) 1993 The Regents of the University of California.
@@ -42,18 +44,6 @@
#include <sys/asm_linkage.h>
-#if defined(__lint)
-
-/*
- * Overlapping bcopy (source and target may overlap arbitrarily).
- */
-/* ARGSUSED */
-void
-ovbcopy(const void *from, void *to, size_t count)
-{}
-
-#else /* __lint */
-
/*
* Adapted from fbsd bcopy().
*
@@ -62,8 +52,6 @@ ovbcopy(const void *from, void *to, size_t count)
* ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
*/
-#if defined(__amd64)
-
ENTRY(ovbcopy)
xchgq %rsi,%rdi
movq %rdx,%rcx
@@ -102,53 +90,3 @@ reverse:
ret
SET_SIZE(ovbcopy)
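
Editor's note: semantically ovbcopy() is memmove() with bcopy-style argument order; the deleted lint stub above gives the prototype. A sketch:

    #include <string.h>
    #include <stddef.h>

    static void
    ovbcopy_sketch(const void *from, void *to, size_t count)
    {
            memmove(to, from, count);   /* handles arbitrary overlap */
    }
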
-#elif defined(__i386)
-
- ENTRY(ovbcopy)
- pushl %esi
- pushl %edi
- movl 12(%esp),%esi
- movl 16(%esp),%edi
- movl 20(%esp),%ecx
-
- movl %edi,%eax
- subl %esi,%eax
- cmpl %ecx,%eax /* overlapping && src < dst? */
- jb reverse
-
- shrl $2,%ecx /* copy by 32-bit words */
- cld /* nope, copy forwards */
- rep
- movsl
- movl 20(%esp),%ecx
- andl $3,%ecx /* any bytes left? */
- rep
- movsb
- popl %edi
- popl %esi
- ret
-
-reverse:
- addl %ecx,%edi /* copy backwards */
- addl %ecx,%esi
- decl %edi
- decl %esi
- andl $3,%ecx /* any fractional bytes? */
- std
- rep
- movsb
- movl 20(%esp),%ecx /* copy remainder by 32-bit words */
- shrl $2,%ecx
- subl $3,%esi
- subl $3,%edi
- rep
- movsl
- popl %edi
- popl %esi
- cld
- ret
- SET_SIZE(ovbcopy)
-
-#endif /* __i386 */
-
-#endif /* __lint */
diff --git a/usr/src/uts/intel/ia32/ml/sseblk.s b/usr/src/uts/intel/ia32/ml/sseblk.s
index 092b3e52fd..836b6b6c97 100644
--- a/usr/src/uts/intel/ia32/ml/sseblk.s
+++ b/usr/src/uts/intel/ia32/ml/sseblk.s
@@ -23,25 +23,21 @@
* Use is subject to license terms.
*/
-#pragma ident "%Z%%M% %I% %E% SMI"
+/*
+ * Copyright 2019 Joyent, Inc.
+ */
#include <sys/asm_linkage.h>
#include <sys/regset.h>
#include <sys/privregs.h>
-#if defined(__lint)
-#include <sys/types.h>
-#include <sys/archsystm.h>
-#else
#include "assym.h"
-#endif
/*
* Do block operations using Streaming SIMD extensions
*/
#if defined(DEBUG)
-#if defined(__amd64)
#define ASSERT_KPREEMPT_DISABLED(t, r32, msg) \
movq %gs:CPU_THREAD, t; \
movsbl T_PREEMPT(t), r32; \
@@ -53,18 +49,6 @@
xorl %eax, %eax; \
call panic; \
5:
-#elif defined(__i386)
-#define ASSERT_KPREEMPT_DISABLED(t, r32, msg) \
- movl %gs:CPU_THREAD, t; \
- movsbl T_PREEMPT(t), r32; \
- testl r32, r32; \
- jne 5f; \
- pushl %ebp; \
- movl %esp, %ebp; \
- pushl $msg; \
- call panic; \
-5:
-#endif /* __i386 */
#else /* DEBUG */
#define ASSERT_KPREEMPT_DISABLED(t, r32, msg)
#endif /* DEBUG */
@@ -77,23 +61,6 @@
#error "mucked up constants"
#endif
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-hwblkclr(void *addr, size_t size)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-#define ADD addq
-#define SUB subq
-#else
-#define ADD addl
-#define SUB subl
-#endif
-
#define SAVE_XMM0(r) \
SAVE_XMM_PROLOG(r, 1); \
movdqa %xmm0, (r)
@@ -106,8 +73,8 @@ hwblkclr(void *addr, size_t size)
movntdq %xmm0, 0x10(dst); \
movntdq %xmm0, 0x20(dst); \
movntdq %xmm0, 0x30(dst); \
- ADD $BLOCKSIZE, dst; \
- SUB $1, cnt
+ addq $BLOCKSIZE, dst; \
+ subq $1, cnt
#define ZERO_LOOP_FINI_XMM(dst) \
mfence
@@ -116,8 +83,6 @@ hwblkclr(void *addr, size_t size)
movdqa 0x0(r), %xmm0; \
RSTOR_XMM_EPILOG(r, 1)
-#if defined(__amd64)
-
/*
* %rdi dst
* %rsi size
@@ -158,65 +123,6 @@ hwblkclr(void *addr, size_t size)
jmp bzero
SET_SIZE(hwblkclr)
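
Editor's note: the entry logic gates on alignment before using non-temporal stores: anything misaligned (or too small) falls back to bzero(). A C sketch; BLOCKSIZE is 64 here because the loop above issues four 16-byte movntdq stores per iteration, and zero_blocks_nt() is a hypothetical stand-in for that loop:

    #include <stdint.h>
    #include <stddef.h>

    #define BLOCKSIZE_SK    64
    #define BLOCKMASK_SK    (BLOCKSIZE_SK - 1)

    extern void bzero(void *addr, size_t len);
    extern void zero_blocks_nt(void *addr, size_t nblocks);     /* hypothetical */

    static void
    hwblkclr_sketch(void *addr, size_t size)
    {
            if (((uintptr_t)addr & BLOCKMASK_SK) != 0 ||
                size < BLOCKSIZE_SK || (size & BLOCKMASK_SK) != 0) {
                    bzero(addr, size);      /* misaligned: plain path */
                    return;
            }
            zero_blocks_nt(addr, size / BLOCKSIZE_SK);  /* movntdq loop */
    }
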
-#elif defined(__i386)
-
- /*
- * %eax dst
- * %ecx size in bytes, loop count
- * %ebx saved %cr0 (#if DEBUG then t->t_preempt)
- * %edi pointer to %xmm register save area
- */
- ENTRY(hwblkclr)
- movl 4(%esp), %eax
- movl 8(%esp), %ecx
- testl $BLOCKMASK, %eax /* address must be BLOCKSIZE aligned */
- jne .dobzero
- cmpl $BLOCKSIZE, %ecx /* size must be at least BLOCKSIZE */
- jl .dobzero
- testl $BLOCKMASK, %ecx /* .. and be a multiple of BLOCKSIZE */
- jne .dobzero
- shrl $BLOCKSHIFT, %ecx
- movl 0xc(%esp), %edx
- pushl %ebx
-
- pushl %esi
- ASSERT_KPREEMPT_DISABLED(%esi, %ebx, .not_disabled)
- popl %esi
- movl %cr0, %ebx
- clts
- testl $CR0_TS, %ebx
- jnz 1f
-
- pushl %edi
- SAVE_XMM0(%edi)
-1: ZERO_LOOP_INIT_XMM(%eax)
-9: ZERO_LOOP_BODY_XMM(%eax, %ecx)
- jnz 9b
- ZERO_LOOP_FINI_XMM(%eax)
-
- testl $CR0_TS, %ebx
- jnz 2f
- RSTOR_XMM0(%edi)
- popl %edi
-2: movl %ebx, %cr0
- popl %ebx
- ret
-.dobzero:
- jmp bzero
- SET_SIZE(hwblkclr)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-/*ARGSUSED*/
-void
-hwblkpagecopy(const void *src, void *dst)
-{}
-
-#else /* __lint */
#define PREFETCH_START(src) \
prefetchnta 0x0(src); \
@@ -244,7 +150,7 @@ hwblkpagecopy(const void *src, void *dst)
movdqa 0x50(src), %xmm5; \
movdqa 0x60(src), %xmm6; \
movdqa 0x70(src), %xmm7; \
- ADD $0x80, src
+ addq $0x80, src
#define COPY_LOOP_BODY_XMM(src, dst, cnt) \
prefetchnta 0x80(src); \
@@ -265,10 +171,10 @@ hwblkpagecopy(const void *src, void *dst)
movntdq %xmm7, 0x70(dst); \
movdqa 0x40(src), %xmm4; \
movdqa 0x50(src), %xmm5; \
- ADD $0x80, dst; \
+ addq $0x80, dst; \
movdqa 0x60(src), %xmm6; \
movdqa 0x70(src), %xmm7; \
- ADD $0x80, src; \
+ addq $0x80, src; \
subl $1, cnt
#define COPY_LOOP_FINI_XMM(dst) \
@@ -292,8 +198,6 @@ hwblkpagecopy(const void *src, void *dst)
movdqa 0x70(r), %xmm7; \
RSTOR_XMM_EPILOG(r, 8)
-#if defined(__amd64)
-
/*
* %rdi src
* %rsi dst
@@ -330,70 +234,6 @@ hwblkpagecopy(const void *src, void *dst)
ret
SET_SIZE(hwblkpagecopy)
-#elif defined(__i386)
-
- /*
- * %eax src
- * %edx dst
- * %ecx loop count
- * %ebx saved %cr0 (#if DEBUG then t->t_preempt)
- * %edi pointer to %xmm register save area
- * %esi #if DEBUG temporary thread pointer
- */
- ENTRY(hwblkpagecopy)
- movl 4(%esp), %eax
- movl 8(%esp), %edx
- PREFETCH_START(%eax)
- pushl %ebx
- /*
- * PAGESIZE is 4096 and each loop moves 128 bytes, but the initial
- * load and final store save us one loop iteration (hence the 32 - 1)
- */
- movl $_CONST(32 - 1), %ecx
- pushl %esi
- ASSERT_KPREEMPT_DISABLED(%esi, %ebx, .not_disabled)
- popl %esi
- movl %cr0, %ebx
- clts
- testl $CR0_TS, %ebx
- jnz 3f
- pushl %edi
- SAVE_XMMS(%edi)
-3: COPY_LOOP_INIT_XMM(%eax)
-4: COPY_LOOP_BODY_XMM(%eax, %edx, %ecx)
- jnz 4b
- COPY_LOOP_FINI_XMM(%edx)
- testl $CR0_TS, %ebx
- jnz 5f
- RSTOR_XMMS(%edi)
- popl %edi
-5: movl %ebx, %cr0
- popl %ebx
- mfence
- ret
- SET_SIZE(hwblkpagecopy)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(__lint)
-
-/*
- * Version of hwblkclr which doesn't use XMM registers.
- * Note that it requires aligned dst and len.
- *
- * XXPV This needs to be performance tuned at some point.
- * Is 4 the best number of iterations to unroll?
- */
-/*ARGSUSED*/
-void
-block_zero_no_xmm(void *dst, int len)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
-
ENTRY(block_zero_no_xmm)
pushq %rbp
movq %rsp, %rbp
@@ -412,49 +252,6 @@ block_zero_no_xmm(void *dst, int len)
ret
SET_SIZE(block_zero_no_xmm)
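
Editor's note: the same pattern is available from C via the streaming-store intrinsics; a sketch mirroring the four-stores-per-iteration structure of the deleted i386 flavor below (dst and len must be suitably aligned, per the deleted comment above):

    #include <immintrin.h>

    static void
    block_zero_no_xmm_sketch(void *dst, int len)
    {
            int *p = (int *)dst;
            int i;

            for (i = 0; i < len / 4; i += 4) {
                    _mm_stream_si32(p + i + 0, 0);  /* movnti */
                    _mm_stream_si32(p + i + 1, 0);
                    _mm_stream_si32(p + i + 2, 0);
                    _mm_stream_si32(p + i + 3, 0);
            }
            _mm_mfence();   /* the asm ends with mfence */
    }
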
-#elif defined(__i386)
-
- ENTRY(block_zero_no_xmm)
- pushl %ebp
- movl %esp, %ebp
- xorl %eax, %eax
- movl 8(%ebp), %edx
- movl 12(%ebp), %ecx
- addl %ecx, %edx
- negl %ecx
-1:
- movnti %eax, (%edx, %ecx)
- movnti %eax, 4(%edx, %ecx)
- movnti %eax, 8(%edx, %ecx)
- movnti %eax, 12(%edx, %ecx)
- addl $16, %ecx
- jnz 1b
- mfence
- leave
- ret
- SET_SIZE(block_zero_no_xmm)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-
-#if defined(__lint)
-
-/*
- * Version of page copy which doesn't use XMM registers.
- *
- * XXPV This needs to be performance tuned at some point.
- * Is 4 the right number of iterations to unroll?
- * Is the load/store order optimal? Should it use prefetch?
- */
-/*ARGSUSED*/
-void
-page_copy_no_xmm(void *dst, void *src)
-{}
-
-#else /* __lint */
-
-#if defined(__amd64)
ENTRY(page_copy_no_xmm)
movq $MMU_STD_PAGESIZE, %rcx
@@ -476,36 +273,7 @@ page_copy_no_xmm(void *dst, void *src)
ret
SET_SIZE(page_copy_no_xmm)
-#elif defined(__i386)
-
- ENTRY(page_copy_no_xmm)
- pushl %esi
- movl $MMU_STD_PAGESIZE, %ecx
- movl 8(%esp), %edx
- movl 12(%esp), %esi
- addl %ecx, %edx
- addl %ecx, %esi
- negl %ecx
-1:
- movl (%esi, %ecx), %eax
- movnti %eax, (%edx, %ecx)
- movl 4(%esi, %ecx), %eax
- movnti %eax, 4(%edx, %ecx)
- movl 8(%esi, %ecx), %eax
- movnti %eax, 8(%edx, %ecx)
- movl 12(%esi, %ecx), %eax
- movnti %eax, 12(%edx, %ecx)
- addl $16, %ecx
- jnz 1b
- mfence
- popl %esi
- ret
- SET_SIZE(page_copy_no_xmm)
-
-#endif /* __i386 */
-#endif /* __lint */
-
-#if defined(DEBUG) && !defined(__lint)
+#if defined(DEBUG)
.text
.not_disabled:
.string "sseblk: preemption not disabled!"
diff --git a/usr/src/uts/intel/ia32/sys/Makefile b/usr/src/uts/intel/ia32/sys/Makefile
index 0ef2320b16..5f4708436f 100644
--- a/usr/src/uts/intel/ia32/sys/Makefile
+++ b/usr/src/uts/intel/ia32/sys/Makefile
@@ -19,21 +19,17 @@
# CDDL HEADER END
#
#
-#pragma ident "%Z%%M% %I% %E% SMI"
-#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
-# uts/intel/ia32/sys/Makefile
+# Copyright 2020 Joyent, Inc.
#
# include global definitions
include ../../../../Makefile.master
HDRS= \
asm_linkage.h \
- kdi_regs.h \
machtypes.h \
- privmregs.h \
privregs.h \
psw.h \
pte.h \
diff --git a/usr/src/uts/intel/ia32/sys/kdi_regs.h b/usr/src/uts/intel/ia32/sys/kdi_regs.h
deleted file mode 100644
index e87948189a..0000000000
--- a/usr/src/uts/intel/ia32/sys/kdi_regs.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright 2018 Joyent, Inc.
- */
-
-#ifndef _IA32_SYS_KDI_REGS_H
-#define _IA32_SYS_KDI_REGS_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define KDIREG_NGREG 21
-
-/*
- * %ss appears in a different place than a typical struct regs, since the
- * machine won't save %ss on a trap entry from the same privilege level.
- */
-
-#define KDIREG_SAVFP 0
-#define KDIREG_SAVPC 1
-#define KDIREG_SS 2
-#define KDIREG_GS 3
-#define KDIREG_FS 4
-#define KDIREG_ES 5
-#define KDIREG_DS 6
-#define KDIREG_EDI 7
-#define KDIREG_ESI 8
-#define KDIREG_EBP 9
-#define KDIREG_ESP 10
-#define KDIREG_EBX 11
-#define KDIREG_EDX 12
-#define KDIREG_ECX 13
-#define KDIREG_EAX 14
-#define KDIREG_TRAPNO 15
-#define KDIREG_ERR 16
-#define KDIREG_EIP 17
-#define KDIREG_CS 18
-#define KDIREG_EFLAGS 19
-#define KDIREG_UESP 20
-
-#define KDIREG_PC KDIREG_EIP
-#define KDIREG_SP KDIREG_ESP
-#define KDIREG_FP KDIREG_EBP
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _IA32_SYS_KDI_REGS_H */
diff --git a/usr/src/uts/intel/ia32/sys/privmregs.h b/usr/src/uts/intel/ia32/sys/privmregs.h
deleted file mode 100644
index 87d9b74bfe..0000000000
--- a/usr/src/uts/intel/ia32/sys/privmregs.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
- * Use is subject to license terms.
- */
-
-#ifndef _IA32_SYS_PRIVMREGS_H
-#define _IA32_SYS_PRIVMREGS_H
-
-#pragma ident "%Z%%M% %I% %E% SMI"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if !defined(__i386)
-#error "non-i386 code depends on i386 privileged header!"
-#endif
-
-#ifndef _ASM
-
-#define PM_GREGS (1 << 0)
-#define PM_CRREGS (1 << 1)
-#define PM_DRREGS (1 << 2)
-
-/*
- * This structure is intended to represent a complete machine state for a CPU,
- * when that information is available. It is only for use internally between
- * KMDB and the kernel, or within MDB. Note that this isn't yet finished.
- */
-typedef struct privmregs {
- ulong_t pm_flags;
- /* general registers */
- struct regs pm_gregs;
- /* cr0-8 */
- ulong_t pm_cr[8];
- /* dr0-8 */
- ulong_t pm_dr[8];
-} privmregs_t;
-
-#endif /* !_ASM */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* !_IA32_SYS_PRIVMREGS_H */
diff --git a/usr/src/uts/intel/io/acpica/osl_ml.s b/usr/src/uts/intel/io/acpica/osl_ml.s
index 9d6bbc97f4..81e7533e3a 100644
--- a/usr/src/uts/intel/io/acpica/osl_ml.s
+++ b/usr/src/uts/intel/io/acpica/osl_ml.s
@@ -23,14 +23,13 @@
* Use is subject to license terms.
*/
+/*
+ * Copyright 2019 Joyent, Inc.
+ */
+
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
-#if defined(lint) || defined(__lint)
-#include <sys/types.h>
-#include "acpi.h"
-#endif /* lint */
-
/*
* Implementation as specific by ACPI 3.0 specification
* section 5.2.10.1
@@ -54,16 +53,6 @@
/* Offset of GlobalLock element in FACS structure */
#define GlobalLock 0x10
-#if defined(lint) || defined(__lint)
-
-/* ARGSUSED */
-UINT32
-__acpi_acquire_global_lock(void *Facs)
-{ return (0); }
-
-#else /* lint */
-
-#if defined(__amd64)
ENTRY(__acpi_acquire_global_lock)
movq $0xff, %rax / error return if FACS is null
orq %rdi, %rdi / %rdi contains pointer to FACS
@@ -84,44 +73,7 @@ __acpi_acquire_global_lock(void *Facs)
ret
SET_SIZE(__acpi_acquire_global_lock)
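
Editor's note: a C11 sketch of the protocol the asm implements (spec section 5.2.10.1): bit 0 is the pending flag, bit 1 the owned flag. The asm returns -1/0 and also handles a null FACS; this sketch keeps only the lock algorithm, with the same truth sense:

    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t
    acpi_acquire_global_lock_sketch(_Atomic(uint32_t) *lock)
    {
            uint32_t old, new;

            do {
                    old = atomic_load(lock);
                    new = (old & ~1u) | 2u;     /* claim ownership ... */
                    if (old & 2u)
                            new |= 1u;          /* ... or mark us pending */
            } while (!atomic_compare_exchange_strong(lock, &old, new));

            return ((new & 3u) != 3u);  /* nonzero: acquired outright */
    }
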
-#elif defined(__i386)
-
- ENTRY(__acpi_acquire_global_lock)
- movl $0xff, %eax / error return if FACS is null
- movl 4(%esp), %ecx / %ecx contains pointer to FACS
- orl %ecx, %ecx
- jz 1f
- leal GlobalLock(%ecx), %ecx / make %ecx point at the lock
-0:
- movl (%ecx), %eax
- movl %eax, %edx
- andl $0xFFFFFFFE, %edx
- btsl $1, %edx
- adcl $0, %edx
- lock
- cmpxchgl %edx, (%ecx)
- jnz 0b
- cmpb $3, %dl
- sbbl %eax, %eax
-1:
- ret
- SET_SIZE(__acpi_acquire_global_lock)
-
-#endif /* i386 */
-
-#endif /* lint */
-
-
-#if defined(lint) || defined(__lint)
-/* ARGSUSED */
-UINT32
-__acpi_release_global_lock(void *Facs)
-{ return (0); }
-
-#else /* lint */
-
-#if defined(__amd64)
ENTRY(__acpi_release_global_lock)
xorq %rax, %rax / error return if FACS is null
orq %rdi, %rdi / %rdi contains pointer to FACS
@@ -139,48 +91,13 @@ __acpi_release_global_lock(void *Facs)
ret
SET_SIZE(__acpi_release_global_lock)
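
Editor's note: the release side clears both bits and reports whether anyone was left pending (in which case the OS must signal the firmware). A matching sketch:

    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t
    acpi_release_global_lock_sketch(_Atomic(uint32_t) *lock)
    {
            uint32_t old, new;

            do {
                    old = atomic_load(lock);
                    new = old & ~3u;    /* clear owned and pending */
            } while (!atomic_compare_exchange_strong(lock, &old, new));

            return (old & 1u);  /* nonzero: a waiter was pending */
    }
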
-#elif defined(__i386)
-
- ENTRY(__acpi_release_global_lock)
- xorl %eax, %eax / error return if FACS is null
- movl 4(%esp), %ecx / %ecx contains pointer to FACS
- orl %ecx, %ecx
- jz 1f
- leal GlobalLock(%ecx), %ecx / make %ecx point at the lock
-0:
- movl (%ecx), %eax
- movl %eax, %edx
- andl $0xFFFFFFFC, %edx
- lock
- cmpxchgl %edx, (%ecx)
- jnz 0b
- andl $1, %eax
-1:
- ret
- SET_SIZE(__acpi_release_global_lock)
-
-#endif /* i386 */
-
-#endif /* lint */
-
/*
* execute WBINVD instruction
*/
-#if defined(lint) || defined(__lint)
-
-/* ARGSUSED */
-void
-__acpi_wbinvd(void)
-{ }
-
-#else /* lint */
-
ENTRY(__acpi_wbinvd)
wbinvd
ret
SET_SIZE(__acpi_wbinvd)
-#endif /* lint */
-
diff --git a/usr/src/uts/intel/kdi/kdi_asm.s b/usr/src/uts/intel/kdi/kdi_asm.s
index 3dd6db5952..5bef22916b 100644
--- a/usr/src/uts/intel/kdi/kdi_asm.s
+++ b/usr/src/uts/intel/kdi/kdi_asm.s
@@ -31,10 +31,6 @@
* the IDT stubs that drop into here (mainly via kdi_cmnint).
*/
-#if defined(__lint)
-#include <sys/types.h>
-#else
-
#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
@@ -716,4 +712,3 @@ kdi_pass_invaltrap:
SETDREG(kdi_setdr6, %dr6)
SETDREG(kdi_setdr7, %dr7)
-#endif /* !__lint */
diff --git a/usr/src/uts/intel/kdi/kdi_idthdl.s b/usr/src/uts/intel/kdi/kdi_idthdl.s
index 510bb20fcb..77ef433184 100644
--- a/usr/src/uts/intel/kdi/kdi_idthdl.s
+++ b/usr/src/uts/intel/kdi/kdi_idthdl.s
@@ -33,10 +33,6 @@
* different number.
*/
-#if defined(__lint)
-#include <sys/types.h>
-#else
-
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/machprivregs.h>
@@ -325,4 +321,3 @@ kdi_isr_end:
nop
#endif
-#endif /* !__lint */