author     jmcp <James.McPherson@Sun.COM>  2009-09-14 23:39:39 -0700
committer  jmcp <James.McPherson@Sun.COM>  2009-09-14 23:39:39 -0700
commit     7d87efa8fdfb9453670f2832df666fdae8291a84 (patch)
tree       3903783b5474d050644204ca9fcbc1d25673ebec
parent     d58d6067b4e8b9207c4b06986154e531009cfc4c (diff)
download   illumos-gate-7d87efa8fdfb9453670f2832df666fdae8291a84.tar.gz
backout 6841893: breaks sparc build
backout 6839215: breaks sparc build
backout 6880217: breaks sparc build
-rw-r--r--  usr/src/cmd/boot/installgrub/installgrub.c  155
-rw-r--r--  usr/src/cmd/boot/installgrub/message.h  13
-rw-r--r--  usr/src/pkgdefs/SUNWcakr.i/prototype_com  3
-rw-r--r--  usr/src/pkgdefs/SUNWckr/prototype_i386  3
-rw-r--r--  usr/src/pkgdefs/SUNWhea/prototype_i386  2
-rw-r--r--  usr/src/tools/scripts/bfu.sh  18
-rw-r--r--  usr/src/uts/i86pc/Makefile.files  2
-rw-r--r--  usr/src/uts/i86pc/Makefile.i86pc.shared  2
-rw-r--r--  usr/src/uts/i86pc/Makefile.rules  7
-rw-r--r--  usr/src/uts/i86pc/io/amd_iommu/amd_iommu.c  27
-rw-r--r--  usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c  74
-rw-r--r--  usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c  41
-rw-r--r--  usr/src/uts/i86pc/sys/Makefile  1
-rw-r--r--  usr/src/uts/i86pc/sys/amd_iommu.h  5
-rw-r--r--  usr/src/uts/intel/Makefile.files  11
-rw-r--r--  usr/src/uts/intel/Makefile.intel.shared  1
-rw-r--r--  usr/src/uts/intel/Makefile.rules  7
-rw-r--r--  usr/src/uts/intel/amd_iommu/Makefile (renamed from usr/src/uts/i86pc/amd_iommu/Makefile)  9
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu.c  440
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu.conf (renamed from usr/src/uts/i86pc/io/amd_iommu/amd_iommu.conf)  0
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu_acpi.c (renamed from usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c)  2
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu_acpi.h (renamed from usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.h)  2
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu_cmd.c (renamed from usr/src/uts/i86pc/io/amd_iommu/amd_iommu_cmd.c)  2
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.c  1880
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.h (renamed from usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.h)  0
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu_log.c (renamed from usr/src/uts/i86pc/io/amd_iommu/amd_iommu_log.c)  2
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu_log.h (renamed from usr/src/uts/i86pc/io/amd_iommu/amd_iommu_log.h)  2
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.c  1699
-rw-r--r--  usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.h (renamed from usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.h)  2
-rw-r--r--  usr/src/uts/intel/sys/Makefile  1
-rw-r--r--  usr/src/uts/intel/sys/amd_iommu.h  56
31 files changed, 4208 insertions, 261 deletions
diff --git a/usr/src/cmd/boot/installgrub/installgrub.c b/usr/src/cmd/boot/installgrub/installgrub.c
index ed552b44ce..f37f1430f5 100644
--- a/usr/src/cmd/boot/installgrub/installgrub.c
+++ b/usr/src/cmd/boot/installgrub/installgrub.c
@@ -228,13 +228,11 @@ static unsigned int
get_start_sector(int fd)
{
static unsigned int start_sect = 0;
- uint32_t secnum = 0, numsec = 0;
- int i, pno, rval, log_part = 0;
+ uint32_t secnum, numsec;
+ int i, pno, rval, ext_sol_part_found = 0;
struct mboot *mboot;
struct ipart *part;
ext_part_t *epp;
- struct part_info dkpi;
- struct extpart_info edkpi;
if (start_sect)
return (start_sect);
@@ -243,84 +241,24 @@ get_start_sector(int fd)
for (i = 0; i < FD_NUMPART; i++) {
part = (struct ipart *)mboot->parts + i;
if (is_bootpar) {
- if (part->systid == 0xbe) {
- start_sect = part->relsect;
- partition = i;
- goto found_part;
- }
- }
- }
-
- /*
- * We will not support x86 boot partition on extended partitions
- */
- if (is_bootpar) {
- (void) fprintf(stderr, NOBOOTPAR);
- exit(-1);
- }
-
- /*
- * Not an x86 boot partition. Search for Solaris fdisk partition
- * Get the solaris partition information from the device
- * and compare the offset of S2 with offset of solaris partition
- * from fdisk partition table.
- */
- if (ioctl(fd, DKIOCEXTPARTINFO, &edkpi) < 0) {
- if (ioctl(fd, DKIOCPARTINFO, &dkpi) < 0) {
- (void) fprintf(stderr, PART_FAIL);
- exit(-1);
- } else {
- edkpi.p_start = dkpi.p_start;
- }
- }
-
- for (i = 0; i < FD_NUMPART; i++) {
- part = (struct ipart *)mboot->parts + i;
-
- if (part->relsect == 0) {
- (void) fprintf(stderr, BAD_PART, i);
- exit(-1);
- }
-
- if (edkpi.p_start >= part->relsect &&
- edkpi.p_start < (part->relsect + part->numsect)) {
- /* Found the partition */
- break;
+ if (part->systid == 0xbe)
+ break;
}
}
- if (i == FD_NUMPART) {
- /* No solaris fdisk partitions (primary or logical) */
- (void) fprintf(stderr, NOSOLPAR);
- exit(-1);
- }
-
- /*
- * We have found a Solaris fdisk partition (primary or extended)
- * Handle the simple case first: Solaris in a primary partition
- */
- if (!fdisk_is_dos_extended(part->systid)) {
- start_sect = part->relsect;
- partition = i;
- goto found_part;
- }
-
- /*
- * Solaris in a logical partition. Find that partition in the
- * extended part.
- */
+ /* Read extended partition to find a solaris partition */
if ((rval = libfdisk_init(&epp, device_p0, NULL, FDISK_READ_DISK))
!= FDISK_SUCCESS) {
switch (rval) {
/*
- * The first 2 cases are not an error per-se, just that
- * there is no Solaris logical partition
+ * FDISK_EBADLOGDRIVE and FDISK_ENOLOGDRIVE can
+ * be considered as soft errors and hence
+ * we do not exit
*/
case FDISK_EBADLOGDRIVE:
+ break;
case FDISK_ENOLOGDRIVE:
- (void) fprintf(stderr, NOSOLPAR);
- exit(-1);
- /*NOTREACHED*/
+ break;
case FDISK_ENOVGEOM:
(void) fprintf(stderr, NO_VIRT_GEOM);
exit(1);
@@ -341,18 +279,54 @@ get_start_sector(int fd)
}
rval = fdisk_get_solaris_part(epp, &pno, &secnum, &numsec);
- if (rval != FDISK_SUCCESS) {
- /* No solaris logical partition */
- (void) fprintf(stderr, NOSOLPAR);
- exit(-1);
+ if (rval == FDISK_SUCCESS) {
+ ext_sol_part_found = 1;
}
libfdisk_fini(&epp);
- start_sect = secnum;
- partition = pno - 1;
- log_part = 1;
+ /*
+ * If there is no boot partition, find the solaris partition
+ */
+
+ if (i == FD_NUMPART) {
+ struct part_info dkpi;
+ struct extpart_info edkpi;
+
+ /*
+ * Get the solaris partition information from the device
+ * and compare the offset of S2 with offset of solaris partition
+ * from fdisk partition table.
+ */
+ if (ioctl(fd, DKIOCEXTPARTINFO, &edkpi) < 0) {
+ if (ioctl(fd, DKIOCPARTINFO, &dkpi) < 0) {
+ (void) fprintf(stderr, PART_FAIL);
+ exit(-1);
+ } else {
+ edkpi.p_start = dkpi.p_start;
+ }
+ }
+
+ for (i = 0; i < FD_NUMPART; i++) {
+ part = (struct ipart *)mboot->parts + i;
+
+ if (part->relsect == 0) {
+ (void) fprintf(stderr, BAD_PART, i);
+ exit(-1);
+ }
+
+ if (edkpi.p_start >= part->relsect &&
+ edkpi.p_start < (part->relsect + part->numsect)) {
+ /* Found the partition */
+ break;
+ }
+ }
+ }
+
+ if ((i == FD_NUMPART) && (!ext_sol_part_found)) {
+ (void) fprintf(stderr, BOOTPAR);
+ exit(-1);
+ }
-found_part:
/* get confirmation for -m */
if (write_mboot && !force_mboot) {
(void) fprintf(stdout, MBOOT_PROMPT);
@@ -362,21 +336,16 @@ found_part:
}
}
- /*
- * Currently if Solaris is in an extended partition we need to
- * write GRUB to the MBR. Check for this.
- */
- if (log_part && !write_mboot) {
- (void) fprintf(stderr, EXTSOLPAR);
- exit(-1);
+ if (fdisk_is_dos_extended(part->systid)) {
+ start_sect = secnum;
+ partition = pno;
+ } else {
+ start_sect = part->relsect;
+ partition = i;
}
- /*
- * warn, if Solaris in primary partition and GRUB not in MBR and
- * partition is not active
- */
- if (!log_part && part->bootid != 128 && !write_mboot) {
- (void) fprintf(stdout, SOLPAR_INACTIVE, partition + 1);
+ if (part->bootid != 128 && write_mboot == 0) {
+ (void) fprintf(stdout, BOOTPAR_INACTIVE, i + 1);
}
return (start_sect);
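
The net effect of the installgrub.c hunks above is easier to see in condensed form. Below is a minimal sketch of the restored get_start_sector() flow; the helper names are hypothetical stand-ins for the inline loops, and the logic simply mirrors the post-backout code:

static unsigned int
get_start_sector_flow(int fd)
{
	/* 1. scan the primary fdisk table; for -b stop at systid 0xbe */
	int i = scan_primary_fdisk_table(fd);

	/*
	 * 2. always probe the extended partition via libfdisk;
	 *    FDISK_EBADLOGDRIVE/FDISK_ENOLOGDRIVE are soft errors
	 */
	int logical = probe_extended_partition(fd);

	/*
	 * 3. no boot partition found: fall back to matching the S2
	 *    offset (DKIOCEXTPARTINFO, then DKIOCPARTINFO) against
	 *    the primary fdisk table
	 */
	if (i == FD_NUMPART)
		i = match_s2_offset(fd);

	/* 4. fail only if neither a primary nor a logical Solaris
	 *    partition was found */
	if (i == FD_NUMPART && !logical) {
		(void) fprintf(stderr, BOOTPAR);
		exit(-1);
	}

	/* extended: start sector from libfdisk; primary: part->relsect */
	return (start_sect);
}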
diff --git a/usr/src/cmd/boot/installgrub/message.h b/usr/src/cmd/boot/installgrub/message.h
index 8bc491f71c..594a7da1a1 100644
--- a/usr/src/cmd/boot/installgrub/message.h
+++ b/usr/src/cmd/boot/installgrub/message.h
@@ -34,20 +34,13 @@ extern "C" {
#define DRY_RUN gettext("dry run--nothing will be written to disk\n")
-#define NOSOLPAR \
- gettext("Solaris partition not found. Aborting operation.\n")
+#define BOOTPAR gettext("Solaris partition not found. Abort operation.\n")
-#define NOBOOTPAR \
- gettext("Solaris x86 boot partition not found. Aborting operation.\n")
-
-#define SOLPAR_INACTIVE gettext("Solaris fdisk partition is inactive.\n")
+#define BOOTPAR_INACTIVE gettext("Solaris boot partition inactive.\n")
#define BOOTPAR_NOTFOUND \
gettext("Solaris boot partition not found on %s\n")
-#define EXTSOLPAR \
- gettext("Solaris in extended partition. -m (MBR) option required\n")
-
#define NOT_RAW_DEVICE gettext("device %s is not a char special device\n")
#define NOT_ROOT_SLICE gettext("raw device must be a root slice (not s2)\n")
@@ -119,8 +112,6 @@ extern "C" {
#define LIBFDISK_INIT_FAIL gettext("Failed to initialize libfdisk.\n")
-
-
#ifdef __cplusplus
}
#endif
diff --git a/usr/src/pkgdefs/SUNWcakr.i/prototype_com b/usr/src/pkgdefs/SUNWcakr.i/prototype_com
index 8bc0475440..64f1cd1bdc 100644
--- a/usr/src/pkgdefs/SUNWcakr.i/prototype_com
+++ b/usr/src/pkgdefs/SUNWcakr.i/prototype_com
@@ -68,7 +68,6 @@ f none platform/i86pc/kernel/drv/amd64/acpippm 755 root sys
f none platform/i86pc/kernel/drv/acpippm 755 root sys
f none platform/i86pc/kernel/drv/acpippm.conf 644 root sys
f none platform/i86pc/kernel/drv/amd64/acpinex 755 root sys
-f none platform/i86pc/kernel/drv/amd64/amd_iommu 755 root sys
f none platform/i86pc/kernel/drv/amd64/ppm 755 root sys
f none platform/i86pc/kernel/drv/amd64/isa 755 root sys
f none platform/i86pc/kernel/drv/amd64/npe 755 root sys
@@ -76,8 +75,6 @@ f none platform/i86pc/kernel/drv/amd64/pci 755 root sys
f none platform/i86pc/kernel/drv/amd64/pit_beep 755 root sys
f none platform/i86pc/kernel/drv/amd64/rootnex 755 root sys
f none platform/i86pc/kernel/drv/acpinex 755 root sys
-f none platform/i86pc/kernel/drv/amd_iommu 755 root sys
-f none platform/i86pc/kernel/drv/amd_iommu.conf 644 root sys
f none platform/i86pc/kernel/drv/cpudrv 755 root sys
f none platform/i86pc/kernel/drv/isa 755 root sys
f none platform/i86pc/kernel/drv/npe 755 root sys
diff --git a/usr/src/pkgdefs/SUNWckr/prototype_i386 b/usr/src/pkgdefs/SUNWckr/prototype_i386
index 83e8fec867..1b29fa83a3 100644
--- a/usr/src/pkgdefs/SUNWckr/prototype_i386
+++ b/usr/src/pkgdefs/SUNWckr/prototype_i386
@@ -68,6 +68,8 @@ f none kernel/drv/arp 755 root sys
f none kernel/drv/acpi_drv 755 root sys
f none kernel/drv/acpi_drv.conf 644 root sys
f none kernel/drv/acpi_toshiba 755 root sys
+f none kernel/drv/amd_iommu 755 root sys
+f none kernel/drv/amd_iommu.conf 644 root sys
f none kernel/drv/bl 755 root sys
f none kernel/drv/bmc 755 root sys
f none kernel/drv/bmc.conf 644 root sys
@@ -294,6 +296,7 @@ f none kernel/drv/amd64/aggr 755 root sys
f none kernel/drv/amd64/arp 755 root sys
f none kernel/drv/amd64/acpi_drv 755 root sys
f none kernel/drv/amd64/acpi_toshiba 755 root sys
+f none kernel/drv/amd64/amd_iommu 755 root sys
f none kernel/drv/amd64/bl 755 root sys
f none kernel/drv/amd64/bmc 755 root sys
f none kernel/drv/amd64/bridge 755 root sys
diff --git a/usr/src/pkgdefs/SUNWhea/prototype_i386 b/usr/src/pkgdefs/SUNWhea/prototype_i386
index fef19b5687..d6c31532ad 100644
--- a/usr/src/pkgdefs/SUNWhea/prototype_i386
+++ b/usr/src/pkgdefs/SUNWhea/prototype_i386
@@ -75,6 +75,7 @@ f none usr/include/ia32/sys/trap.h 644 root bin
f none usr/include/ia32/sys/traptrace.h 644 root bin
f none usr/include/sys/kdi_regs.h 644 root bin
f none usr/include/stack_unwind.h 644 root bin
+f none usr/include/sys/amd_iommu.h 644 root bin
f none usr/include/sys/bootregs.h 644 root bin
f none usr/include/sys/bootsvcs.h 644 root bin
f none usr/include/sys/controlregs.h 644 root bin
@@ -120,7 +121,6 @@ d none usr/platform/i86pc/include 755 root bin
d none usr/platform/i86pc/include/sys 755 root bin
f none usr/platform/i86pc/include/sys/asm_misc.h 644 root bin
f none usr/platform/i86pc/include/sys/acpidev.h 644 root bin
-f none usr/platform/i86pc/include/sys/amd_iommu.h 644 root bin
f none usr/platform/i86pc/include/sys/clock.h 644 root bin
f none usr/platform/i86pc/include/sys/cram.h 644 root bin
f none usr/platform/i86pc/include/sys/debug_info.h 644 root bin
diff --git a/usr/src/tools/scripts/bfu.sh b/usr/src/tools/scripts/bfu.sh
index 962226d667..fe9f110a99 100644
--- a/usr/src/tools/scripts/bfu.sh
+++ b/usr/src/tools/scripts/bfu.sh
@@ -7888,24 +7888,6 @@ mondo_loop() {
rm -f $root/dev/rsr[0-9]*
#
- # Remove old amd_iommu driver
- #
-
- #
- # old: need to remove going forwards:
- #
- rm -f $root/kernel/drv/amd_iommu
- rm -f $root/kernel/drv/amd_iommu.conf
- rm -f $root/kernel/drv/amd64/amd_iommu
-
- #
- # new: need to remove going backwards:
- #
- rm -f $root/platform/i86pc/kernel/drv/amd_iommu.conf
- rm -f $root/platform/i86pc/kernel/drv/amd_iommu
- rm -f $root/platform/i86pc/kernel/drv/amd64/amd_iommu
-
- #
# The pkg* commands should not be used after this point and before
# archive extraction as libcrypto/libssl may not be available.
#
diff --git a/usr/src/uts/i86pc/Makefile.files b/usr/src/uts/i86pc/Makefile.files
index 91756fbb21..da0b6928bf 100644
--- a/usr/src/uts/i86pc/Makefile.files
+++ b/usr/src/uts/i86pc/Makefile.files
@@ -206,8 +206,6 @@ ROOTNEX_OBJS += rootnex.o iommu_rscs.o dmar_acpi.o intel_iommu.o
TZMON_OBJS += tzmon.o
UPPC_OBJS += uppc.o psm_common.o
XSVC_OBJS += xsvc.o
-AMD_IOMMU_OBJS += amd_iommu.o amd_iommu_impl.o amd_iommu_acpi.o \
- amd_iommu_cmd.o amd_iommu_log.o amd_iommu_page_tables.o
#
# Build up defines and paths.
diff --git a/usr/src/uts/i86pc/Makefile.i86pc.shared b/usr/src/uts/i86pc/Makefile.i86pc.shared
index cd8d7d1737..4b8e780995 100644
--- a/usr/src/uts/i86pc/Makefile.i86pc.shared
+++ b/usr/src/uts/i86pc/Makefile.i86pc.shared
@@ -255,13 +255,11 @@ DRV_KMODS += xsvc
DRV_KMODS += tzmon
DRV_KMODS += acpi_drv
DRV_KMODS += acpinex
-DRV_KMODS += amd_iommu
DRV_KMODS += ioat
DRV_KMODS += fipe
DRV_KMODS += cpudrv
-
#
# Platform Power Modules
#
diff --git a/usr/src/uts/i86pc/Makefile.rules b/usr/src/uts/i86pc/Makefile.rules
index d17ff166bd..c51773eadb 100644
--- a/usr/src/uts/i86pc/Makefile.rules
+++ b/usr/src/uts/i86pc/Makefile.rules
@@ -79,10 +79,6 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/i86pc/io/acpi/acpinex/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
-$(OBJS_DIR)/%.o: $(UTSBASE)/i86pc/io/amd_iommu/%.c
- $(COMPILE.c) -o $@ $<
- $(CTFCONVERT_O)
-
$(OBJS_DIR)/%.o: $(UTSBASE)/i86pc/io/ioat/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
@@ -299,9 +295,6 @@ $(LINTS_DIR)/%.ln: $(UTSBASE)/i86pc/io/acpi/acpidev/%.c
$(LINTS_DIR)/%.ln: $(UTSBASE)/i86pc/io/acpi/acpinex/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
-$(LINTS_DIR)/%.ln: $(UTSBASE)/i86pc/io/amd_iommu/%.c
- @($(LHEAD) $(LINT.c) $< $(LTAIL))
-
$(LINTS_DIR)/%.ln: $(UTSBASE)/i86pc/io/ioat/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu.c b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu.c
index 950b5e1e5b..1f638061c0 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu.c
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu.c
@@ -54,7 +54,6 @@ static int amd_iommu_open(dev_t *devp, int flag, int otyp, cred_t *credp);
static int amd_iommu_close(dev_t dev, int flag, int otyp, cred_t *credp);
static int amd_iommu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
cred_t *credp, int *rvalp);
-static int amd_iommu_quiesce(dev_info_t *dip);
static struct cb_ops amd_iommu_cb_ops = {
amd_iommu_open, /* cb_open */
@@ -88,8 +87,7 @@ static struct dev_ops amd_iommu_dev_ops = {
nodev, /* devo_reset */
&amd_iommu_cb_ops, /* devo_cb_ops */
NULL, /* devo_bus_ops */
- nulldev, /* devo_power */
- amd_iommu_quiesce, /* devo_quiesce */
+ nulldev /* devo_power */
};
static struct modldrv modldrv = {
@@ -444,26 +442,3 @@ amd_iommu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
return (ENOTTY);
}
-
-static int
-amd_iommu_quiesce(dev_info_t *dip)
-{
- int instance = ddi_get_instance(dip);
- struct amd_iommu_state *statep;
- const char *f = "amd_iommu_quiesce";
-
- statep = ddi_get_soft_state(amd_iommu_statep, instance);
- if (statep == NULL) {
- cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
- f, instance);
- return (DDI_FAILURE);
- }
-
- if (amd_iommu_teardown(dip, statep, AMD_IOMMU_QUIESCE) != DDI_SUCCESS) {
- cmn_err(CE_WARN, "%s: Unable to quiesce AMD IOMMU "
- "%s%d", f, ddi_driver_name(dip), instance);
- return (DDI_FAILURE);
- }
-
- return (DDI_SUCCESS);
-}
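
The function removed above was the driver's fast-reboot hook: devo_quiesce is the entry point added with the DEVO_REV 4 dev_ops layout, and backing it out returns amd_iommu to the older structure shape seen in the hunk. For contrast, a driver on the new revision with nothing to quiesce would normally plug in the stock ddi_quiesce_not_needed(9F) stub rather than hand-rolling a function; a sketch only, where the example_* names are placeholders and not code from this change:

static struct dev_ops example_dev_ops = {
	DEVO_REV,		/* devo_rev: rev 4 adds devo_quiesce */
	0,			/* devo_refcnt */
	example_getinfo,	/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	example_attach,		/* devo_attach */
	example_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&example_cb_ops,	/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nulldev,		/* devo_power */
	ddi_quiesce_not_needed	/* devo_quiesce: no work at fast reboot */
};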
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c
index d2f625b219..8deb468c2f 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.c
@@ -35,7 +35,7 @@
#include "amd_iommu_acpi.h"
#include "amd_iommu_page_tables.h"
-static int amd_iommu_fini(amd_iommu_t *iommu, int type);
+static int amd_iommu_fini(amd_iommu_t *iommu);
static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
static void amd_iommu_stop(amd_iommu_t *iommu);
@@ -481,7 +481,7 @@ amd_iommu_setup_tables_and_buffers(amd_iommu_t *iommu)
}
static void
-amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu, int type)
+amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu)
{
dev_info_t *dip = iommu->aiomt_dip;
int instance = ddi_get_instance(dip);
@@ -493,22 +493,12 @@ amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu, int type)
AMD_IOMMU_EVENTBASE, 0);
AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
AMD_IOMMU_EVENTLEN, 0);
- AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
- AMD_IOMMU_EVENTHEADPTR, 0);
- AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
- AMD_IOMMU_EVENTTAILPTR, 0);
-
iommu->aiomt_cmdbuf = NULL;
AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
AMD_IOMMU_COMBASE, 0);
AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
AMD_IOMMU_COMLEN, 0);
- AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
- AMD_IOMMU_CMDHEADPTR, 0);
- AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
- AMD_IOMMU_CMDTAILPTR, 0);
-
iommu->aiomt_devtbl = NULL;
AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
@@ -516,7 +506,7 @@ amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu, int type)
AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
AMD_IOMMU_DEVTABSIZE, 0);
- if (iommu->aiomt_dmahdl == NULL || type == AMD_IOMMU_QUIESCE)
+ if (iommu->aiomt_dmahdl == NULL)
return;
/* Unbind the handle */
@@ -1060,7 +1050,7 @@ amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
"control regs. Skipping IOMMU idx=%d", f, driver,
instance, idx);
mutex_exit(&iommu->aiomt_mutex);
- (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
+ (void) amd_iommu_fini(iommu);
return (NULL);
}
@@ -1104,13 +1094,13 @@ amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
*/
if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
mutex_exit(&iommu->aiomt_mutex);
- (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
+ (void) amd_iommu_fini(iommu);
return (NULL);
}
if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
mutex_exit(&iommu->aiomt_mutex);
- (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
+ (void) amd_iommu_fini(iommu);
return (NULL);
}
@@ -1118,7 +1108,7 @@ amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
mutex_exit(&iommu->aiomt_mutex);
- (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
+ (void) amd_iommu_fini(iommu);
return (NULL);
}
@@ -1135,20 +1125,20 @@ amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
*/
if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
mutex_exit(&iommu->aiomt_mutex);
- (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
+ (void) amd_iommu_fini(iommu);
return (NULL);
}
if (amd_iommu_start(iommu) != DDI_SUCCESS) {
mutex_exit(&iommu->aiomt_mutex);
- (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
+ (void) amd_iommu_fini(iommu);
return (NULL);
}
/* xxx register/start race */
if (amd_iommu_register(iommu) != DDI_SUCCESS) {
mutex_exit(&iommu->aiomt_mutex);
- (void) amd_iommu_fini(iommu, AMD_IOMMU_TEARDOWN);
+ (void) amd_iommu_fini(iommu);
return (NULL);
}
@@ -1161,7 +1151,7 @@ amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
}
static int
-amd_iommu_fini(amd_iommu_t *iommu, int type)
+amd_iommu_fini(amd_iommu_t *iommu)
{
int idx = iommu->aiomt_idx;
dev_info_t *dip = iommu->aiomt_dip;
@@ -1169,28 +1159,17 @@ amd_iommu_fini(amd_iommu_t *iommu, int type)
const char *driver = ddi_driver_name(dip);
const char *f = "amd_iommu_fini";
- if (type == AMD_IOMMU_TEARDOWN) {
- mutex_enter(&iommu->aiomt_mutex);
- if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
- cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
- "idx = %d", f, driver, instance, idx);
- return (DDI_FAILURE);
- }
+ mutex_enter(&iommu->aiomt_mutex);
+ if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
+ cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
+ "idx = %d", f, driver, instance, idx);
+ return (DDI_FAILURE);
}
-
amd_iommu_stop(iommu);
-
- if (type == AMD_IOMMU_TEARDOWN) {
- amd_iommu_fini_page_tables(iommu);
- amd_iommu_teardown_interrupts(iommu);
- amd_iommu_teardown_exclusion(iommu);
- }
-
- amd_iommu_teardown_tables_and_buffers(iommu, type);
-
- if (type == AMD_IOMMU_QUIESCE)
- return (DDI_SUCCESS);
-
+ amd_iommu_fini_page_tables(iommu);
+ amd_iommu_teardown_interrupts(iommu);
+ amd_iommu_teardown_exclusion(iommu);
+ amd_iommu_teardown_tables_and_buffers(iommu);
if (iommu->aiomt_va != NULL) {
hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
@@ -1267,7 +1246,7 @@ amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
/* check if cap ID is secure device cap id */
if (id != PCI_CAP_ID_SECURE_DEV) {
if (amd_iommu_debug) {
- cmn_err(CE_NOTE,
+ cmn_err(CE_WARN,
"%s: %s%d: skipping IOMMU: idx(0x%x) "
"cap ID (0x%x) != secure dev capid (0x%x)",
f, driver, instance, idx, id,
@@ -1320,21 +1299,20 @@ amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
}
int
-amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep, int type)
+amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep)
{
int instance = ddi_get_instance(dip);
const char *driver = ddi_driver_name(dip);
- amd_iommu_t *iommu, *next_iommu;
+ amd_iommu_t *iommu;
int teardown;
int error = DDI_SUCCESS;
const char *f = "amd_iommu_teardown";
teardown = 0;
for (iommu = statep->aioms_iommu_start; iommu;
- iommu = next_iommu) {
+ iommu = iommu->aiomt_next) {
ASSERT(statep->aioms_nunits > 0);
- next_iommu = iommu->aiomt_next;
- if (amd_iommu_fini(iommu, type) != DDI_SUCCESS) {
+ if (amd_iommu_fini(iommu) != DDI_SUCCESS) {
error = DDI_FAILURE;
continue;
}
@@ -1416,7 +1394,7 @@ map_current_window(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
mutex_enter(&amd_iommu_pgtable_lock);
if (amd_iommu_debug == AMD_IOMMU_DEBUG_PAGE_TABLES) {
- cmn_err(CE_NOTE, "%s: %s%d: idx=%d Attempting to get cookies "
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d Attempting to get cookies "
"from handle for device %s",
f, driver, instance, idx, path);
}
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c
index 5fbee01479..e59a3dde38 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c
+++ b/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.c
@@ -510,14 +510,14 @@ amd_iommu_set_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
{
uint64_t *devtbl_entry;
amd_iommu_cmdargs_t cmdargs = {0};
- int error, flags;
+ int error;
dev_info_t *idip = iommu->aiomt_dip;
const char *driver = ddi_driver_name(idip);
int instance = ddi_get_instance(idip);
const char *f = "amd_iommu_set_devtbl_entry";
if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
- cmn_err(CE_NOTE, "%s: attempting to set devtbl entry for %s",
+ cmn_err(CE_WARN, "%s: attempting to set devtbl entry for %s",
f, path);
}
@@ -536,39 +536,10 @@ amd_iommu_set_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
[deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
- cmn_err(CE_NOTE, "%s: deviceid=%u devtbl entry (%p) for %s",
+ cmn_err(CE_WARN, "%s: deviceid=%u devtbl entry (%p) for %s",
f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
}
- /*
- * Flush internal caches, need to do this if we came up from
- * fast boot
- */
- cmdargs.ca_deviceid = deviceid;
- error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
- &cmdargs, 0, 0);
- if (error != DDI_SUCCESS) {
- cmn_err(CE_WARN, "%s: idx=%d: deviceid=%d"
- "Failed to invalidate domain in IOMMU HW cache",
- f, iommu->aiomt_idx, deviceid);
- return (error);
- }
-
- cmdargs.ca_domainid = (uint16_t)domainid;
- cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
- flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
- AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
-
- error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
- &cmdargs, flags, 0);
- if (error != DDI_SUCCESS) {
- cmn_err(CE_WARN, "%s: idx=%d: domainid=%d"
- "Failed to invalidate translations in IOMMU HW cache",
- f, iommu->aiomt_idx, cmdargs.ca_domainid);
- return (error);
- }
-
- /* Initialize device table entry */
if (init_devtbl(iommu, devtbl_entry, domainid, dp)) {
cmdargs.ca_deviceid = deviceid;
error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
@@ -611,7 +582,7 @@ amd_iommu_clear_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
[deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
- cmn_err(CE_NOTE, "%s: deviceid=%u devtbl entry (%p) for %s",
+ cmn_err(CE_WARN, "%s: deviceid=%u devtbl entry (%p) for %s",
f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
}
@@ -1577,7 +1548,7 @@ amd_iommu_map_pa2va(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
for (pfn = pfn_start; pfn <= pfn_end; pfn++, pg++) {
if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
- cmn_err(CE_NOTE, "%s: attempting to create page tables "
+ cmn_err(CE_WARN, "%s: attempting to create page tables "
"for pfn = %p, va = %p, path = %s",
f, (void *)(uintptr_t)(pfn << MMU_PAGESHIFT),
(void *)(uintptr_t)(pg << MMU_PAGESHIFT), path);
@@ -1597,7 +1568,7 @@ amd_iommu_map_pa2va(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
}
if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
- cmn_err(CE_NOTE, "%s: successfuly created page tables "
+ cmn_err(CE_WARN, "%s: successfuly created page tables "
"for pfn = %p, vapg = %p, path = %s",
f, (void *)(uintptr_t)pfn,
(void *)(uintptr_t)pg, path);
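
The block removed near the top of this file inlined a two-step hardware-cache flush (invalidate the device-table entry, then all cached translations for the domain) that was only needed after fast boot. Factored into a helper it reads as below; the helper name is hypothetical, but every other identifier comes straight from the removed lines:

/* Flush one device's DTE plus all cached translations for its domain. */
static int
flush_hw_cache(amd_iommu_t *iommu, uint16_t deviceid, uint32_t domainid)
{
	amd_iommu_cmdargs_t cmdargs = {0};
	int flags;

	cmdargs.ca_deviceid = deviceid;
	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
	    &cmdargs, 0, 0) != DDI_SUCCESS)
		return (DDI_FAILURE);

	cmdargs.ca_domainid = (uint16_t)domainid;
	cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;	/* all pages */
	flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
	    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
	return (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
	    &cmdargs, flags, 0));
}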
diff --git a/usr/src/uts/i86pc/sys/Makefile b/usr/src/uts/i86pc/sys/Makefile
index 70234741e1..d4f62525de 100644
--- a/usr/src/uts/i86pc/sys/Makefile
+++ b/usr/src/uts/i86pc/sys/Makefile
@@ -38,7 +38,6 @@ FILEMODE = 644
HDRS= \
acpidev.h \
- amd_iommu.h \
asm_misc.h \
clock.h \
cram.h \
diff --git a/usr/src/uts/i86pc/sys/amd_iommu.h b/usr/src/uts/i86pc/sys/amd_iommu.h
index 1977d063aa..d2e969f355 100644
--- a/usr/src/uts/i86pc/sys/amd_iommu.h
+++ b/usr/src/uts/i86pc/sys/amd_iommu.h
@@ -43,11 +43,8 @@ typedef struct amd_iommu_state {
int aioms_nunits; /* # of IOMMUs in function */
} amd_iommu_state_t;
-#define AMD_IOMMU_QUIESCE (0)
-#define AMD_IOMMU_TEARDOWN (1)
-
int amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep);
-int amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep, int type);
+int amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep);
int amd_iommu_lookup_src_bdf(uint16_t bdf, uint16_t *src_bdfp);
#endif /* _KERNEL */
diff --git a/usr/src/uts/intel/Makefile.files b/usr/src/uts/intel/Makefile.files
index 3216c7c555..b00253f54e 100644
--- a/usr/src/uts/intel/Makefile.files
+++ b/usr/src/uts/intel/Makefile.files
@@ -252,6 +252,17 @@ CORE_PCBE_OBJS = core_pcbe.o
AMR_OBJS = amr.o
#
+# AMD_IOMMU module
+#
+AMD_IOMMU_OBJS = \
+ amd_iommu.o \
+ amd_iommu_impl.o \
+ amd_iommu_acpi.o \
+ amd_iommu_cmd.o \
+ amd_iommu_log.o \
+ amd_iommu_page_tables.o
+
+#
# IOMMULIB module
#
IOMMULIB_OBJS = iommulib.o
diff --git a/usr/src/uts/intel/Makefile.intel.shared b/usr/src/uts/intel/Makefile.intel.shared
index a0061e02e6..5206642277 100644
--- a/usr/src/uts/intel/Makefile.intel.shared
+++ b/usr/src/uts/intel/Makefile.intel.shared
@@ -400,6 +400,7 @@ DRV_KMODS += rge
DRV_KMODS += rtls
DRV_KMODS += sfe
DRV_KMODS += amd8111s
+DRV_KMODS += amd_iommu
DRV_KMODS += igb
DRV_KMODS += ixgbe
DRV_KMODS += vr
diff --git a/usr/src/uts/intel/Makefile.rules b/usr/src/uts/intel/Makefile.rules
index e2fc0368a7..960abe810f 100644
--- a/usr/src/uts/intel/Makefile.rules
+++ b/usr/src/uts/intel/Makefile.rules
@@ -153,6 +153,10 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/amd8111s/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/amd_iommu/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
$(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/amr/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
@@ -390,6 +394,9 @@ $(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/agpmaster/%.c
$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/amd8111s/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
+$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/amd_iommu/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
$(LINTS_DIR)/%.ln: $(UTSBASE)/intel/io/amr/%.c
@($(LHEAD) $(LINT.c) $< $(LTAIL))
diff --git a/usr/src/uts/i86pc/amd_iommu/Makefile b/usr/src/uts/intel/amd_iommu/Makefile
index b9fb8e2f18..cc9da32012 100644
--- a/usr/src/uts/i86pc/amd_iommu/Makefile
+++ b/usr/src/uts/intel/amd_iommu/Makefile
@@ -23,6 +23,7 @@
#
# This Makefile drives production of the amd_iommu driver kernel module.
#
+# intel implementation architecture dependent
#
#
@@ -36,13 +37,13 @@ UTSBASE = ../..
MODULE = amd_iommu
OBJECTS = $(AMD_IOMMU_OBJS:%=$(OBJS_DIR)/%)
LINTS = $(AMD_IOMMU_OBJS:%.o=$(LINTS_DIR)/%.ln)
-ROOTMODULE = $(ROOT_PSM_DRV_DIR)/$(MODULE)
-CONF_SRCDIR = $(UTSBASE)/i86pc/io/amd_iommu
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/intel/io/amd_iommu
#
# Include common rules.
#
-include $(UTSBASE)/i86pc/Makefile.i86pc
+include $(UTSBASE)/intel/Makefile.intel
#
# Define targets
@@ -80,4 +81,4 @@ install: $(INSTALL_DEPS) $(CONF_INSTALL_DEPS)
#
# Include common targets.
#
-include $(UTSBASE)/i86pc/Makefile.targ
+include $(UTSBASE)/intel/Makefile.targ
diff --git a/usr/src/uts/intel/io/amd_iommu/amd_iommu.c b/usr/src/uts/intel/io/amd_iommu/amd_iommu.c
new file mode 100644
index 0000000000..8afa190b23
--- /dev/null
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu.c
@@ -0,0 +1,440 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/types.h>
+#include <sys/file.h>
+#include <sys/errno.h>
+#include <sys/open.h>
+#include <sys/stat.h>
+#include <sys/cred.h>
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/devops.h>
+#include <sys/ddi.h>
+
+#include <sys/amd_iommu.h>
+#include "amd_iommu_impl.h"
+#include "amd_iommu_acpi.h"
+
+
+#define AMD_IOMMU_MINOR2INST(x) (x)
+#define AMD_IOMMU_INST2MINOR(x) (x)
+#define AMD_IOMMU_NODETYPE "ddi_iommu"
+#define AMD_IOMMU_MINOR_NAME "amd-iommu"
+
+static int amd_iommu_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
+ void **result);
+static int amd_iommu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+static int amd_iommu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
+static int amd_iommu_open(dev_t *devp, int flag, int otyp, cred_t *credp);
+static int amd_iommu_close(dev_t dev, int flag, int otyp, cred_t *credp);
+static int amd_iommu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
+ cred_t *credp, int *rvalp);
+
+static struct cb_ops amd_iommu_cb_ops = {
+ amd_iommu_open, /* cb_open */
+ amd_iommu_close, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read */
+ nodev, /* cb_write */
+ amd_iommu_ioctl, /* cb_ioctl */
+ nodev, /* cb_devmap */
+ nodev, /* cb_mmap */
+ nodev, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ NULL, /* cb_str */
+ D_NEW | D_MP, /* cb_flag */
+ CB_REV, /* cb_rev */
+ nodev, /* cb_aread */
+ nodev /* cb_awrite */
+};
+
+static struct dev_ops amd_iommu_dev_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ amd_iommu_getinfo, /* devo_getinfo */
+ nulldev, /* devo_identify */
+ nulldev, /* devo_probe */
+ amd_iommu_attach, /* devo_attach */
+ amd_iommu_detach, /* devo_detach */
+ nodev, /* devo_reset */
+ &amd_iommu_cb_ops, /* devo_cb_ops */
+ NULL, /* devo_bus_ops */
+ nulldev /* devo_power */
+};
+
+static struct modldrv modldrv = {
+ &mod_driverops,
+ "AMD IOMMU 0.1",
+ &amd_iommu_dev_ops
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1,
+ (void *)&modldrv,
+ NULL
+};
+
+amd_iommu_debug_t amd_iommu_debug;
+kmutex_t amd_iommu_global_lock;
+const char *amd_iommu_modname = "amd_iommu";
+amd_iommu_alias_t **amd_iommu_alias;
+amd_iommu_page_table_hash_t amd_iommu_page_table_hash;
+static void *amd_iommu_statep;
+int amd_iommu_64bit_bug;
+int amd_iommu_unity_map;
+int amd_iommu_no_RW_perms;
+int amd_iommu_no_unmap;
+int amd_iommu_pageva_inval_all;
+int amd_iommu_disable; /* disable IOMMU */
+char *amd_iommu_disable_list; /* list of drivers bypassing IOMMU */
+
+int
+_init(void)
+{
+ int error = ENOTSUP;
+
+#if defined(__amd64) && !defined(__xpv)
+
+ error = ddi_soft_state_init(&amd_iommu_statep,
+ sizeof (struct amd_iommu_state), 1);
+ if (error) {
+ cmn_err(CE_WARN, "%s: _init: failed to init soft state.",
+ amd_iommu_modname);
+ return (error);
+ }
+
+ if (amd_iommu_acpi_init() != DDI_SUCCESS) {
+ if (amd_iommu_debug) {
+ cmn_err(CE_WARN, "%s: _init: ACPI init failed.",
+ amd_iommu_modname);
+ }
+ ddi_soft_state_fini(&amd_iommu_statep);
+ return (ENOTSUP);
+ }
+
+ amd_iommu_read_boot_props();
+
+ if (amd_iommu_page_table_hash_init(&amd_iommu_page_table_hash)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: _init: Page table hash init failed.",
+ amd_iommu_modname);
+ if (amd_iommu_disable_list) {
+ kmem_free(amd_iommu_disable_list,
+ strlen(amd_iommu_disable_list) + 1);
+ amd_iommu_disable_list = NULL;
+ }
+ amd_iommu_acpi_fini();
+ ddi_soft_state_fini(&amd_iommu_statep);
+ amd_iommu_statep = NULL;
+ return (EFAULT);
+ }
+
+ error = mod_install(&modlinkage);
+ if (error) {
+ cmn_err(CE_WARN, "%s: _init: mod_install failed.",
+ amd_iommu_modname);
+ amd_iommu_page_table_hash_fini(&amd_iommu_page_table_hash);
+ if (amd_iommu_disable_list) {
+ kmem_free(amd_iommu_disable_list,
+ strlen(amd_iommu_disable_list) + 1);
+ amd_iommu_disable_list = NULL;
+ }
+ amd_iommu_acpi_fini();
+ ddi_soft_state_fini(&amd_iommu_statep);
+ amd_iommu_statep = NULL;
+ return (error);
+ }
+ error = 0;
+#endif
+
+ return (error);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ int error;
+
+ error = mod_remove(&modlinkage);
+ if (error)
+ return (error);
+
+ amd_iommu_page_table_hash_fini(&amd_iommu_page_table_hash);
+ if (amd_iommu_disable_list) {
+ kmem_free(amd_iommu_disable_list,
+ strlen(amd_iommu_disable_list) + 1);
+ amd_iommu_disable_list = NULL;
+ }
+ amd_iommu_acpi_fini();
+ ddi_soft_state_fini(&amd_iommu_statep);
+ amd_iommu_statep = NULL;
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
+{
+ struct amd_iommu_state *statep;
+
+ ASSERT(result);
+
+ *result = NULL;
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ statep = ddi_get_soft_state(amd_iommu_statep,
+ AMD_IOMMU_MINOR2INST(getminor((dev_t)arg)));
+ if (statep) {
+ *result = statep->aioms_devi;
+ return (DDI_SUCCESS);
+ }
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ *result = (void *)(uintptr_t)
+ AMD_IOMMU_MINOR2INST(getminor((dev_t)arg));
+ return (DDI_SUCCESS);
+ }
+
+ return (DDI_FAILURE);
+}
+
+static int
+amd_iommu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ struct amd_iommu_state *statep;
+
+ ASSERT(instance >= 0);
+ ASSERT(driver);
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ if (ddi_soft_state_zalloc(amd_iommu_statep, instance)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "Unable to allocate soft state for "
+ "%s%d", driver, instance);
+ return (DDI_FAILURE);
+ }
+
+ statep = ddi_get_soft_state(amd_iommu_statep, instance);
+ if (statep == NULL) {
+ cmn_err(CE_WARN, "Unable to get soft state for "
+ "%s%d", driver, instance);
+ ddi_soft_state_free(amd_iommu_statep, instance);
+ return (DDI_FAILURE);
+ }
+
+ if (ddi_create_minor_node(dip, AMD_IOMMU_MINOR_NAME, S_IFCHR,
+ AMD_IOMMU_INST2MINOR(instance), AMD_IOMMU_NODETYPE,
+ 0) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "Unable to create minor node for "
+ "%s%d", driver, instance);
+ ddi_remove_minor_node(dip, NULL);
+ ddi_soft_state_free(amd_iommu_statep, instance);
+ return (DDI_FAILURE);
+ }
+
+ statep->aioms_devi = dip;
+ statep->aioms_instance = instance;
+ statep->aioms_iommu_start = NULL;
+ statep->aioms_iommu_end = NULL;
+
+ amd_iommu_lookup_conf_props(dip);
+
+ if (amd_iommu_disable_list) {
+ cmn_err(CE_NOTE, "AMD IOMMU disabled for the following"
+ " drivers:\n%s", amd_iommu_disable_list);
+ }
+
+ if (amd_iommu_disable) {
+ cmn_err(CE_NOTE, "AMD IOMMU disabled by user");
+ } else if (amd_iommu_setup(dip, statep) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "Unable to initialize AMD IOMMU "
+ "%s%d", driver, instance);
+ ddi_remove_minor_node(dip, NULL);
+ ddi_soft_state_free(amd_iommu_statep, instance);
+ return (DDI_FAILURE);
+ }
+
+ ddi_report_dev(dip);
+
+ return (DDI_SUCCESS);
+
+ case DDI_RESUME:
+ return (DDI_SUCCESS);
+ default:
+ return (DDI_FAILURE);
+ }
+}
+
+static int
+amd_iommu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ struct amd_iommu_state *statep;
+
+ ASSERT(instance >= 0);
+ ASSERT(driver);
+
+ switch (cmd) {
+ case DDI_DETACH:
+ statep = ddi_get_soft_state(amd_iommu_statep, instance);
+ if (statep == NULL) {
+ cmn_err(CE_WARN, "%s%d: Cannot get soft state",
+ driver, instance);
+ return (DDI_FAILURE);
+ }
+ return (DDI_FAILURE);
+ case DDI_SUSPEND:
+ return (DDI_SUCCESS);
+ default:
+ return (DDI_FAILURE);
+ }
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_open(dev_t *devp, int flag, int otyp, cred_t *credp)
+{
+ int instance = AMD_IOMMU_MINOR2INST(getminor(*devp));
+ struct amd_iommu_state *statep;
+ const char *f = "amd_iommu_open";
+
+ if (instance < 0) {
+ cmn_err(CE_WARN, "%s: invalid instance %d",
+ f, instance);
+ return (ENXIO);
+ }
+
+ if (!(flag & (FREAD|FWRITE))) {
+ cmn_err(CE_WARN, "%s: invalid flags %d", f, flag);
+ return (EINVAL);
+ }
+
+ if (otyp != OTYP_CHR) {
+ cmn_err(CE_WARN, "%s: invalid otyp %d", f, otyp);
+ return (EINVAL);
+ }
+
+ statep = ddi_get_soft_state(amd_iommu_statep, instance);
+ if (statep == NULL) {
+ cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
+ f, instance);
+ return (ENXIO);
+ }
+
+ ASSERT(statep->aioms_instance == instance);
+
+ return (0);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_close(dev_t dev, int flag, int otyp, cred_t *credp)
+{
+ int instance = AMD_IOMMU_MINOR2INST(getminor(dev));
+ struct amd_iommu_state *statep;
+ const char *f = "amd_iommu_close";
+
+ if (instance < 0) {
+ cmn_err(CE_WARN, "%s: invalid instance %d", f, instance);
+ return (ENXIO);
+ }
+
+ if (!(flag & (FREAD|FWRITE))) {
+ cmn_err(CE_WARN, "%s: invalid flags %d", f, flag);
+ return (EINVAL);
+ }
+
+ if (otyp != OTYP_CHR) {
+ cmn_err(CE_WARN, "%s: invalid otyp %d", f, otyp);
+ return (EINVAL);
+ }
+
+ statep = ddi_get_soft_state(amd_iommu_statep, instance);
+ if (statep == NULL) {
+ cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
+ f, instance);
+ return (ENXIO);
+ }
+
+ ASSERT(statep->aioms_instance == instance);
+ return (0);
+
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
+ int *rvalp)
+{
+ int instance = AMD_IOMMU_MINOR2INST(getminor(dev));
+ struct amd_iommu_state *statep;
+ const char *f = "amd_iommu_ioctl";
+
+ ASSERT(*rvalp);
+
+ if (instance < 0) {
+ cmn_err(CE_WARN, "%s: invalid instance %d", f, instance);
+ return (ENXIO);
+ }
+
+
+ if (!(mode & (FREAD|FWRITE))) {
+ cmn_err(CE_WARN, "%s: invalid mode %d", f, mode);
+ return (EINVAL);
+ }
+
+ if (mode & FKIOCTL) {
+ cmn_err(CE_WARN, "%s: FKIOCTL unsupported mode %d", f, mode);
+ return (EINVAL);
+ }
+
+ statep = ddi_get_soft_state(amd_iommu_statep, instance);
+ if (statep == NULL) {
+ cmn_err(CE_WARN, "%s: cannot get soft state: instance %d",
+ f, instance);
+ return (ENXIO);
+ }
+
+ ASSERT(statep->aioms_instance == instance);
+
+ return (ENOTTY);
+}
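
The cb_ops in this new file expose the character node purely for ioctls, and amd_iommu_ioctl() above rejects every command with ENOTTY. A minimal user-level smoke test would look like the sketch below; the /devices path is an assumption derived from AMD_IOMMU_MINOR_NAME, not something this change installs:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* path assumed from AMD_IOMMU_MINOR_NAME ("amd-iommu") */
	int fd = open("/devices/pseudo/amd_iommu@0:amd-iommu", O_RDWR);

	if (fd == -1) {
		perror("open amd-iommu node");
		return (1);
	}
	/* any ioctl is expected to fail with ENOTTY for now */
	(void) close(fd);
	return (0);
}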
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu.conf b/usr/src/uts/intel/io/amd_iommu/amd_iommu.conf
index 7110453a8d..7110453a8d 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu.conf
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu.conf
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c b/usr/src/uts/intel/io/amd_iommu/amd_iommu_acpi.c
index 83a808cf16..9d64ab3c0c 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu_acpi.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.h b/usr/src/uts/intel/io/amd_iommu/amd_iommu_acpi.h
index adcccb4859..78106a52e2 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.h
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu_acpi.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_cmd.c b/usr/src/uts/intel/io/amd_iommu/amd_iommu_cmd.c
index e86f771742..6f66c0dd23 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_cmd.c
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu_cmd.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
diff --git a/usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.c b/usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.c
new file mode 100644
index 0000000000..8deb468c2f
--- /dev/null
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.c
@@ -0,0 +1,1880 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/sunddi.h>
+#include <sys/iommulib.h>
+#include <sys/amd_iommu.h>
+#include <sys/pci_cap.h>
+#include <sys/bootconf.h>
+#include <sys/ddidmareq.h>
+
+#include "amd_iommu_impl.h"
+#include "amd_iommu_acpi.h"
+#include "amd_iommu_page_tables.h"
+
+static int amd_iommu_fini(amd_iommu_t *iommu);
+static void amd_iommu_teardown_interrupts(amd_iommu_t *iommu);
+static void amd_iommu_stop(amd_iommu_t *iommu);
+
+static int amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip);
+static int amd_iommu_allochdl(iommulib_handle_t handle,
+ dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
+ int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep);
+static int amd_iommu_freehdl(iommulib_handle_t handle,
+ dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
+static int amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
+ dev_info_t *rdip, ddi_dma_handle_t dma_handle,
+ struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
+ uint_t *ccountp);
+static int amd_iommu_unbindhdl(iommulib_handle_t handle,
+ dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle);
+static int amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
+ dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
+ size_t len, uint_t cache_flags);
+static int amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
+ dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
+ off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
+ uint_t *ccountp);
+static int amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
+ dev_info_t *rdip, struct ddi_dma_req *dmareq,
+ ddi_dma_handle_t *dma_handle);
+static int amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
+ dev_info_t *rdip, ddi_dma_handle_t dma_handle,
+ enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
+ caddr_t *objpp, uint_t cache_flags);
+
+static int unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
+ ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked);
+
+extern void *device_arena_alloc(size_t size, int vm_flag);
+extern void device_arena_free(void * vaddr, size_t size);
+
+ddi_dma_attr_t amd_iommu_dma_attr = {
+ DMA_ATTR_V0,
+ 0U, /* dma_attr_addr_lo */
+ 0xffffffffffffffffULL, /* dma_attr_addr_hi */
+ 0xffffffffU, /* dma_attr_count_max */
+ (uint64_t)4096, /* dma_attr_align */
+ 1, /* dma_attr_burstsizes */
+ 64, /* dma_attr_minxfer */
+ 0xffffffffU, /* dma_attr_maxxfer */
+ 0xffffffffU, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen, variable */
+ 64, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+ddi_device_acc_attr_t amd_iommu_devacc = {
+ DDI_DEVICE_ATTR_V0,
+ DDI_NEVERSWAP_ACC,
+ DDI_STRICTORDER_ACC
+};
+
+struct iommulib_ops amd_iommulib_ops = {
+ IOMMU_OPS_VERSION,
+ AMD_IOMMU,
+ "AMD IOMMU Vers. 1",
+ NULL,
+ amd_iommu_probe,
+ amd_iommu_allochdl,
+ amd_iommu_freehdl,
+ amd_iommu_bindhdl,
+ amd_iommu_unbindhdl,
+ amd_iommu_sync,
+ amd_iommu_win,
+ amd_iommu_map,
+ amd_iommu_mctl
+};
+
+static kmutex_t amd_iommu_pgtable_lock;
+
+static int
+amd_iommu_register(amd_iommu_t *iommu)
+{
+ dev_info_t *dip = iommu->aiomt_dip;
+ const char *driver = ddi_driver_name(dip);
+ int instance = ddi_get_instance(dip);
+ iommulib_ops_t *iommulib_ops;
+ iommulib_handle_t handle;
+ const char *f = "amd_iommu_register";
+
+ iommulib_ops = kmem_zalloc(sizeof (iommulib_ops_t), KM_SLEEP);
+
+ *iommulib_ops = amd_iommulib_ops;
+
+ iommulib_ops->ilops_data = (void *)iommu;
+ iommu->aiomt_iommulib_ops = iommulib_ops;
+
+ if (iommulib_iommu_register(dip, iommulib_ops, &handle)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Register with iommulib "
+ "failed idx=%d", f, driver, instance, iommu->aiomt_idx);
+ kmem_free(iommulib_ops, sizeof (iommulib_ops_t));
+ return (DDI_FAILURE);
+ }
+
+ iommu->aiomt_iommulib_handle = handle;
+
+ return (DDI_SUCCESS);
+}
+
+static int
+amd_iommu_unregister(amd_iommu_t *iommu)
+{
+ if (iommu->aiomt_iommulib_handle == NULL) {
+ /* we never registered */
+ return (DDI_SUCCESS);
+ }
+
+ if (iommulib_iommu_unregister(iommu->aiomt_iommulib_handle)
+ != DDI_SUCCESS) {
+ return (DDI_FAILURE);
+ }
+
+ kmem_free(iommu->aiomt_iommulib_ops, sizeof (iommulib_ops_t));
+ iommu->aiomt_iommulib_ops = NULL;
+ iommu->aiomt_iommulib_handle = NULL;
+
+ return (DDI_SUCCESS);
+}
+
+static int
+amd_iommu_setup_passthru(amd_iommu_t *iommu)
+{
+ gfx_entry_t *gfxp;
+ dev_info_t *dip;
+
+ /*
+ * Setup passthru mapping for "special" devices
+ */
+ amd_iommu_set_passthru(iommu, NULL);
+
+ for (gfxp = gfx_devinfo_list; gfxp; gfxp = gfxp->g_next) {
+ gfxp->g_ref++;
+ dip = gfxp->g_dip;
+ if (dip) {
+ amd_iommu_set_passthru(iommu, dip);
+ }
+ gfxp->g_ref--;
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static int
+amd_iommu_start(amd_iommu_t *iommu)
+{
+ dev_info_t *dip = iommu->aiomt_dip;
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ amd_iommu_acpi_ivhd_t *hinfop;
+ const char *f = "amd_iommu_start";
+
+ hinfop = amd_iommu_lookup_all_ivhd();
+
+ /*
+ * Disable HT tunnel translation.
+ * XXX use ACPI
+ */
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_HT_TUN_ENABLE, 0);
+
+ if (hinfop) {
+ if (amd_iommu_debug) {
+ cmn_err(CE_NOTE,
+ "amd_iommu: using ACPI for CTRL registers");
+ }
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_ISOC, hinfop->ach_Isoc);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_RESPASSPW, hinfop->ach_ResPassPW);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_PASSPW, hinfop->ach_PassPW);
+ }
+
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_INVTO, 5);
+
+
+ /*
+ * The Device table entry bit 0 (V) controls whether the device
+ * table entry is valid for address translation and Device table
+ * entry bit 128 (IV) controls whether interrupt remapping is valid.
+ * By setting both to zero we are essentially doing pass-thru. Since
+ * this table is zeroed on allocation, essentially we will have
+ * pass-thru when IOMMU is enabled.
+ */
+
+ /* Finally enable the IOMMU ... */
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_ENABLE, 1);
+
+ if (amd_iommu_debug) {
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+ "Successfully started AMD IOMMU", f, driver, instance,
+ iommu->aiomt_idx);
+ }
+ cmn_err(CE_NOTE, "AMD IOMMU (%d,%d) enabled",
+ instance, iommu->aiomt_idx);
+
+ return (DDI_SUCCESS);
+}
+
+static void
+amd_iommu_stop(amd_iommu_t *iommu)
+{
+ dev_info_t *dip = iommu->aiomt_dip;
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ const char *f = "amd_iommu_stop";
+
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_ENABLE, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_EVENTINT_ENABLE, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_COMWAITINT_ENABLE, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_EVENTLOG_ENABLE, 0);
+
+ /*
+ * Disable translation on HT tunnel traffic
+ */
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_HT_TUN_ENABLE, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_CMDBUF_ENABLE, 0);
+
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMYU idx=%d. "
+ "Successfully stopped AMD IOMMU", f, driver, instance,
+ iommu->aiomt_idx);
+}
+
+static int
+amd_iommu_setup_tables_and_buffers(amd_iommu_t *iommu)
+{
+ dev_info_t *dip = iommu->aiomt_dip;
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ uint32_t dma_bufsz;
+ caddr_t addr;
+ uint32_t sz;
+ uint32_t p2sz;
+ int i;
+ uint64_t *dentry;
+ int err;
+ const char *f = "amd_iommu_setup_tables_and_buffers";
+
+ /*
+ * We will put the Device Table, Command Buffer and
+ * Event Log in contiguous memory. Allocate the maximum
+ * size allowed for such structures
+ * Device Table: 256b * 64K = 32B * 64K
+ * Command Buffer: 128b * 32K = 16B * 32K
+ * Event Log: 128b * 32K = 16B * 32K
+ */
+ iommu->aiomt_devtbl_sz = (1<<AMD_IOMMU_DEVTBL_SZ) * AMD_IOMMU_DEVENT_SZ;
+ iommu->aiomt_cmdbuf_sz = (1<<AMD_IOMMU_CMDBUF_SZ) * AMD_IOMMU_CMD_SZ;
+ iommu->aiomt_eventlog_sz =
+ (1<<AMD_IOMMU_EVENTLOG_SZ) * AMD_IOMMU_EVENT_SZ;
+
+ dma_bufsz = iommu->aiomt_devtbl_sz + iommu->aiomt_cmdbuf_sz
+ + iommu->aiomt_eventlog_sz;
+
+ /*
+ * Alloc a DMA handle.
+ */
+ err = ddi_dma_alloc_handle(dip, &amd_iommu_dma_attr,
+ DDI_DMA_SLEEP, NULL, &iommu->aiomt_dmahdl);
+ if (err != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot alloc DMA handle for "
+ "AMD IOMMU tables and buffers", f, driver, instance);
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Alloc memory for tables and buffers
+ * XXX remove cast to size_t
+ */
+ err = ddi_dma_mem_alloc(iommu->aiomt_dmahdl, dma_bufsz,
+ &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
+ DDI_DMA_SLEEP, NULL, (caddr_t *)&iommu->aiomt_dma_bufva,
+ (size_t *)&iommu->aiomt_dma_mem_realsz, &iommu->aiomt_dma_mem_hdl);
+ if (err != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot alloc memory for DMA "
+ "to AMD IOMMU tables and buffers", f, driver, instance);
+ iommu->aiomt_dma_bufva = NULL;
+ iommu->aiomt_dma_mem_realsz = 0;
+ ddi_dma_free_handle(&iommu->aiomt_dmahdl);
+ iommu->aiomt_dmahdl = NULL;
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * The VA must be 4K aligned and >= table size
+ */
+ ASSERT(((uintptr_t)iommu->aiomt_dma_bufva &
+ AMD_IOMMU_TABLE_ALIGN) == 0);
+ ASSERT(iommu->aiomt_dma_mem_realsz >= dma_bufsz);
+
+ /*
+ * Now bind the handle
+ */
+ err = ddi_dma_addr_bind_handle(iommu->aiomt_dmahdl, NULL,
+ iommu->aiomt_dma_bufva, iommu->aiomt_dma_mem_realsz,
+ DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
+ NULL, &iommu->aiomt_buf_dma_cookie, &iommu->aiomt_buf_dma_ncookie);
+ if (err != DDI_DMA_MAPPED) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot bind memory for DMA "
+ "to AMD IOMMU tables and buffers. bufrealsz=%p",
+ f, driver, instance,
+ (void *)(uintptr_t)iommu->aiomt_dma_mem_realsz);
+ iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
+ iommu->aiomt_buf_dma_cookie.dmac_size = 0;
+ iommu->aiomt_buf_dma_cookie.dmac_type = 0;
+ iommu->aiomt_buf_dma_ncookie = 0;
+ ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
+ iommu->aiomt_dma_mem_hdl = NULL;
+ iommu->aiomt_dma_bufva = NULL;
+ iommu->aiomt_dma_mem_realsz = 0;
+ ddi_dma_free_handle(&iommu->aiomt_dmahdl);
+ iommu->aiomt_dmahdl = NULL;
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * We assume the DMA engine on the IOMMU is capable of handling the
+ * whole table buffer in a single cookie. If not and multiple cookies
+ * are needed we fail.
+ */
+ if (iommu->aiomt_buf_dma_ncookie != 1) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot handle multiple "
+ "cookies for DMA to AMD IOMMU tables and buffers. "
+ "#cookies=%u", f, driver, instance,
+ iommu->aiomt_buf_dma_ncookie);
+ (void) ddi_dma_unbind_handle(iommu->aiomt_dmahdl);
+ iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
+ iommu->aiomt_buf_dma_cookie.dmac_size = 0;
+ iommu->aiomt_buf_dma_cookie.dmac_type = 0;
+ iommu->aiomt_buf_dma_ncookie = 0;
+ ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
+ iommu->aiomt_dma_mem_hdl = NULL;
+ iommu->aiomt_dma_bufva = NULL;
+ iommu->aiomt_dma_mem_realsz = 0;
+ ddi_dma_free_handle(&iommu->aiomt_dmahdl);
+ iommu->aiomt_dmahdl = NULL;
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * The address in the cookie must be 4K aligned and >= table size
+ */
+ ASSERT((iommu->aiomt_buf_dma_cookie.dmac_cookie_addr
+ & AMD_IOMMU_TABLE_ALIGN) == 0);
+ ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size
+ <= iommu->aiomt_dma_mem_realsz);
+ ASSERT(iommu->aiomt_buf_dma_cookie.dmac_size >= dma_bufsz);
+
+ /*
+ * Setup the device table pointers in the iommu struct as
+ * well as the IOMMU device table register
+ */
+ iommu->aiomt_devtbl = iommu->aiomt_dma_bufva;
+ bzero(iommu->aiomt_devtbl, iommu->aiomt_devtbl_sz);
+
+ /*
+ * Set V=1 and TV = 0, so any inadvertent pass-thrus cause
+ * page faults. Also set SE bit so we aren't swamped with
+ * page fault messages
+ */
+ for (i = 0; i <= AMD_IOMMU_MAX_DEVICEID; i++) {
+ /*LINTED*/
+ dentry = (uint64_t *)&iommu->aiomt_devtbl
+ [i * AMD_IOMMU_DEVTBL_ENTRY_SZ];
+ AMD_IOMMU_REG_SET64(dentry, AMD_IOMMU_DEVTBL_V, 1);
+ AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SE, 1);
+ }
+
+ addr = (caddr_t)(uintptr_t)iommu->aiomt_buf_dma_cookie.dmac_cookie_addr;
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
+ AMD_IOMMU_DEVTABBASE, ((uint64_t)(uintptr_t)addr) >> 12);
+ sz = (iommu->aiomt_devtbl_sz >> 12) - 1;
+ ASSERT(sz <= ((1 << 9) - 1));
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
+ AMD_IOMMU_DEVTABSIZE, sz);
+
+ /*
+ * Setup the command buffer pointers
+ */
+ iommu->aiomt_cmdbuf = iommu->aiomt_devtbl +
+ iommu->aiomt_devtbl_sz;
+ bzero(iommu->aiomt_cmdbuf, iommu->aiomt_cmdbuf_sz);
+ addr += iommu->aiomt_devtbl_sz;
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
+ AMD_IOMMU_COMBASE, ((uint64_t)(uintptr_t)addr) >> 12);
+
+ p2sz = AMD_IOMMU_CMDBUF_SZ;
+ ASSERT(p2sz >= AMD_IOMMU_CMDBUF_MINSZ &&
+ p2sz <= AMD_IOMMU_CMDBUF_MAXSZ);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
+ AMD_IOMMU_COMLEN, p2sz);
+ /*LINTED*/
+ iommu->aiomt_cmd_tail = (uint32_t *)iommu->aiomt_cmdbuf;
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_head_va),
+ AMD_IOMMU_CMDHEADPTR, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_tail_va),
+ AMD_IOMMU_CMDTAILPTR, 0);
+
+ /*
+ * Setup the event log pointers
+ */
+	iommu->aiomt_eventlog = iommu->aiomt_cmdbuf +
+	    iommu->aiomt_cmdbuf_sz;
+ bzero(iommu->aiomt_eventlog, iommu->aiomt_eventlog_sz);
+ addr += iommu->aiomt_cmdbuf_sz;
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
+ AMD_IOMMU_EVENTBASE, ((uint64_t)(uintptr_t)addr) >> 12);
+ p2sz = AMD_IOMMU_EVENTLOG_SZ;
+ ASSERT(p2sz >= AMD_IOMMU_EVENTLOG_MINSZ &&
+ p2sz <= AMD_IOMMU_EVENTLOG_MAXSZ);
+	AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
+	    AMD_IOMMU_EVENTLEN, p2sz);
+ /*LINTED*/
+ iommu->aiomt_event_head = (uint32_t *)iommu->aiomt_eventlog;
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_head_va),
+ AMD_IOMMU_EVENTHEADPTR, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_tail_va),
+ AMD_IOMMU_EVENTTAILPTR, 0);
+
+ /* dma sync so device sees this init */
+ SYNC_FORDEV(iommu->aiomt_dmahdl);
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_TABLES) {
+ cmn_err(CE_NOTE, "%s: %s%d: successfully setup AMD IOMMU "
+ "tables, idx=%d", f, driver, instance, iommu->aiomt_idx);
+ }
+
+ return (DDI_SUCCESS);
+}
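+
+/*
+ * The single DMA buffer set up above is carved into three contiguous
+ * regions (one cookie, 4K aligned):
+ *
+ *	aiomt_dma_bufva + 0				device table
+ *	aiomt_dma_bufva + devtbl_sz			command buffer
+ *	aiomt_dma_bufva + devtbl_sz + cmdbuf_sz		event log
+ *
+ * The device-visible base registers are programmed from the same single
+ * cookie, so the host VAs and the device PAs advance in lockstep.
+ */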
+
+static void
+amd_iommu_teardown_tables_and_buffers(amd_iommu_t *iommu)
+{
+ dev_info_t *dip = iommu->aiomt_dip;
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ const char *f = "amd_iommu_teardown_tables_and_buffers";
+
+ iommu->aiomt_eventlog = NULL;
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
+ AMD_IOMMU_EVENTBASE, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_eventlog_va),
+ AMD_IOMMU_EVENTLEN, 0);
+
+ iommu->aiomt_cmdbuf = NULL;
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
+ AMD_IOMMU_COMBASE, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_cmdbuf_va),
+ AMD_IOMMU_COMLEN, 0);
+
+ iommu->aiomt_devtbl = NULL;
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
+ AMD_IOMMU_DEVTABBASE, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_devtbl_va),
+ AMD_IOMMU_DEVTABSIZE, 0);
+
+ if (iommu->aiomt_dmahdl == NULL)
+ return;
+
+ /* Unbind the handle */
+ if (ddi_dma_unbind_handle(iommu->aiomt_dmahdl) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: failed to unbind handle: "
+ "%p for IOMMU idx=%d", f, driver, instance,
+ (void *)iommu->aiomt_dmahdl, iommu->aiomt_idx);
+ }
+ iommu->aiomt_buf_dma_cookie.dmac_laddress = 0;
+ iommu->aiomt_buf_dma_cookie.dmac_size = 0;
+ iommu->aiomt_buf_dma_cookie.dmac_type = 0;
+ iommu->aiomt_buf_dma_ncookie = 0;
+
+ /* Free the table memory allocated for DMA */
+ ddi_dma_mem_free(&iommu->aiomt_dma_mem_hdl);
+ iommu->aiomt_dma_mem_hdl = NULL;
+ iommu->aiomt_dma_bufva = NULL;
+ iommu->aiomt_dma_mem_realsz = 0;
+
+ /* Free the DMA handle */
+ ddi_dma_free_handle(&iommu->aiomt_dmahdl);
+ iommu->aiomt_dmahdl = NULL;
+}
+
+static void
+amd_iommu_enable_interrupts(amd_iommu_t *iommu)
+{
+ ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
+ AMD_IOMMU_CMDBUF_RUN) == 0);
+ ASSERT(AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
+ AMD_IOMMU_EVENT_LOG_RUN) == 0);
+
+	/*
+	 * Must be set prior to enabling the command buffer and prior to
+	 * enabling event logging
+	 */
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_CMDBUF_ENABLE, 1);
+	/* No interrupts for completion wait - too heavyweight; use polling */
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_COMWAITINT_ENABLE, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_EVENTLOG_ENABLE, 1);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_ctrl_va),
+ AMD_IOMMU_EVENTINT_ENABLE, 1);
+}
+
+static int
+amd_iommu_setup_exclusion(amd_iommu_t *iommu)
+{
+ amd_iommu_acpi_ivmd_t *minfop;
+
+ minfop = amd_iommu_lookup_all_ivmd();
+
+ if (minfop && minfop->acm_ExclRange == 1) {
+ cmn_err(CE_NOTE, "Programming exclusion range");
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+ AMD_IOMMU_EXCL_BASE_ADDR,
+ minfop->acm_ivmd_phys_start >> 12);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+ AMD_IOMMU_EXCL_BASE_ALLOW, 1);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+ AMD_IOMMU_EXCL_BASE_EXEN, 1);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
+ AMD_IOMMU_EXCL_LIM, (minfop->acm_ivmd_phys_start +
+ minfop->acm_ivmd_phys_len) >> 12);
+ } else {
+ if (amd_iommu_debug) {
+ cmn_err(CE_NOTE, "Skipping exclusion range");
+ }
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+ AMD_IOMMU_EXCL_BASE_ADDR, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+ AMD_IOMMU_EXCL_BASE_ALLOW, 1);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_base_va),
+ AMD_IOMMU_EXCL_BASE_EXEN, 0);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_excl_lim_va),
+ AMD_IOMMU_EXCL_LIM, 0);
+ }
+
+ return (DDI_SUCCESS);
+}
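+
+/*
+ * A worked example of the exclusion-range programming above, with
+ * hypothetical IVMD values: for a range starting at 0x100000 with
+ * length 0x200000, the registers take 4K page numbers:
+ *	base  = 0x100000 >> 12 = 0x100
+ *	limit = (0x100000 + 0x200000) >> 12 = 0x300
+ */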
+
+static void
+amd_iommu_teardown_exclusion(amd_iommu_t *iommu)
+{
+ (void) amd_iommu_setup_exclusion(iommu);
+}
+
+static uint_t
+amd_iommu_intr_handler(caddr_t arg1, caddr_t arg2)
+{
+ /*LINTED*/
+ amd_iommu_t *iommu = (amd_iommu_t *)arg1;
+ dev_info_t *dip = iommu->aiomt_dip;
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ const char *f = "amd_iommu_intr_handler";
+
+ ASSERT(arg1);
+ ASSERT(arg2 == NULL);
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d. In INTR handler",
+ f, driver, instance, iommu->aiomt_idx);
+ }
+
+ if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
+ AMD_IOMMU_EVENT_LOG_INT) == 1) {
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: IOMMU unit idx=%d "
+ "Event Log Interrupt", f, driver, instance,
+ iommu->aiomt_idx);
+ }
+ (void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISPLAY);
+ WAIT_SEC(1);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
+ AMD_IOMMU_EVENT_LOG_INT, 1);
+ return (DDI_INTR_CLAIMED);
+ }
+
+ if (AMD_IOMMU_REG_GET64(REGADDR64(iommu->aiomt_reg_status_va),
+ AMD_IOMMU_EVENT_OVERFLOW_INT) == 1) {
+ cmn_err(CE_NOTE, "!%s: %s%d: IOMMU unit idx=%d "
+ "Event Overflow Interrupt", f, driver, instance,
+ iommu->aiomt_idx);
+ (void) amd_iommu_read_log(iommu, AMD_IOMMU_LOG_DISCARD);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
+ AMD_IOMMU_EVENT_LOG_INT, 1);
+ AMD_IOMMU_REG_SET64(REGADDR64(iommu->aiomt_reg_status_va),
+ AMD_IOMMU_EVENT_OVERFLOW_INT, 1);
+ return (DDI_INTR_CLAIMED);
+ }
+
+ return (DDI_INTR_UNCLAIMED);
+}
+
+static int
+amd_iommu_setup_interrupts(amd_iommu_t *iommu)
+{
+ dev_info_t *dip = iommu->aiomt_dip;
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ int intrcap0;
+ int intrcapN;
+ int type;
+ int err;
+ int req;
+ int avail;
+ int p2req;
+ int actual;
+ int i;
+ int j;
+ const char *f = "amd_iommu_setup_interrupts";
+
+ if (ddi_intr_get_supported_types(dip, &type) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: ddi_intr_get_supported_types "
+ "failed: idx=%d", f, driver, instance, iommu->aiomt_idx);
+ return (DDI_FAILURE);
+ }
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+ "Interrupt types supported = 0x%x", f, driver, instance,
+ iommu->aiomt_idx, type);
+ }
+
+ /*
+ * for now we only support MSI
+ */
+ if ((type & DDI_INTR_TYPE_MSI) == 0) {
+ cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
+ "MSI interrupts not supported. Failing init.",
+ f, driver, instance, iommu->aiomt_idx);
+ return (DDI_FAILURE);
+ }
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. MSI supported",
+ f, driver, instance, iommu->aiomt_idx);
+ }
+
+ err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &req);
+ if (err != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d. "
+ "ddi_intr_get_nintrs failed err = %d",
+ f, driver, instance, iommu->aiomt_idx, err);
+ return (DDI_FAILURE);
+ }
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+ "MSI number of interrupts requested: %d",
+ f, driver, instance, iommu->aiomt_idx, req);
+ }
+
+ if (req == 0) {
+ cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
+ "interrupts requested. Failing init", f,
+ driver, instance, iommu->aiomt_idx);
+ return (DDI_FAILURE);
+ }
+
+ err = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSI, &avail);
+ if (err != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d "
+ "ddi_intr_get_navail failed err = %d", f,
+ driver, instance, iommu->aiomt_idx, err);
+ return (DDI_FAILURE);
+ }
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+ "MSI number of interrupts available: %d",
+ f, driver, instance, iommu->aiomt_idx, avail);
+ }
+
+ if (avail == 0) {
+ cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: 0 MSI "
+ "interrupts available. Failing init", f,
+ driver, instance, iommu->aiomt_idx);
+ return (DDI_FAILURE);
+ }
+
+ if (avail < req) {
+ cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: MSI "
+ "interrupts: requested (%d) > available (%d). "
+ "Failing init", f, driver, instance, iommu->aiomt_idx,
+ req, avail);
+ return (DDI_FAILURE);
+ }
+
+ /* Allocate memory for DDI interrupt handles */
+ iommu->aiomt_intr_htable_sz = req * sizeof (ddi_intr_handle_t);
+ iommu->aiomt_intr_htable = kmem_zalloc(iommu->aiomt_intr_htable_sz,
+ KM_SLEEP);
+
+ iommu->aiomt_intr_state = AMD_IOMMU_INTR_TABLE;
+
+	/* Round req down to a power of two, as required for MSI allocation */
+	p2req = 0;
+	while ((1 << p2req) <= req)
+		p2req++;
+	p2req--;
+	req = 1 << p2req;
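+	/*
+	 * For example, req = 3 rounds down to req = 2 (p2req = 1), while a
+	 * req that is already a power of two is left unchanged.
+	 */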
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+ "MSI power of 2 number of interrupts: %d,%d",
+ f, driver, instance, iommu->aiomt_idx, p2req, req);
+ }
+
+ err = ddi_intr_alloc(iommu->aiomt_dip, iommu->aiomt_intr_htable,
+ DDI_INTR_TYPE_MSI, 0, req, &actual, DDI_INTR_ALLOC_STRICT);
+ if (err != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+ "ddi_intr_alloc failed: err = %d",
+ f, driver, instance, iommu->aiomt_idx, err);
+ amd_iommu_teardown_interrupts(iommu);
+ return (DDI_FAILURE);
+ }
+
+ iommu->aiomt_actual_intrs = actual;
+ iommu->aiomt_intr_state = AMD_IOMMU_INTR_ALLOCED;
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d. "
+ "number of interrupts actually allocated %d",
+ f, driver, instance, iommu->aiomt_idx, actual);
+ }
+
+ if (iommu->aiomt_actual_intrs < req) {
+ cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+ "ddi_intr_alloc failed: actual (%d) < req (%d)",
+ f, driver, instance, iommu->aiomt_idx,
+ iommu->aiomt_actual_intrs, req);
+ amd_iommu_teardown_interrupts(iommu);
+ return (DDI_FAILURE);
+ }
+
+	for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
+		err = ddi_intr_add_handler(iommu->aiomt_intr_htable[i],
+		    amd_iommu_intr_handler, (void *)iommu, NULL);
+		if (err != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+			    "ddi_intr_add_handler failed: intr = %d, err = %d",
+			    f, driver, instance, iommu->aiomt_idx, i, err);
+ for (j = 0; j < i; j++) {
+ (void) ddi_intr_remove_handler(
+ iommu->aiomt_intr_htable[j]);
+ }
+ amd_iommu_teardown_interrupts(iommu);
+ return (DDI_FAILURE);
+ }
+ }
+ iommu->aiomt_intr_state = AMD_IOMMU_INTR_HANDLER;
+
+ intrcap0 = intrcapN = -1;
+ if (ddi_intr_get_cap(iommu->aiomt_intr_htable[0], &intrcap0)
+ != DDI_SUCCESS ||
+ ddi_intr_get_cap(
+ iommu->aiomt_intr_htable[iommu->aiomt_actual_intrs - 1], &intrcapN)
+ != DDI_SUCCESS || intrcap0 != intrcapN) {
+		cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+		    "ddi_intr_get_cap failed or inconsistent cap among "
+		    "interrupts: intrcap0 (%d) != intrcapN (%d)",
+		    f, driver, instance, iommu->aiomt_idx, intrcap0, intrcapN);
+ amd_iommu_teardown_interrupts(iommu);
+ return (DDI_FAILURE);
+ }
+ iommu->aiomt_intr_cap = intrcap0;
+
+ if (intrcap0 & DDI_INTR_FLAG_BLOCK) {
+ /* Need to call block enable */
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
+ "Need to call block enable",
+ f, driver, instance, iommu->aiomt_idx);
+ }
+ if (ddi_intr_block_enable(iommu->aiomt_intr_htable,
+ iommu->aiomt_actual_intrs) != DDI_SUCCESS) {
+			cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+			    "ddi_intr_block_enable failed", f, driver,
+			    instance, iommu->aiomt_idx);
+ (void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
+ iommu->aiomt_actual_intrs);
+ amd_iommu_teardown_interrupts(iommu);
+ return (DDI_FAILURE);
+ }
+ } else {
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
+ "Need to call individual enable",
+ f, driver, instance, iommu->aiomt_idx);
+ }
+ for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
+ if (ddi_intr_enable(iommu->aiomt_intr_htable[i])
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: AMD IOMMU idx=%d: "
+ "ddi_intr_enable failed: intr = %d", f,
+ driver, instance, iommu->aiomt_idx, i);
+ for (j = 0; j < i; j++) {
+ (void) ddi_intr_disable(
+ iommu->aiomt_intr_htable[j]);
+ }
+ amd_iommu_teardown_interrupts(iommu);
+ return (DDI_FAILURE);
+ }
+ }
+ }
+ iommu->aiomt_intr_state = AMD_IOMMU_INTR_ENABLED;
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
+ cmn_err(CE_NOTE, "%s: %s%d: AMD IOMMU idx=%d: "
+ "Interrupts successfully %s enabled. # of interrupts = %d",
+ f, driver, instance, iommu->aiomt_idx,
+ (intrcap0 & DDI_INTR_FLAG_BLOCK) ? "(block)" :
+ "(individually)", iommu->aiomt_actual_intrs);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static void
+amd_iommu_teardown_interrupts(amd_iommu_t *iommu)
+{
+ int i;
+
+ if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ENABLED) {
+ if (iommu->aiomt_intr_cap & DDI_INTR_FLAG_BLOCK) {
+ (void) ddi_intr_block_disable(iommu->aiomt_intr_htable,
+ iommu->aiomt_actual_intrs);
+ } else {
+ for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
+ (void) ddi_intr_disable(
+ iommu->aiomt_intr_htable[i]);
+ }
+ }
+ }
+
+ if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_HANDLER) {
+ for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
+ (void) ddi_intr_remove_handler(
+ iommu->aiomt_intr_htable[i]);
+ }
+ }
+
+ if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_ALLOCED) {
+ for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
+ (void) ddi_intr_free(iommu->aiomt_intr_htable[i]);
+ }
+ }
+ if (iommu->aiomt_intr_state & AMD_IOMMU_INTR_TABLE) {
+ kmem_free(iommu->aiomt_intr_htable,
+ iommu->aiomt_intr_htable_sz);
+ }
+ iommu->aiomt_intr_htable = NULL;
+ iommu->aiomt_intr_htable_sz = 0;
+ iommu->aiomt_intr_state = AMD_IOMMU_INTR_INVALID;
+}
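+
+/*
+ * The bitwise checks above assume the AMD_IOMMU_INTR_* state values form
+ * a cumulative bitmask (each state includes the bits of the states before
+ * it), so this teardown can be called from any intermediate failure point
+ * in amd_iommu_setup_interrupts().
+ */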
+
+static amd_iommu_t *
+amd_iommu_init(dev_info_t *dip, ddi_acc_handle_t handle, int idx,
+ uint16_t cap_base)
+{
+ amd_iommu_t *iommu;
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ uint32_t caphdr;
+ uint32_t low_addr32;
+ uint32_t hi_addr32;
+ uint32_t range;
+ uint32_t misc;
+ uint64_t pgoffset;
+ amd_iommu_acpi_global_t *global;
+ amd_iommu_acpi_ivhd_t *hinfop;
+ const char *f = "amd_iommu_init";
+
+ global = amd_iommu_lookup_acpi_global();
+ hinfop = amd_iommu_lookup_any_ivhd();
+
+ low_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
+ AMD_IOMMU_CAP_ADDR_LOW_OFF);
+ if (!(low_addr32 & AMD_IOMMU_REG_ADDR_LOCKED)) {
+ cmn_err(CE_WARN, "%s: %s%d: capability registers not locked. "
+ "Unable to use IOMMU unit idx=%d - skipping ...", f, driver,
+ instance, idx);
+ return (NULL);
+ }
+
+ iommu = kmem_zalloc(sizeof (amd_iommu_t), KM_SLEEP);
+ mutex_init(&iommu->aiomt_mutex, NULL, MUTEX_DRIVER, NULL);
+ mutex_enter(&iommu->aiomt_mutex);
+
+ mutex_init(&iommu->aiomt_cmdlock, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&iommu->aiomt_eventlock, NULL, MUTEX_DRIVER, NULL);
+
+ iommu->aiomt_dip = dip;
+ iommu->aiomt_idx = idx;
+
+ /*
+ * Since everything in the capability block is locked and RO at this
+ * point, copy everything into the IOMMU struct
+ */
+
+ /* Get cap header */
+ caphdr = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_HDR_OFF);
+ iommu->aiomt_cap_hdr = caphdr;
+ iommu->aiomt_npcache = AMD_IOMMU_REG_GET32(&caphdr,
+ AMD_IOMMU_CAP_NPCACHE);
+ iommu->aiomt_httun = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_HTTUN);
+
+ if (hinfop)
+ iommu->aiomt_iotlb = hinfop->ach_IotlbSup;
+ else
+ iommu->aiomt_iotlb =
+ AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_IOTLB);
+
+ iommu->aiomt_captype = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
+ iommu->aiomt_capid = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
+
+ /*
+ * Get address of IOMMU control registers
+ */
+ hi_addr32 = PCI_CAP_GET32(handle, 0, cap_base,
+ AMD_IOMMU_CAP_ADDR_HI_OFF);
+ iommu->aiomt_low_addr32 = low_addr32;
+ iommu->aiomt_hi_addr32 = hi_addr32;
+ low_addr32 &= ~AMD_IOMMU_REG_ADDR_LOCKED;
+
+ if (hinfop) {
+ iommu->aiomt_reg_pa = hinfop->ach_IOMMU_reg_base;
+ ASSERT(hinfop->ach_IOMMU_pci_seg == 0);
+ } else {
+ iommu->aiomt_reg_pa = ((uint64_t)hi_addr32 << 32 | low_addr32);
+ }
+
+ /*
+ * Get cap range reg
+ */
+ range = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_RANGE_OFF);
+ iommu->aiomt_range = range;
+ iommu->aiomt_rng_valid = AMD_IOMMU_REG_GET32(&range,
+ AMD_IOMMU_RNG_VALID);
+ if (iommu->aiomt_rng_valid) {
+ iommu->aiomt_rng_bus = AMD_IOMMU_REG_GET32(&range,
+ AMD_IOMMU_RNG_BUS);
+ iommu->aiomt_first_devfn = AMD_IOMMU_REG_GET32(&range,
+ AMD_IOMMU_FIRST_DEVFN);
+ iommu->aiomt_last_devfn = AMD_IOMMU_REG_GET32(&range,
+ AMD_IOMMU_LAST_DEVFN);
+ } else {
+ iommu->aiomt_rng_bus = 0;
+ iommu->aiomt_first_devfn = 0;
+ iommu->aiomt_last_devfn = 0;
+ }
+
+ if (hinfop)
+ iommu->aiomt_ht_unitid = hinfop->ach_IOMMU_UnitID;
+ else
+ iommu->aiomt_ht_unitid = AMD_IOMMU_REG_GET32(&range,
+ AMD_IOMMU_HT_UNITID);
+
+ /*
+ * Get cap misc reg
+ */
+ misc = PCI_CAP_GET32(handle, 0, cap_base, AMD_IOMMU_CAP_MISC_OFF);
+ iommu->aiomt_misc = misc;
+
+ if (global) {
+ iommu->aiomt_htatsresv = global->acg_HtAtsResv;
+ iommu->aiomt_vasize = global->acg_VAsize;
+ iommu->aiomt_pasize = global->acg_PAsize;
+ } else {
+ iommu->aiomt_htatsresv = AMD_IOMMU_REG_GET32(&misc,
+ AMD_IOMMU_HT_ATSRSV);
+ iommu->aiomt_vasize = AMD_IOMMU_REG_GET32(&misc,
+ AMD_IOMMU_VA_SIZE);
+ iommu->aiomt_pasize = AMD_IOMMU_REG_GET32(&misc,
+ AMD_IOMMU_PA_SIZE);
+ }
+
+ if (hinfop) {
+ iommu->aiomt_msinum = hinfop->ach_IOMMU_MSInum;
+ } else {
+ iommu->aiomt_msinum =
+ AMD_IOMMU_REG_GET32(&misc, AMD_IOMMU_MSINUM);
+ }
+
+ /*
+ * Set up mapping between control registers PA and VA
+ */
+ pgoffset = iommu->aiomt_reg_pa & MMU_PAGEOFFSET;
+ ASSERT(pgoffset == 0);
+ iommu->aiomt_reg_pages = mmu_btopr(AMD_IOMMU_REG_SIZE + pgoffset);
+ iommu->aiomt_reg_size = mmu_ptob(iommu->aiomt_reg_pages);
+
+ iommu->aiomt_va = (uintptr_t)device_arena_alloc(
+ ptob(iommu->aiomt_reg_pages), VM_SLEEP);
+ if (iommu->aiomt_va == 0) {
+ cmn_err(CE_WARN, "%s: %s%d: Failed to alloc VA for IOMMU "
+ "control regs. Skipping IOMMU idx=%d", f, driver,
+ instance, idx);
+ mutex_exit(&iommu->aiomt_mutex);
+ (void) amd_iommu_fini(iommu);
+ return (NULL);
+ }
+
+ hat_devload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
+ iommu->aiomt_reg_size,
+ mmu_btop(iommu->aiomt_reg_pa), PROT_READ | PROT_WRITE
+ | HAT_STRICTORDER, HAT_LOAD_LOCK);
+
+ iommu->aiomt_reg_va = iommu->aiomt_va + pgoffset;
+
+ /*
+ * Setup the various control register's VA
+ */
+ iommu->aiomt_reg_devtbl_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_DEVTBL_REG_OFF;
+ iommu->aiomt_reg_cmdbuf_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_CMDBUF_REG_OFF;
+ iommu->aiomt_reg_eventlog_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_EVENTLOG_REG_OFF;
+ iommu->aiomt_reg_ctrl_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_CTRL_REG_OFF;
+ iommu->aiomt_reg_excl_base_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_EXCL_BASE_REG_OFF;
+ iommu->aiomt_reg_excl_lim_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_EXCL_LIM_REG_OFF;
+ iommu->aiomt_reg_cmdbuf_head_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_CMDBUF_HEAD_REG_OFF;
+ iommu->aiomt_reg_cmdbuf_tail_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_CMDBUF_TAIL_REG_OFF;
+ iommu->aiomt_reg_eventlog_head_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_EVENTLOG_HEAD_REG_OFF;
+ iommu->aiomt_reg_eventlog_tail_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_EVENTLOG_TAIL_REG_OFF;
+ iommu->aiomt_reg_status_va = iommu->aiomt_reg_va +
+ AMD_IOMMU_STATUS_REG_OFF;
+
+ /*
+ * Setup the DEVICE table, CMD buffer, and LOG buffer in
+ * memory and setup DMA access to this memory location
+ */
+ if (amd_iommu_setup_tables_and_buffers(iommu) != DDI_SUCCESS) {
+ mutex_exit(&iommu->aiomt_mutex);
+ (void) amd_iommu_fini(iommu);
+ return (NULL);
+ }
+
+ if (amd_iommu_setup_exclusion(iommu) != DDI_SUCCESS) {
+ mutex_exit(&iommu->aiomt_mutex);
+ (void) amd_iommu_fini(iommu);
+ return (NULL);
+ }
+
+ amd_iommu_enable_interrupts(iommu);
+
+ if (amd_iommu_setup_interrupts(iommu) != DDI_SUCCESS) {
+ mutex_exit(&iommu->aiomt_mutex);
+ (void) amd_iommu_fini(iommu);
+ return (NULL);
+ }
+
+ /*
+ * need to setup domain table before gfx bypass
+ */
+ amd_iommu_init_page_tables(iommu);
+
+ /*
+ * Set pass-thru for special devices like IOAPIC and HPET
+ *
+ * Also, gfx devices don't use DDI for DMA. No need to register
+ * before setting up gfx passthru
+ */
+ if (amd_iommu_setup_passthru(iommu) != DDI_SUCCESS) {
+ mutex_exit(&iommu->aiomt_mutex);
+ (void) amd_iommu_fini(iommu);
+ return (NULL);
+ }
+
+ if (amd_iommu_start(iommu) != DDI_SUCCESS) {
+ mutex_exit(&iommu->aiomt_mutex);
+ (void) amd_iommu_fini(iommu);
+ return (NULL);
+ }
+
+ /* xxx register/start race */
+ if (amd_iommu_register(iommu) != DDI_SUCCESS) {
+ mutex_exit(&iommu->aiomt_mutex);
+ (void) amd_iommu_fini(iommu);
+ return (NULL);
+ }
+
+ if (amd_iommu_debug) {
+ cmn_err(CE_NOTE, "%s: %s%d: IOMMU idx=%d inited.", f, driver,
+ instance, idx);
+ }
+
+ return (iommu);
+}
+
+static int
+amd_iommu_fini(amd_iommu_t *iommu)
+{
+ int idx = iommu->aiomt_idx;
+ dev_info_t *dip = iommu->aiomt_dip;
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ const char *f = "amd_iommu_fini";
+
+ mutex_enter(&iommu->aiomt_mutex);
+ if (amd_iommu_unregister(iommu) != DDI_SUCCESS) {
+ cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit failed. "
+ "idx = %d", f, driver, instance, idx);
+ return (DDI_FAILURE);
+ }
+ amd_iommu_stop(iommu);
+ amd_iommu_fini_page_tables(iommu);
+ amd_iommu_teardown_interrupts(iommu);
+ amd_iommu_teardown_exclusion(iommu);
+ amd_iommu_teardown_tables_and_buffers(iommu);
+	if (iommu->aiomt_va != 0) {
+		hat_unload(kas.a_hat, (void *)(uintptr_t)iommu->aiomt_va,
+		    iommu->aiomt_reg_size, HAT_UNLOAD_UNLOCK);
+		device_arena_free((void *)(uintptr_t)iommu->aiomt_va,
+		    ptob(iommu->aiomt_reg_pages));
+		iommu->aiomt_va = 0;
+		iommu->aiomt_reg_va = 0;
+ }
+ mutex_destroy(&iommu->aiomt_eventlock);
+ mutex_destroy(&iommu->aiomt_cmdlock);
+ mutex_exit(&iommu->aiomt_mutex);
+ mutex_destroy(&iommu->aiomt_mutex);
+ kmem_free(iommu, sizeof (amd_iommu_t));
+
+ cmn_err(CE_NOTE, "%s: %s%d: Fini of IOMMU unit complete. idx = %d",
+ f, driver, instance, idx);
+
+ return (DDI_SUCCESS);
+}
+
+int
+amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep)
+{
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ ddi_acc_handle_t handle;
+ uint8_t base_class;
+ uint8_t sub_class;
+ uint8_t prog_class;
+ int idx;
+ uint32_t id;
+ uint16_t cap_base;
+ uint32_t caphdr;
+ uint8_t cap_type;
+ uint8_t cap_id;
+ amd_iommu_t *iommu;
+ const char *f = "amd_iommu_setup";
+
+ ASSERT(instance >= 0);
+ ASSERT(driver);
+
+ /* First setup PCI access to config space */
+
+ if (pci_config_setup(dip, &handle) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: PCI config setup failed: %s%d",
+ f, driver, instance);
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * The AMD IOMMU is part of an independent PCI function. There may be
+ * more than one IOMMU in that PCI function
+ */
+ base_class = pci_config_get8(handle, PCI_CONF_BASCLASS);
+ sub_class = pci_config_get8(handle, PCI_CONF_SUBCLASS);
+ prog_class = pci_config_get8(handle, PCI_CONF_PROGCLASS);
+
+ if (base_class != PCI_CLASS_PERIPH || sub_class != PCI_PERIPH_IOMMU ||
+ prog_class != AMD_IOMMU_PCI_PROG_IF) {
+ cmn_err(CE_WARN, "%s: %s%d: invalid PCI class(0x%x)/"
+ "subclass(0x%x)/programming interface(0x%x)", f, driver,
+ instance, base_class, sub_class, prog_class);
+ pci_config_teardown(&handle);
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Find and initialize all IOMMU units in this function
+ */
+ for (idx = 0; ; idx++) {
+ if (pci_cap_probe(handle, idx, &id, &cap_base) != DDI_SUCCESS)
+ break;
+
+ /* check if cap ID is secure device cap id */
+ if (id != PCI_CAP_ID_SECURE_DEV) {
+ if (amd_iommu_debug) {
+ cmn_err(CE_WARN,
+ "%s: %s%d: skipping IOMMU: idx(0x%x) "
+ "cap ID (0x%x) != secure dev capid (0x%x)",
+ f, driver, instance, idx, id,
+ PCI_CAP_ID_SECURE_DEV);
+ }
+ continue;
+ }
+
+ /* check if cap type is IOMMU cap type */
+ caphdr = PCI_CAP_GET32(handle, 0, cap_base,
+ AMD_IOMMU_CAP_HDR_OFF);
+ cap_type = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_TYPE);
+ cap_id = AMD_IOMMU_REG_GET32(&caphdr, AMD_IOMMU_CAP_ID);
+
+ if (cap_type != AMD_IOMMU_CAP) {
+ cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
+ "cap type (0x%x) != AMD IOMMU CAP (0x%x)", f,
+ driver, instance, idx, cap_type, AMD_IOMMU_CAP);
+ continue;
+ }
+ ASSERT(cap_id == PCI_CAP_ID_SECURE_DEV);
+ ASSERT(cap_id == id);
+
+ iommu = amd_iommu_init(dip, handle, idx, cap_base);
+ if (iommu == NULL) {
+ cmn_err(CE_WARN, "%s: %s%d: skipping IOMMU: idx(0x%x) "
+ "failed to init IOMMU", f,
+ driver, instance, idx);
+ continue;
+ }
+
+ if (statep->aioms_iommu_start == NULL) {
+ statep->aioms_iommu_start = iommu;
+ } else {
+ statep->aioms_iommu_end->aiomt_next = iommu;
+ }
+ statep->aioms_iommu_end = iommu;
+
+ statep->aioms_nunits++;
+ }
+
+ pci_config_teardown(&handle);
+
+ if (amd_iommu_debug) {
+ cmn_err(CE_NOTE, "%s: %s%d: state=%p: setup %d IOMMU units",
+ f, driver, instance, (void *)statep, statep->aioms_nunits);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+int
+amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep)
+{
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ amd_iommu_t *iommu;
+ int teardown;
+ int error = DDI_SUCCESS;
+ const char *f = "amd_iommu_teardown";
+
+ teardown = 0;
+ for (iommu = statep->aioms_iommu_start; iommu;
+ iommu = iommu->aiomt_next) {
+ ASSERT(statep->aioms_nunits > 0);
+ if (amd_iommu_fini(iommu) != DDI_SUCCESS) {
+ error = DDI_FAILURE;
+ continue;
+ }
+ statep->aioms_nunits--;
+ teardown++;
+ }
+
+	cmn_err(CE_NOTE, "%s: %s%d: state=%p: tore down %d units. "
+	    "%d units left", f, driver, instance, (void *)statep,
+	    teardown, statep->aioms_nunits);
+
+ return (error);
+}
+
+/* Interface with IOMMULIB */
+/*ARGSUSED*/
+static int
+amd_iommu_probe(iommulib_handle_t handle, dev_info_t *rdip)
+{
+ const char *driver = ddi_driver_name(rdip);
+ char *s;
+ amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
+
+ if (amd_iommu_disable_list) {
+ s = strstr(amd_iommu_disable_list, driver);
+ if (s == NULL)
+ return (DDI_SUCCESS);
+ if (s == amd_iommu_disable_list || *(s - 1) == ':') {
+ s += strlen(driver);
+ if (*s == '\0' || *s == ':') {
+ amd_iommu_set_passthru(iommu, rdip);
+ return (DDI_FAILURE);
+ }
+ }
+ }
+
+ return (DDI_SUCCESS);
+}
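+
+/*
+ * An illustrative example of the list matching above, with a hypothetical
+ * setting: if amd_iommu_disable_list is "ata:nvidia", probing a device
+ * bound to driver "ata" or "nvidia" sets pass-through and fails the probe,
+ * while "nv" does not match, since a name must be delimited by the start
+ * of the list, the end of the list, or a ':'.
+ */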
+
+/*ARGSUSED*/
+static int
+amd_iommu_allochdl(iommulib_handle_t handle,
+ dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
+ int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *dma_handlep)
+{
+ return (iommulib_iommu_dma_allochdl(dip, rdip, attr, waitfp,
+ arg, dma_handlep));
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_freehdl(iommulib_handle_t handle,
+ dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
+{
+ return (iommulib_iommu_dma_freehdl(dip, rdip, dma_handle));
+}
+
+/*ARGSUSED*/
+static int
+map_current_window(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
+ struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookie_array, uint_t ccount,
+ int km_flags)
+{
+ const char *driver = ddi_driver_name(iommu->aiomt_dip);
+ int instance = ddi_get_instance(iommu->aiomt_dip);
+ int idx = iommu->aiomt_idx;
+ int i;
+ uint64_t start_va;
+ char *path;
+ int error = DDI_FAILURE;
+ const char *f = "map_current_window";
+
+ path = kmem_alloc(MAXPATHLEN, km_flags);
+ if (path == NULL) {
+ return (DDI_DMA_NORESOURCES);
+ }
+
+ (void) ddi_pathname(rdip, path);
+ mutex_enter(&amd_iommu_pgtable_lock);
+
+	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d Attempting to get cookies "
+ "from handle for device %s",
+ f, driver, instance, idx, path);
+ }
+
+ start_va = 0;
+ for (i = 0; i < ccount; i++) {
+ if ((error = amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq,
+ cookie_array[i].dmac_cookie_addr,
+ cookie_array[i].dmac_size,
+ AMD_IOMMU_VMEM_MAP, &start_va, km_flags)) != DDI_SUCCESS) {
+ break;
+ }
+ cookie_array[i].dmac_cookie_addr = (uintptr_t)start_va;
+ cookie_array[i].dmac_type = 0;
+ }
+
+ if (i != ccount) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot map cookie# %d "
+ "for device %s", f, driver, instance, idx, i, path);
+ (void) unmap_current_window(iommu, rdip, cookie_array,
+ ccount, i, 1);
+ goto out;
+ }
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+ cmn_err(CE_NOTE, "%s: return SUCCESS", f);
+ }
+
+ error = DDI_DMA_MAPPED;
+out:
+ mutex_exit(&amd_iommu_pgtable_lock);
+ kmem_free(path, MAXPATHLEN);
+ return (error);
+}
+
+/*ARGSUSED*/
+static int
+unmap_current_window(amd_iommu_t *iommu, dev_info_t *rdip,
+ ddi_dma_cookie_t *cookie_array, uint_t ccount, int ncookies, int locked)
+{
+ const char *driver = ddi_driver_name(iommu->aiomt_dip);
+ int instance = ddi_get_instance(iommu->aiomt_dip);
+ int idx = iommu->aiomt_idx;
+ int i;
+ int error = DDI_FAILURE;
+ char *path;
+ int pathfree;
+ const char *f = "unmap_current_window";
+
+ if (!locked)
+ mutex_enter(&amd_iommu_pgtable_lock);
+
+ path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
+ if (path) {
+ (void) ddi_pathname(rdip, path);
+ pathfree = 1;
+ } else {
+ path = "<path-mem-alloc-failed>";
+ pathfree = 0;
+ }
+
+ if (ncookies == -1)
+ ncookies = ccount;
+
+ for (i = 0; i < ncookies; i++) {
+ if (amd_iommu_unmap_va(iommu, rdip,
+ cookie_array[i].dmac_cookie_addr,
+ cookie_array[i].dmac_size,
+ AMD_IOMMU_VMEM_MAP) != DDI_SUCCESS) {
+ break;
+ }
+ }
+
+ if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_COMPL_WAIT, NULL, 0, 0)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: AMD IOMMU completion wait failed for: %s",
+ f, path);
+ }
+
+ if (i != ncookies) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d Cannot unmap cookie# %d "
+ "for device %s", f, driver, instance, idx, i, path);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ error = DDI_SUCCESS;
+
+out:
+ if (pathfree)
+ kmem_free(path, MAXPATHLEN);
+ if (!locked)
+ mutex_exit(&amd_iommu_pgtable_lock);
+ return (error);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_bindhdl(iommulib_handle_t handle, dev_info_t *dip,
+ dev_info_t *rdip, ddi_dma_handle_t dma_handle,
+ struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cookiep,
+ uint_t *ccountp)
+{
+ int dma_error = DDI_DMA_NOMAPPING;
+ int error;
+ char *path;
+ ddi_dma_cookie_t *cookie_array = NULL;
+ uint_t ccount = 0;
+ ddi_dma_impl_t *hp;
+ ddi_dma_attr_t *attrp;
+ int km_flags;
+ amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
+ int instance = ddi_get_instance(rdip);
+ const char *driver = ddi_driver_name(rdip);
+ const char *f = "amd_iommu_bindhdl";
+
+ dma_error = iommulib_iommu_dma_bindhdl(dip, rdip, dma_handle,
+ dmareq, cookiep, ccountp);
+
+ if (dma_error != DDI_DMA_MAPPED && dma_error != DDI_DMA_PARTIAL_MAP)
+ return (dma_error);
+
+ km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
+
+ path = kmem_alloc(MAXPATHLEN, km_flags);
+ if (path) {
+ (void) ddi_pathname(rdip, path);
+ } else {
+ dma_error = DDI_DMA_NORESOURCES;
+ goto unbind;
+ }
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
+ cmn_err(CE_NOTE, "%s: %s got cookie (%p), #cookies: %d",
+ f, path,
+		    (void *)(uintptr_t)cookiep->dmac_cookie_addr,
+ *ccountp);
+ }
+
+ cookie_array = NULL;
+ ccount = 0;
+ if ((error = iommulib_iommu_dma_get_cookies(dip, dma_handle,
+ &cookie_array, &ccount)) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
+ "for device %s", f, driver, instance, path);
+ dma_error = error;
+ goto unbind;
+ }
+
+ hp = (ddi_dma_impl_t *)dma_handle;
+ attrp = &hp->dmai_attr;
+
+ error = map_current_window(iommu, rdip, attrp, dmareq,
+ cookie_array, ccount, km_flags);
+ if (error != DDI_SUCCESS) {
+ dma_error = error;
+ goto unbind;
+ }
+
+ if ((error = iommulib_iommu_dma_set_cookies(dip, dma_handle,
+ cookie_array, ccount)) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
+ "for device %s", f, driver, instance, path);
+ dma_error = error;
+ goto unbind;
+ }
+
+ *cookiep = cookie_array[0];
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
+ cmn_err(CE_NOTE, "%s: %s remapped cookie (%p), #cookies: %d",
+ f, path,
+ (void *)(uintptr_t)cookiep->dmac_cookie_addr,
+ *ccountp);
+ }
+
+ kmem_free(path, MAXPATHLEN);
+ ASSERT(dma_error == DDI_DMA_MAPPED || dma_error == DDI_DMA_PARTIAL_MAP);
+ return (dma_error);
+unbind:
+ kmem_free(path, MAXPATHLEN);
+ (void) iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle);
+ return (dma_error);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_unbindhdl(iommulib_handle_t handle,
+ dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t dma_handle)
+{
+ amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
+ ddi_dma_cookie_t *cookie_array = NULL;
+ uint_t ccount = 0;
+ int error = DDI_FAILURE;
+ int instance = ddi_get_instance(rdip);
+ const char *driver = ddi_driver_name(rdip);
+ const char *f = "amd_iommu_unbindhdl";
+
+ cookie_array = NULL;
+ ccount = 0;
+ if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
+ &ccount) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
+ "for device %p", f, driver, instance, (void *)rdip);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
+ "for device %p", f, driver, instance, (void *)rdip);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ if (iommulib_iommu_dma_unbindhdl(dip, rdip, dma_handle)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: failed to unbindhdl for dip=%p",
+ f, driver, instance, (void *)rdip);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ if (unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: failed to unmap current window "
+ "for dip=%p", f, driver, instance, (void *)rdip);
+ error = DDI_FAILURE;
+ } else {
+ error = DDI_SUCCESS;
+ }
+out:
+ if (cookie_array)
+ kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
+ return (error);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_sync(iommulib_handle_t handle, dev_info_t *dip,
+ dev_info_t *rdip, ddi_dma_handle_t dma_handle, off_t off,
+ size_t len, uint_t cache_flags)
+{
+ ddi_dma_cookie_t *cookie_array = NULL;
+ uint_t ccount = 0;
+ int error;
+ const char *f = "amd_iommu_sync";
+
+ cookie_array = NULL;
+ ccount = 0;
+ if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
+ &ccount) != DDI_SUCCESS) {
+ ASSERT(cookie_array == NULL);
+ cmn_err(CE_WARN, "%s: Cannot get cookies "
+ "for device %p", f, (void *)rdip);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: Cannot clear cookies "
+ "for device %p", f, (void *)rdip);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ error = iommulib_iommu_dma_sync(dip, rdip, dma_handle, off,
+ len, cache_flags);
+
+ if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
+ ccount) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: Cannot set cookies "
+ "for device %p", f, (void *)rdip);
+ error = DDI_FAILURE;
+ } else {
+ cookie_array = NULL;
+ ccount = 0;
+ }
+
+out:
+ if (cookie_array)
+ kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
+ return (error);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_win(iommulib_handle_t handle, dev_info_t *dip,
+ dev_info_t *rdip, ddi_dma_handle_t dma_handle, uint_t win,
+ off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
+ uint_t *ccountp)
+{
+ int error = DDI_FAILURE;
+ amd_iommu_t *iommu = iommulib_iommu_getdata(handle);
+ ddi_dma_cookie_t *cookie_array = NULL;
+ uint_t ccount = 0;
+ int km_flags;
+ ddi_dma_impl_t *hp;
+ ddi_dma_attr_t *attrp;
+ struct ddi_dma_req sdmareq = {0};
+ int instance = ddi_get_instance(rdip);
+ const char *driver = ddi_driver_name(rdip);
+ const char *f = "amd_iommu_win";
+
+ km_flags = iommulib_iommu_dma_get_sleep_flags(dip, dma_handle);
+
+ cookie_array = NULL;
+ ccount = 0;
+ if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
+ &ccount) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
+ "for device %p", f, driver, instance, (void *)rdip);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ if (iommulib_iommu_dma_clear_cookies(dip, dma_handle) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot clear cookies "
+ "for device %p", f, driver, instance, (void *)rdip);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ if (iommulib_iommu_dma_win(dip, rdip, dma_handle, win,
+ offp, lenp, cookiep, ccountp) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: failed switch windows for dip=%p",
+ f, driver, instance, (void *)rdip);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ (void) unmap_current_window(iommu, rdip, cookie_array, ccount, -1, 0);
+
+ if (cookie_array) {
+ kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
+ cookie_array = NULL;
+ ccount = 0;
+ }
+
+ if (iommulib_iommu_dma_get_cookies(dip, dma_handle, &cookie_array,
+ &ccount) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot get cookies "
+ "for device %p", f, driver, instance, (void *)rdip);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ hp = (ddi_dma_impl_t *)dma_handle;
+ attrp = &hp->dmai_attr;
+
+ sdmareq.dmar_flags = DDI_DMA_RDWR;
+ error = map_current_window(iommu, rdip, attrp, &sdmareq,
+ cookie_array, ccount, km_flags);
+
+ if (iommulib_iommu_dma_set_cookies(dip, dma_handle, cookie_array,
+ ccount) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: Cannot set cookies "
+ "for device %p", f, driver, instance, (void *)rdip);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ *cookiep = cookie_array[0];
+
+ return (error == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
+out:
+ if (cookie_array)
+ kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);
+
+ return (error);
+}
+
+/* Obsoleted DMA routines */
+
+/*ARGSUSED*/
+static int
+amd_iommu_map(iommulib_handle_t handle, dev_info_t *dip,
+ dev_info_t *rdip, struct ddi_dma_req *dmareq,
+ ddi_dma_handle_t *dma_handle)
+{
+ ASSERT(0);
+ return (iommulib_iommu_dma_map(dip, rdip, dmareq, dma_handle));
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_mctl(iommulib_handle_t handle, dev_info_t *dip,
+ dev_info_t *rdip, ddi_dma_handle_t dma_handle,
+ enum ddi_dma_ctlops request, off_t *offp, size_t *lenp,
+ caddr_t *objpp, uint_t cache_flags)
+{
+ ASSERT(0);
+ return (iommulib_iommu_dma_mctl(dip, rdip, dma_handle,
+ request, offp, lenp, objpp, cache_flags));
+}
+
+uint64_t
+amd_iommu_reg_get64_workaround(uint64_t *regp, uint32_t bits)
+{
+ split_t s;
+ uint32_t *ptr32 = (uint32_t *)regp;
+ uint64_t *s64p = &(s.u64);
+
+ s.u32[0] = ptr32[0];
+ s.u32[1] = ptr32[1];
+
+ return (AMD_IOMMU_REG_GET64_IMPL(s64p, bits));
+}
+
+uint64_t
+amd_iommu_reg_set64_workaround(uint64_t *regp, uint32_t bits, uint64_t value)
+{
+ split_t s;
+ uint32_t *ptr32 = (uint32_t *)regp;
+ uint64_t *s64p = &(s.u64);
+
+ s.u32[0] = ptr32[0];
+ s.u32[1] = ptr32[1];
+
+ AMD_IOMMU_REG_SET64_IMPL(s64p, bits, value);
+
+ *regp = s.u64;
+
+ return (s.u64);
+}
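+
+/*
+ * Usage sketch for the split 32-bit access workaround above (hypothetical
+ * call site; the AMD_IOMMU_REG_GET64/SET64 macros are presumed to dispatch
+ * here on chipsets that cannot do atomic 64-bit MMIO):
+ *
+ *	uint64_t *reg = REGADDR64(iommu->aiomt_reg_status_va);
+ *	if (amd_iommu_reg_get64_workaround(reg, AMD_IOMMU_CMDBUF_RUN) == 1)
+ *		...
+ *
+ * The register is copied as two 32-bit halves into a local split_t and
+ * the bit field is extracted from (or, for set, written back through)
+ * the local copy.
+ */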
+
+void
+amd_iommu_read_boot_props(void)
+{
+ char *propval;
+
+	/*
+	 * If the "amd-iommu = no/false" boot property is set,
+	 * disable the AMD IOMMU
+	 */
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
+ DDI_PROP_DONTPASS, "amd-iommu", &propval) == DDI_SUCCESS) {
+ if (strcmp(propval, "no") == 0 ||
+ strcmp(propval, "false") == 0) {
+ amd_iommu_disable = 1;
+ }
+ ddi_prop_free(propval);
+ }
+
+ /*
+ * Copy the list of drivers for which IOMMU is disabled by user.
+ */
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
+ DDI_PROP_DONTPASS, "amd-iommu-disable-list", &propval)
+ == DDI_SUCCESS) {
+ amd_iommu_disable_list = kmem_alloc(strlen(propval) + 1,
+ KM_SLEEP);
+ (void) strcpy(amd_iommu_disable_list, propval);
+ ddi_prop_free(propval);
+ }
+}
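+
+/*
+ * For example (illustrative; the driver names are hypothetical), these
+ * properties can be passed on the x86 boot line with -B:
+ *	-B amd-iommu=no
+ *	-B amd-iommu-disable-list=ata:nvidia
+ */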
+
+void
+amd_iommu_lookup_conf_props(dev_info_t *dip)
+{
+ char *disable;
+
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu", &disable)
+ == DDI_PROP_SUCCESS) {
+ if (strcmp(disable, "no") == 0) {
+ amd_iommu_disable = 1;
+ }
+ ddi_prop_free(disable);
+ }
+
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS|DDI_PROP_NOTPROM, "amd-iommu-disable-list",
+ &disable) == DDI_PROP_SUCCESS) {
+ amd_iommu_disable_list = kmem_alloc(strlen(disable) + 1,
+ KM_SLEEP);
+ (void) strcpy(amd_iommu_disable_list, disable);
+ ddi_prop_free(disable);
+ }
+}
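+
+/*
+ * The same properties can be set in the driver's .conf file, e.g.
+ * (illustrative; the driver names are hypothetical):
+ *	amd-iommu="no";
+ *	amd-iommu-disable-list="ata:nvidia";
+ * Note that unlike the boot property, the .conf lookup above only
+ * honors the value "no".
+ */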
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.h b/usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.h
index 3fdead1177..3fdead1177 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_impl.h
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu_impl.h
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_log.c b/usr/src/uts/intel/io/amd_iommu/amd_iommu_log.c
index eb72f03c19..fdf15af2a3 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_log.c
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu_log.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_log.h b/usr/src/uts/intel/io/amd_iommu/amd_iommu_log.h
index b9f269986a..2fdbc98521 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_log.h
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu_log.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
diff --git a/usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.c b/usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.c
new file mode 100644
index 0000000000..e59a3dde38
--- /dev/null
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.c
@@ -0,0 +1,1699 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/acpi/acpi.h>
+#include <sys/acpica.h>
+#include <sys/amd_iommu.h>
+#include <sys/bootconf.h>
+#include <sys/sysmacros.h>
+#include <sys/ddidmareq.h>
+
+#include "amd_iommu_impl.h"
+#include "amd_iommu_acpi.h"
+#include "amd_iommu_page_tables.h"
+
+ddi_dma_attr_t amd_iommu_pgtable_dma_attr = {
+ DMA_ATTR_V0,
+ 0U, /* dma_attr_addr_lo */
+ 0xffffffffffffffffULL, /* dma_attr_addr_hi */
+ 0xffffffffU, /* dma_attr_count_max */
+ (uint64_t)4096, /* dma_attr_align */
+ 1, /* dma_attr_burstsizes */
+ 64, /* dma_attr_minxfer */
+ 0xffffffffU, /* dma_attr_maxxfer */
+ 0xffffffffU, /* dma_attr_seg */
+ 1, /* dma_attr_sgllen, variable */
+ 64, /* dma_attr_granular */
+ 0 /* dma_attr_flags */
+};
+
+static amd_iommu_domain_t **amd_iommu_domain_table;
+
+static struct {
+ int f_count;
+ amd_iommu_page_table_t *f_list;
+} amd_iommu_pgtable_freelist;
+int amd_iommu_no_pgtable_freelist;
+
+/*ARGSUSED*/
+static int
+amd_iommu_get_src_bdf(amd_iommu_t *iommu, int32_t bdf, int32_t *src_bdfp)
+{
+ amd_iommu_acpi_ivhd_t *hinfop;
+
+ hinfop = amd_iommu_lookup_ivhd(bdf);
+ if (hinfop == NULL || hinfop->ach_src_deviceid == -1)
+ *src_bdfp = bdf;
+ else
+ *src_bdfp = hinfop->ach_src_deviceid;
+
+ return (DDI_SUCCESS);
+}
+
+static dev_info_t *
+amd_iommu_pci_dip(dev_info_t *rdip, const char *path)
+{
+ dev_info_t *pdip;
+ const char *driver = ddi_driver_name(rdip);
+ int instance = ddi_get_instance(rdip);
+ const char *f = "amd_iommu_pci_dip";
+
+ /* Hold rdip so it and its parents don't go away */
+ ndi_hold_devi(rdip);
+
+ if (ddi_is_pci_dip(rdip))
+ return (rdip);
+
+ pdip = rdip;
+	while ((pdip = ddi_get_parent(pdip)) != NULL) {
+ if (ddi_is_pci_dip(pdip)) {
+ ndi_hold_devi(pdip);
+ ndi_rele_devi(rdip);
+ return (pdip);
+ }
+ }
+
+ cmn_err(CE_WARN, "%s: %s%d dip = %p has no PCI parent, path = %s",
+ f, driver, instance, (void *)rdip, path);
+
+ ndi_rele_devi(rdip);
+
+ ASSERT(0);
+
+ return (NULL);
+}
+
+/*ARGSUSED*/
+static int
+amd_iommu_get_domain(amd_iommu_t *iommu, dev_info_t *rdip, int alias,
+ uint16_t deviceid, domain_id_t *domainid, const char *path)
+{
+ const char *f = "amd_iommu_get_domain";
+
+ *domainid = AMD_IOMMU_INVALID_DOMAIN;
+
+ ASSERT(strcmp(ddi_driver_name(rdip), "agpgart") != 0);
+
+ switch (deviceid) {
+ case AMD_IOMMU_INVALID_DOMAIN:
+ case AMD_IOMMU_IDENTITY_DOMAIN:
+ case AMD_IOMMU_PASSTHRU_DOMAIN:
+ case AMD_IOMMU_SYS_DOMAIN:
+ *domainid = AMD_IOMMU_SYS_DOMAIN;
+ break;
+ default:
+ *domainid = deviceid;
+ break;
+ }
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+ cmn_err(CE_NOTE, "%s: domainid for %s = %d",
+ f, path, *domainid);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+static uint16_t
+hash_domain(domain_id_t domainid)
+{
+ return (domainid % AMD_IOMMU_DOMAIN_HASH_SZ);
+}
+
+/*ARGSUSED*/
+void
+amd_iommu_init_page_tables(amd_iommu_t *iommu)
+{
+ amd_iommu_domain_table = kmem_zalloc(
+ sizeof (amd_iommu_domain_t *) * AMD_IOMMU_DOMAIN_HASH_SZ, KM_SLEEP);
+}
+
+/*ARGSUSED*/
+void
+amd_iommu_fini_page_tables(amd_iommu_t *iommu)
+{
+ if (amd_iommu_domain_table) {
+ kmem_free(amd_iommu_domain_table,
+ sizeof (amd_iommu_domain_t *) * AMD_IOMMU_DOMAIN_HASH_SZ);
+ amd_iommu_domain_table = NULL;
+ }
+}
+
+static amd_iommu_domain_t *
+amd_iommu_lookup_domain(amd_iommu_t *iommu, domain_id_t domainid,
+ map_type_t type, int km_flags)
+{
+ uint16_t idx;
+ amd_iommu_domain_t *dp;
+ char name[AMD_IOMMU_VMEM_NAMELEN+1];
+
+ ASSERT(amd_iommu_domain_table);
+
+ idx = hash_domain(domainid);
+
+ for (dp = amd_iommu_domain_table[idx]; dp; dp = dp->d_next) {
+ if (dp->d_domainid == domainid)
+ return (dp);
+ }
+
+ ASSERT(type != AMD_IOMMU_INVALID_MAP);
+
+ dp = kmem_zalloc(sizeof (*dp), km_flags);
+ if (dp == NULL)
+ return (NULL);
+ dp->d_domainid = domainid;
+ dp->d_pgtable_root_4K = 0; /* make this explicit */
+
+ if (type == AMD_IOMMU_VMEM_MAP) {
+ uint64_t base;
+ uint64_t size;
+ (void) snprintf(name, sizeof (name), "dvma_idx%d_domain%d",
+ iommu->aiomt_idx, domainid);
+ base = MMU_PAGESIZE;
+ size = AMD_IOMMU_SIZE_4G - MMU_PAGESIZE;
+ dp->d_vmem = vmem_create(name, (void *)(uintptr_t)base, size,
+ MMU_PAGESIZE, NULL, NULL, NULL, 0,
+ km_flags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
+ if (dp->d_vmem == NULL) {
+ kmem_free(dp, sizeof (*dp));
+ return (NULL);
+ }
+ } else {
+ dp->d_vmem = NULL;
+ }
+
+ dp->d_next = amd_iommu_domain_table[idx];
+ dp->d_prev = NULL;
+ amd_iommu_domain_table[idx] = dp;
+ if (dp->d_next)
+ dp->d_next->d_prev = dp;
+ dp->d_ref = 0;
+
+ return (dp);
+}
+
+static void
+amd_iommu_teardown_domain(amd_iommu_t *iommu, amd_iommu_domain_t *dp)
+{
+ uint16_t idx;
+ int flags;
+ amd_iommu_cmdargs_t cmdargs = {0};
+ domain_id_t domainid = dp->d_domainid;
+ const char *f = "amd_iommu_teardown_domain";
+
+ ASSERT(dp->d_ref == 0);
+
+ idx = hash_domain(dp->d_domainid);
+
+ if (dp->d_prev == NULL)
+ amd_iommu_domain_table[idx] = dp->d_next;
+ else
+ dp->d_prev->d_next = dp->d_next;
+
+ if (dp->d_next)
+ dp->d_next->d_prev = dp->d_prev;
+
+ if (dp->d_vmem != NULL) {
+ vmem_destroy(dp->d_vmem);
+ dp->d_vmem = NULL;
+ }
+
+ kmem_free(dp, sizeof (*dp));
+
+ cmdargs.ca_domainid = (uint16_t)domainid;
+ cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
+ flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
+ AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
+
+ if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
+ &cmdargs, flags, 0) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s: idx=%d: domainid=%d. "
+		    "Failed to invalidate domain in IOMMU HW cache",
+		    f, iommu->aiomt_idx, cmdargs.ca_domainid);
+ }
+}
+
+static int
+amd_iommu_get_deviceid(amd_iommu_t *iommu, dev_info_t *rdip, int32_t *deviceid,
+ int *aliasp, const char *path)
+{
+ int bus = -1;
+ int device = -1;
+ int func = -1;
+ uint16_t bdf;
+ int32_t src_bdf;
+ dev_info_t *idip = iommu->aiomt_dip;
+ const char *driver = ddi_driver_name(idip);
+ int instance = ddi_get_instance(idip);
+ dev_info_t *pci_dip;
+ const char *f = "amd_iommu_get_deviceid";
+
+ /* be conservative. Always assume an alias */
+ *aliasp = 1;
+ *deviceid = 0;
+
+	/* Check for special devices (rdip == NULL) */
+ if (rdip == NULL) {
+ if (amd_iommu_get_src_bdf(iommu, -1, &src_bdf) != DDI_SUCCESS) {
+ cmn_err(CE_WARN,
+ "%s: %s%d: idx=%d, failed to get SRC BDF "
+ "for special-device",
+ f, driver, instance, iommu->aiomt_idx);
+ return (DDI_DMA_NOMAPPING);
+ }
+ *deviceid = src_bdf;
+ *aliasp = 1;
+ return (DDI_SUCCESS);
+ }
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+ cmn_err(CE_NOTE, "%s: attempting to get deviceid for %s",
+ f, path);
+ }
+
+ pci_dip = amd_iommu_pci_dip(rdip, path);
+ if (pci_dip == NULL) {
+ cmn_err(CE_WARN, "%s: %s%d: idx = %d, failed to get PCI dip "
+ "for rdip=%p, path = %s",
+ f, driver, instance, iommu->aiomt_idx, (void *)rdip,
+ path);
+ return (DDI_DMA_NOMAPPING);
+ }
+
+ if (acpica_get_bdf(pci_dip, &bus, &device, &func) != DDI_SUCCESS) {
+ ndi_rele_devi(pci_dip);
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get BDF for "
+ "PCI dip (%p). rdip path = %s",
+ f, driver, instance, iommu->aiomt_idx,
+ (void *)pci_dip, path);
+ return (DDI_DMA_NOMAPPING);
+ }
+
+ ndi_rele_devi(pci_dip);
+
+ if (bus > UINT8_MAX || bus < 0 ||
+ device > UINT8_MAX || device < 0 ||
+ func > UINT8_MAX || func < 0) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d, invalid BDF(%d,%d,%d) "
+ "for PCI dip (%p). rdip path = %s", f, driver, instance,
+ iommu->aiomt_idx,
+ bus, device, func,
+ (void *)pci_dip, path);
+ return (DDI_DMA_NOMAPPING);
+ }
+
+ bdf = ((uint8_t)bus << 8) | ((uint8_t)device << 3) | (uint8_t)func;
+
+ if (amd_iommu_get_src_bdf(iommu, bdf, &src_bdf) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get SRC BDF "
+ "for PCI dip (%p) rdip path = %s.",
+ f, driver, instance, iommu->aiomt_idx, (void *)pci_dip,
+ path);
+ return (DDI_DMA_NOMAPPING);
+ }
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+ cmn_err(CE_NOTE, "%s: Deviceid = %u for path = %s",
+ f, src_bdf, path);
+ }
+
+ *deviceid = src_bdf;
+ *aliasp = (src_bdf != bdf);
+
+ return (DDI_SUCCESS);
+}
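+
+/*
+ * A worked example of the BDF packing above (bus in bits 15:8, device in
+ * bits 7:3, function in bits 2:0): bus 1, device 2, function 3 packs to
+ * (1 << 8) | (2 << 3) | 3 = 0x113, the 16-bit source id that the IOMMU
+ * sees for the device.
+ */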
+
+/*ARGSUSED*/
+static int
+init_devtbl(amd_iommu_t *iommu, uint64_t *devtbl_entry, domain_id_t domainid,
+ amd_iommu_domain_t *dp)
+{
+ uint64_t entry[4] = {0};
+ int i;
+
+ /* If already passthru, don't touch */
+ if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V) == 0 &&
+ AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 0) {
+ return (0);
+ }
+
+ if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V) == 1 &&
+ AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 1) {
+
+ ASSERT(dp->d_pgtable_root_4K ==
+ AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
+ AMD_IOMMU_DEVTBL_ROOT_PGTBL));
+
+ ASSERT(dp->d_domainid == AMD_IOMMU_REG_GET64(&(devtbl_entry[1]),
+ AMD_IOMMU_DEVTBL_DOMAINID));
+
+ return (0);
+ }
+
+ /* New devtbl entry for this domain. Bump up the domain ref-count */
+ dp->d_ref++;
+
+ entry[3] = 0;
+ entry[2] = 0;
+ AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SYSMGT, 1);
+ AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_EX, 1);
+ AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SD, 0);
+ AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_CACHE, 0);
+ AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_IOCTL, 1);
+ AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SA, 0);
+ AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_SE, 1);
+ AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_IOTLB, 1);
+ AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_DOMAINID,
+ (uint16_t)domainid);
+ AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_IW, 1);
+ AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_IR, 1);
+ AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_ROOT_PGTBL,
+ dp->d_pgtable_root_4K);
+ AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_PG_MODE,
+ AMD_IOMMU_PGTABLE_MAXLEVEL);
+ AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_TV,
+ domainid == AMD_IOMMU_PASSTHRU_DOMAIN ? 0 : 1);
+ AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_V,
+ domainid == AMD_IOMMU_PASSTHRU_DOMAIN ? 0 : 1);
+
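+	/*
+	 * Copy the upper words first and word 0 (which holds the V and TV
+	 * bits) last, so the hardware never sees a partially built entry
+	 */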
+ for (i = 1; i < 4; i++) {
+ devtbl_entry[i] = entry[i];
+ }
+ devtbl_entry[0] = entry[0];
+
+ /* we did an actual init */
+ return (1);
+}
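+
+/*
+ * Summary of the V/TV device table entry states as used in this file:
+ *	V=0, TV=0	pass-through: no translation, no page faults
+ *	V=1, TV=0	no translation; DMA causes page faults
+ *	V=1, TV=1	translation enabled through the root page table
+ *	V=0, TV=1	invalid combination
+ */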
+
+void
+amd_iommu_set_passthru(amd_iommu_t *iommu, dev_info_t *rdip)
+{
+ int32_t deviceid;
+ int alias;
+ uint64_t *devtbl_entry;
+ amd_iommu_cmdargs_t cmdargs = {0};
+ char *path;
+ int pathfree;
+ int V;
+ int TV;
+ int instance;
+ const char *driver;
+ const char *f = "amd_iommu_set_passthru";
+
+ if (rdip) {
+ driver = ddi_driver_name(rdip);
+ instance = ddi_get_instance(rdip);
+ } else {
+ driver = "special-device";
+ instance = 0;
+ }
+
+ path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
+ if (path) {
+ if (rdip)
+ (void) ddi_pathname(rdip, path);
+ else
+ (void) strcpy(path, "special-device");
+ pathfree = 1;
+ } else {
+ pathfree = 0;
+ path = "<path-mem-alloc-failed>";
+ }
+
+ if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
+ "Failed to get device ID for device %s.", f, driver,
+ instance,
+ iommu->aiomt_idx, (void *)rdip, path);
+ goto out;
+ }
+
+ /* No deviceid */
+ if (deviceid == -1) {
+ goto out;
+ }
+
+ if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
+ iommu->aiomt_devtbl_sz) {
+ cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
+ "for rdip (%p) exceeds device table size (%u), path=%s",
+ f, driver,
+ instance, iommu->aiomt_idx, deviceid, (void *)rdip,
+ iommu->aiomt_devtbl_sz, path);
+ goto out;
+ }
+
+ /*LINTED*/
+ devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
+ [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
+
+ V = AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V);
+ TV = AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV);
+
+ /* Already passthru */
+ if (V == 0 && TV == 0) {
+ goto out;
+ }
+
+ /* Existing translations */
+ if (V == 1 && TV == 1) {
+ goto out;
+ }
+
+ /* Invalid setting */
+ if (V == 0 && TV == 1) {
+ goto out;
+ }
+
+ AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V, 0);
+
+ cmdargs.ca_deviceid = (uint16_t)deviceid;
+ (void) amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
+ &cmdargs, 0, 0);
+
+out:
+ if (pathfree)
+ kmem_free(path, MAXPATHLEN);
+}
+
+static int
+amd_iommu_set_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
+ domain_id_t domainid, uint16_t deviceid, amd_iommu_domain_t *dp,
+ const char *path)
+{
+ uint64_t *devtbl_entry;
+ amd_iommu_cmdargs_t cmdargs = {0};
+	int error = DDI_SUCCESS;
+ dev_info_t *idip = iommu->aiomt_dip;
+ const char *driver = ddi_driver_name(idip);
+ int instance = ddi_get_instance(idip);
+ const char *f = "amd_iommu_set_devtbl_entry";
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+ cmn_err(CE_WARN, "%s: attempting to set devtbl entry for %s",
+ f, path);
+ }
+
+ if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
+ iommu->aiomt_devtbl_sz) {
+ cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
+ "for rdip (%p) exceeds device table size (%u), path=%s",
+ f, driver,
+ instance, iommu->aiomt_idx, deviceid, (void *)rdip,
+ iommu->aiomt_devtbl_sz, path);
+ return (DDI_DMA_NOMAPPING);
+ }
+
+ /*LINTED*/
+ devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
+ [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+ cmn_err(CE_WARN, "%s: deviceid=%u devtbl entry (%p) for %s",
+ f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
+ }
+
+ if (init_devtbl(iommu, devtbl_entry, domainid, dp)) {
+ cmdargs.ca_deviceid = deviceid;
+ error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
+ &cmdargs, 0, 0);
+ }
+
+ return (error);
+}
+
+int
+amd_iommu_clear_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
+ domain_id_t domainid, uint16_t deviceid, amd_iommu_domain_t *dp,
+ int *domain_freed, char *path)
+{
+ uint64_t *devtbl_entry;
+ int error = DDI_SUCCESS;
+ amd_iommu_cmdargs_t cmdargs = {0};
+ const char *driver = ddi_driver_name(iommu->aiomt_dip);
+ int instance = ddi_get_instance(iommu->aiomt_dip);
+ const char *f = "amd_iommu_clear_devtbl_entry";
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+ cmn_err(CE_NOTE, "%s: attempting to clear devtbl entry for "
+ "domainid = %d, deviceid = %u, path = %s",
+ f, domainid, deviceid, path);
+ }
+
+ if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
+ iommu->aiomt_devtbl_sz) {
+ cmn_err(CE_WARN, "%s: %s%d: IOMMU idx=%d, deviceid (%u) "
+ "for rdip (%p) exceeds device table size (%u), path = %s",
+ f, driver, instance,
+ iommu->aiomt_idx, deviceid, (void *)rdip,
+ iommu->aiomt_devtbl_sz, path);
+ return (DDI_FAILURE);
+ }
+
+ /*LINTED*/
+ devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
+ [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
+ cmn_err(CE_WARN, "%s: deviceid=%u devtbl entry (%p) for %s",
+ f, deviceid, (void *)(uintptr_t)(*devtbl_entry), path);
+ }
+
+ if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV) == 0) {
+ /* Nothing to do */
+ return (DDI_SUCCESS);
+ }
+
+ ASSERT(dp->d_pgtable_root_4K == AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
+ AMD_IOMMU_DEVTBL_ROOT_PGTBL));
+
+ ASSERT(domainid == AMD_IOMMU_REG_GET64(&(devtbl_entry[1]),
+ AMD_IOMMU_DEVTBL_DOMAINID));
+
+ AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV, 0);
+ AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_ROOT_PGTBL, 0);
+ AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V, 1);
+
+ SYNC_FORDEV(iommu->aiomt_dmahdl);
+
+ dp->d_ref--;
+ ASSERT(dp->d_ref >= 0);
+
+ if (dp->d_ref == 0) {
+ *domain_freed = 1;
+ }
+
+ cmdargs.ca_deviceid = deviceid;
+ error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
+ &cmdargs, 0, 0);
+ if (error != DDI_SUCCESS)
+ error = DDI_FAILURE;
+
+ return (error);
+}
+
+int
+amd_iommu_page_table_hash_init(amd_iommu_page_table_hash_t *ampt)
+{
+ ampt->ampt_hash = kmem_zalloc(sizeof (amd_iommu_page_table_t *) *
+ AMD_IOMMU_PGTABLE_HASH_SZ, KM_SLEEP);
+ return (DDI_SUCCESS);
+}
+
+void
+amd_iommu_page_table_hash_fini(amd_iommu_page_table_hash_t *ampt)
+{
+ kmem_free(ampt->ampt_hash,
+ sizeof (amd_iommu_page_table_t *) * AMD_IOMMU_PGTABLE_HASH_SZ);
+ ampt->ampt_hash = NULL;
+}
+
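+/*
+ * Software page table structures are tracked in a global hash keyed by
+ * the 4K page frame number of the table's DMA address. Given the address
+ * field of a hardware PDE, the owning amd_iommu_page_table_t can be
+ * looked up without keeping a back pointer in the hardware tables.
+ */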
+static uint32_t
+pt_hashfn(uint64_t pa_4K)
+{
+ return (pa_4K % AMD_IOMMU_PGTABLE_HASH_SZ);
+}
+
+static void
+amd_iommu_insert_pgtable_hash(amd_iommu_page_table_t *pt)
+{
+ uint64_t pa_4K = ((uint64_t)pt->pt_cookie.dmac_cookie_addr) >> 12;
+ uint32_t idx = pt_hashfn(pa_4K);
+
+ ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
+
+ mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
+
+ pt->pt_next = amd_iommu_page_table_hash.ampt_hash[idx];
+ pt->pt_prev = NULL;
+ amd_iommu_page_table_hash.ampt_hash[idx] = pt;
+ if (pt->pt_next)
+ pt->pt_next->pt_prev = pt;
+
+ mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
+}
+
+static void
+amd_iommu_remove_pgtable_hash(amd_iommu_page_table_t *pt)
+{
+ uint64_t pa_4K = (pt->pt_cookie.dmac_cookie_addr >> 12);
+ uint32_t idx = pt_hashfn(pa_4K);
+
+ ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
+
+ mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
+
+ if (pt->pt_next)
+ pt->pt_next->pt_prev = pt->pt_prev;
+
+ if (pt->pt_prev)
+ pt->pt_prev->pt_next = pt->pt_next;
+ else
+ amd_iommu_page_table_hash.ampt_hash[idx] = pt->pt_next;
+
+ pt->pt_next = NULL;
+ pt->pt_prev = NULL;
+
+ mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
+}
+
+static amd_iommu_page_table_t *
+amd_iommu_lookup_pgtable_hash(domain_id_t domainid, uint64_t pgtable_pa_4K)
+{
+ amd_iommu_page_table_t *pt;
+ uint32_t idx = pt_hashfn(pgtable_pa_4K);
+
+ mutex_enter(&amd_iommu_page_table_hash.ampt_lock);
+ pt = amd_iommu_page_table_hash.ampt_hash[idx];
+ for (; pt; pt = pt->pt_next) {
+ if (domainid != pt->pt_domainid)
+ continue;
+ ASSERT((pt->pt_cookie.dmac_cookie_addr &
+ AMD_IOMMU_PGTABLE_ALIGN) == 0);
+ if ((pt->pt_cookie.dmac_cookie_addr >> 12) == pgtable_pa_4K) {
+ break;
+ }
+ }
+ mutex_exit(&amd_iommu_page_table_hash.ampt_lock);
+
+ return (pt);
+}
+
+/*ARGSUSED*/
+static amd_iommu_page_table_t *
+amd_iommu_lookup_pgtable(amd_iommu_t *iommu, amd_iommu_page_table_t *ppt,
+ amd_iommu_domain_t *dp, int level, uint16_t index)
+{
+ uint64_t *pdtep;
+ uint64_t pgtable_pa_4K;
+
+ ASSERT(level > 0 && level <= AMD_IOMMU_PGTABLE_MAXLEVEL);
+ ASSERT(dp);
+
+ if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
+ ASSERT(ppt == NULL);
+ ASSERT(index == 0);
+ pgtable_pa_4K = dp->d_pgtable_root_4K;
+ } else {
+ ASSERT(ppt);
+ pdtep = &(ppt->pt_pgtblva[index]);
+ if (AMD_IOMMU_REG_GET64(pdtep, AMD_IOMMU_PTDE_PR) == 0) {
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+ cmn_err(CE_NOTE, "Skipping PR=0 pdte: 0x%"
+ PRIx64, *pdtep);
+ }
+ return (NULL);
+ }
+ pgtable_pa_4K = AMD_IOMMU_REG_GET64(pdtep, AMD_IOMMU_PTDE_ADDR);
+ }
+
+ return (amd_iommu_lookup_pgtable_hash(dp->d_domainid, pgtable_pa_4K));
+}
+
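+/*
+ * The freelist caches fully torn-down page tables (all PTEs clear) so
+ * that a new table can often be reused without another round of DMA
+ * handle allocation, memory allocation and binding.
+ */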
+static amd_iommu_page_table_t *
+amd_iommu_alloc_from_freelist(void)
+{
+ int i;
+ uint64_t *pte_array;
+ amd_iommu_page_table_t *pt;
+
+ if (amd_iommu_no_pgtable_freelist == 1)
+ return (NULL);
+
+ if (amd_iommu_pgtable_freelist.f_count == 0)
+ return (NULL);
+
+ pt = amd_iommu_pgtable_freelist.f_list;
+ amd_iommu_pgtable_freelist.f_list = pt->pt_next;
+ amd_iommu_pgtable_freelist.f_count--;
+
+ pte_array = pt->pt_pgtblva;
+ for (i = 0; i < AMD_IOMMU_PGTABLE_SZ / (sizeof (*pte_array)); i++) {
+ ASSERT(pt->pt_pte_ref[i] == 0);
+ ASSERT(AMD_IOMMU_REG_GET64(&(pte_array[i]),
+ AMD_IOMMU_PTDE_PR) == 0);
+ }
+
+ return (pt);
+}
+
+static int
+amd_iommu_alloc_pgtable(amd_iommu_t *iommu, domain_id_t domainid,
+ const char *path, amd_iommu_page_table_t **ptp, int km_flags)
+{
+ int err;
+ uint_t ncookies;
+ amd_iommu_page_table_t *pt;
+ dev_info_t *idip = iommu->aiomt_dip;
+ const char *driver = ddi_driver_name(idip);
+ int instance = ddi_get_instance(idip);
+ const char *f = "amd_iommu_alloc_pgtable";
+
+ *ptp = NULL;
+
+ pt = amd_iommu_alloc_from_freelist();
+ if (pt)
+ goto init_pgtable;
+
+ pt = kmem_zalloc(sizeof (amd_iommu_page_table_t), km_flags);
+ if (pt == NULL)
+ return (DDI_DMA_NORESOURCES);
+
+ /*
+ * Each page table is 4K in size
+ */
+ pt->pt_mem_reqsz = AMD_IOMMU_PGTABLE_SZ;
+
+ /*
+ * Alloc a DMA handle. Use the IOMMU dip as we want this DMA
+ * to *not* enter the IOMMU - no recursive entrance.
+ */
+ err = ddi_dma_alloc_handle(idip, &amd_iommu_pgtable_dma_attr,
+ km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
+ NULL, &pt->pt_dma_hdl);
+ if (err != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path = %s. "
+ "Cannot alloc DMA handle for IO Page Table",
+ f, driver, instance, domainid, path);
+ kmem_free(pt, sizeof (amd_iommu_page_table_t));
+ return (err == DDI_DMA_NORESOURCES ? err : DDI_DMA_NOMAPPING);
+ }
+
+ /*
+ * Alloc memory for IO Page Table.
+ * XXX remove size_t cast kludge
+ */
+ err = ddi_dma_mem_alloc(pt->pt_dma_hdl, pt->pt_mem_reqsz,
+ &amd_iommu_devacc, DDI_DMA_CONSISTENT|IOMEM_DATA_UNCACHED,
+ km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
+ NULL, (caddr_t *)&pt->pt_pgtblva,
+ (size_t *)&pt->pt_mem_realsz, &pt->pt_mem_hdl);
+ if (err != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: domainid=%d, path = %s. "
+ "Cannot allocate DMA memory for IO Page table",
+ f, driver, instance, domainid, path);
+ ddi_dma_free_handle(&pt->pt_dma_hdl);
+ kmem_free(pt, sizeof (amd_iommu_page_table_t));
+ return (DDI_DMA_NORESOURCES);
+ }
+
+	/*
+	 * The page table DMA VA must be 4K aligned and the allocated
+	 * size must be at least the requested size.
+	 */
+ ASSERT(((uint64_t)(uintptr_t)pt->pt_pgtblva & AMD_IOMMU_PGTABLE_ALIGN)
+ == 0);
+ ASSERT(pt->pt_mem_realsz >= pt->pt_mem_reqsz);
+
+ /*
+ * Now bind the handle
+ */
+ err = ddi_dma_addr_bind_handle(pt->pt_dma_hdl, NULL,
+ (caddr_t)pt->pt_pgtblva, pt->pt_mem_realsz,
+ DDI_DMA_READ | DDI_DMA_CONSISTENT,
+ km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
+ NULL, &pt->pt_cookie, &ncookies);
+ if (err != DDI_DMA_MAPPED) {
+ cmn_err(CE_WARN, "%s: %s%d: domainid=%d, path = %s. "
+ "Cannot bind memory for DMA to IO Page Tables. "
+ "bufrealsz=%p",
+ f, driver, instance, domainid, path,
+ (void *)(uintptr_t)pt->pt_mem_realsz);
+ ddi_dma_mem_free(&pt->pt_mem_hdl);
+ ddi_dma_free_handle(&pt->pt_dma_hdl);
+ kmem_free(pt, sizeof (amd_iommu_page_table_t));
+ return (err == DDI_DMA_PARTIAL_MAP ? DDI_DMA_NOMAPPING :
+ err);
+ }
+
+ /*
+ * We assume the DMA engine on the IOMMU is capable of handling the
+ * whole page table in a single cookie. If not and multiple cookies
+ * are needed we fail.
+ */
+ if (ncookies != 1) {
+ cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path=%s "
+ "Cannot handle multiple "
+ "cookies for DMA to IO page Table, #cookies=%u",
+ f, driver, instance, domainid, path, ncookies);
+ (void) ddi_dma_unbind_handle(pt->pt_dma_hdl);
+ ddi_dma_mem_free(&pt->pt_mem_hdl);
+ ddi_dma_free_handle(&pt->pt_dma_hdl);
+ kmem_free(pt, sizeof (amd_iommu_page_table_t));
+ return (DDI_DMA_NOMAPPING);
+ }
+
+init_pgtable:
+ /*
+ * The address in the cookie must be 4K aligned and >= table size
+ */
+	ASSERT(pt->pt_cookie.dmac_cookie_addr != 0);
+ ASSERT((pt->pt_cookie.dmac_cookie_addr & AMD_IOMMU_PGTABLE_ALIGN) == 0);
+ ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_realsz);
+ ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_reqsz);
+ ASSERT(pt->pt_mem_reqsz >= AMD_IOMMU_PGTABLE_SIZE);
+ ASSERT(pt->pt_mem_realsz >= pt->pt_mem_reqsz);
+ ASSERT(pt->pt_pgtblva);
+
+ pt->pt_domainid = AMD_IOMMU_INVALID_DOMAIN;
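+	/*
+	 * Real levels are 1..AMD_IOMMU_PGTABLE_MAXLEVEL; 0x7 here is a
+	 * sentinel until init_pt() fills in the actual level and index.
+	 */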
+ pt->pt_level = 0x7;
+ pt->pt_index = 0;
+ pt->pt_ref = 0;
+ pt->pt_next = NULL;
+ pt->pt_prev = NULL;
+ pt->pt_parent = NULL;
+
+ bzero(pt->pt_pgtblva, pt->pt_mem_realsz);
+ SYNC_FORDEV(pt->pt_dma_hdl);
+
+ amd_iommu_insert_pgtable_hash(pt);
+
+ *ptp = pt;
+
+ return (DDI_SUCCESS);
+}
+
+static int
+amd_iommu_move_to_freelist(amd_iommu_page_table_t *pt)
+{
+ if (amd_iommu_no_pgtable_freelist == 1)
+ return (DDI_FAILURE);
+
+ if (amd_iommu_pgtable_freelist.f_count ==
+ AMD_IOMMU_PGTABLE_FREELIST_MAX)
+ return (DDI_FAILURE);
+
+ pt->pt_next = amd_iommu_pgtable_freelist.f_list;
+ amd_iommu_pgtable_freelist.f_list = pt;
+ amd_iommu_pgtable_freelist.f_count++;
+
+ return (DDI_SUCCESS);
+}
+
+static void
+amd_iommu_free_pgtable(amd_iommu_t *iommu, amd_iommu_page_table_t *pt)
+{
+ int i;
+ uint64_t *pte_array;
+ dev_info_t *dip = iommu->aiomt_dip;
+ int instance = ddi_get_instance(dip);
+ const char *driver = ddi_driver_name(dip);
+ const char *f = "amd_iommu_free_pgtable";
+
+ ASSERT(pt->pt_ref == 0);
+
+ amd_iommu_remove_pgtable_hash(pt);
+
+ pte_array = pt->pt_pgtblva;
+ for (i = 0; i < AMD_IOMMU_PGTABLE_SZ / (sizeof (*pte_array)); i++) {
+ ASSERT(pt->pt_pte_ref[i] == 0);
+ ASSERT(AMD_IOMMU_REG_GET64(&(pte_array[i]),
+ AMD_IOMMU_PTDE_PR) == 0);
+ }
+
+ if (amd_iommu_move_to_freelist(pt) == DDI_SUCCESS)
+ return;
+
+ /* Unbind the handle */
+ if (ddi_dma_unbind_handle(pt->pt_dma_hdl) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d, domainid=%d. "
+ "Failed to unbind handle: %p for IOMMU Page Table",
+ f, driver, instance, iommu->aiomt_idx, pt->pt_domainid,
+ (void *)pt->pt_dma_hdl);
+ }
+ /* Free the table memory allocated for DMA */
+ ddi_dma_mem_free(&pt->pt_mem_hdl);
+
+ /* Free the DMA handle */
+ ddi_dma_free_handle(&pt->pt_dma_hdl);
+
+ kmem_free(pt, sizeof (amd_iommu_page_table_t));
+}
+
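+/*
+ * init_pde() points a page directory entry in the parent table at a
+ * child table. The parent's pt_ref counts live PDEs so that teardown
+ * can free a table once its last entry is cleared. The PR (present)
+ * bit is set last, after the address and level fields are in place.
+ */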
+static int
+init_pde(amd_iommu_page_table_t *ppt, amd_iommu_page_table_t *pt)
+{
+ uint64_t *pdep = &(ppt->pt_pgtblva[pt->pt_index]);
+ uint64_t next_pgtable_pa_4K = (pt->pt_cookie.dmac_cookie_addr) >> 12;
+
+ /* nothing to set. PDE is already set */
+ if (AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_PR) == 1) {
+ ASSERT(PT_REF_VALID(ppt));
+ ASSERT(PT_REF_VALID(pt));
+ ASSERT(ppt->pt_pte_ref[pt->pt_index] == 0);
+ ASSERT(AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_ADDR)
+ == next_pgtable_pa_4K);
+ return (DDI_SUCCESS);
+ }
+
+ ppt->pt_ref++;
+ ASSERT(PT_REF_VALID(ppt));
+
+ /* Page Directories are always RW */
+ AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_IW, 1);
+ AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_IR, 1);
+ AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_ADDR,
+ next_pgtable_pa_4K);
+ pt->pt_parent = ppt;
+ AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_NXT_LVL,
+ pt->pt_level);
+ ppt->pt_pte_ref[pt->pt_index] = 0;
+ AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_PR, 1);
+ SYNC_FORDEV(ppt->pt_dma_hdl);
+ ASSERT(AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_PR) == 1);
+
+ return (DDI_SUCCESS);
+}
+
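+/*
+ * init_pte() installs (or revalidates) the leaf mapping. The same page
+ * may be bound more than once, so pt_pte_ref[] counts users of each PTE
+ * and an existing entry only has its R/W permissions widened in place.
+ */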
+static int
+init_pte(amd_iommu_page_table_t *pt, uint64_t pa, uint16_t index,
+ struct ddi_dma_req *dmareq)
+{
+ uint64_t *ptep = &(pt->pt_pgtblva[index]);
+ uint64_t pa_4K = pa >> 12;
+ int R;
+ int W;
+
+ /* nothing to set if PTE is already set */
+ if (AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_PR) == 1) {
+		/*
+		 * Adjust the current permissions. DDI_DMA_WRITE means the
+		 * DMA direction is memory -> I/O, which requires memory
+		 * READ permission, i.e. the sense is inverted.
+		 * Note: either or both of DDI_DMA_READ/WRITE may be set.
+		 */
+ if (amd_iommu_no_RW_perms == 0) {
+ R = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_IR);
+ W = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_IW);
+ if (R == 0 && ((dmareq->dmar_flags & DDI_DMA_WRITE) ||
+ (dmareq->dmar_flags & DDI_DMA_RDWR))) {
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
+ }
+ if (W == 0 && ((dmareq->dmar_flags & DDI_DMA_READ) ||
+ (dmareq->dmar_flags & DDI_DMA_RDWR))) {
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
+ }
+ }
+ ASSERT(PT_REF_VALID(pt));
+ pt->pt_pte_ref[index]++;
+ ASSERT(AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_ADDR)
+ == pa_4K);
+ return (DDI_SUCCESS);
+ }
+
+ pt->pt_ref++;
+ ASSERT(PT_REF_VALID(pt));
+
+ /* see comment above about inverting sense of RD/WR */
+ if (amd_iommu_no_RW_perms == 0) {
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 0);
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 0);
+ if (dmareq->dmar_flags & DDI_DMA_RDWR) {
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
+ } else {
+ if (dmareq->dmar_flags & DDI_DMA_WRITE) {
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
+ }
+ if (dmareq->dmar_flags & DDI_DMA_READ) {
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
+ }
+ }
+ } else {
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
+ }
+
+ /* TODO what is correct for FC and U */
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTE_FC, 0);
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTE_U, 0);
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_ADDR, pa_4K);
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_NXT_LVL, 0);
+ ASSERT(pt->pt_pte_ref[index] == 0);
+ pt->pt_pte_ref[index] = 1;
+ AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_PR, 1);
+ SYNC_FORDEV(pt->pt_dma_hdl);
+ ASSERT(AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_PR) == 1);
+
+ return (DDI_SUCCESS);
+}
+
+static void
+init_pt(amd_iommu_page_table_t *pt, amd_iommu_domain_t *dp,
+ int level, uint16_t index)
+{
+ ASSERT(dp);
+
+ if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
+ dp->d_pgtable_root_4K = (pt->pt_cookie.dmac_cookie_addr) >> 12;
+ } else {
+ ASSERT(level >= 1 && level < AMD_IOMMU_PGTABLE_MAXLEVEL);
+ }
+
+ pt->pt_domainid = dp->d_domainid;
+ pt->pt_level = level;
+ pt->pt_index = index;
+}
+
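+/*
+ * Set up one level of the page table walk for "va": find the table in
+ * the hash or allocate a fresh one, hook it into the parent through a
+ * PDE, and at level 1 install the leaf PTE for "pa". For upper levels
+ * the table and next index are handed back via ptp/next_idxp so the
+ * caller can continue the walk.
+ */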
+static int
+amd_iommu_setup_1_pgtable(amd_iommu_t *iommu, dev_info_t *rdip,
+ struct ddi_dma_req *dmareq,
+ domain_id_t domainid, amd_iommu_domain_t *dp,
+ amd_iommu_page_table_t *ppt,
+ uint16_t index, int level, uint64_t va, uint64_t pa,
+ amd_iommu_page_table_t **ptp, uint16_t *next_idxp, const char *path,
+ int km_flags)
+{
+ int error;
+ amd_iommu_page_table_t *pt;
+ const char *driver = ddi_driver_name(rdip);
+ int instance = ddi_get_instance(rdip);
+ const char *f = "amd_iommu_setup_1_pgtable";
+
+ *ptp = NULL;
+ *next_idxp = 0;
+ error = DDI_SUCCESS;
+
+ ASSERT(level > 0 && level <= AMD_IOMMU_PGTABLE_MAXLEVEL);
+
+ ASSERT(dp);
+ if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
+ ASSERT(ppt == NULL);
+ ASSERT(index == 0);
+ } else {
+ ASSERT(ppt);
+ }
+
+ /* Check if page table is already allocated */
+	if ((pt = amd_iommu_lookup_pgtable(iommu, ppt, dp, level,
+	    index)) != NULL) {
+ ASSERT(pt->pt_domainid == domainid);
+ ASSERT(pt->pt_level == level);
+ ASSERT(pt->pt_index == index);
+ goto out;
+ }
+
+ if ((error = amd_iommu_alloc_pgtable(iommu, domainid, path, &pt,
+ km_flags)) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx = %u, domainid = %d, va = %p "
+ "path = %s", f, driver, instance, iommu->aiomt_idx,
+ domainid, (void *)(uintptr_t)va, path);
+ return (error);
+ }
+
+ ASSERT(dp->d_domainid == domainid);
+
+ init_pt(pt, dp, level, index);
+
+out:
+ if (level != AMD_IOMMU_PGTABLE_MAXLEVEL) {
+ error = init_pde(ppt, pt);
+ }
+
+ if (level == 1) {
+ ASSERT(error == DDI_SUCCESS);
+ error = init_pte(pt, pa, AMD_IOMMU_VA_BITS(va, level), dmareq);
+ } else {
+ *next_idxp = AMD_IOMMU_VA_BITS(va, level);
+ *ptp = pt;
+ }
+
+ return (error);
+}
+
+typedef enum {
+ PDTE_NOT_TORN = 0x1,
+ PDTE_TORN_DOWN = 0x2,
+ PGTABLE_TORN_DOWN = 0x4
+} pdte_tear_t;
+
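+/*
+ * Tear down one PDE/PTE. The return value tells the caller's unwind
+ * loop how far the teardown went: entry still referenced (NOT_TORN),
+ * entry cleared but table kept (TORN_DOWN), or entry cleared and its
+ * whole table freed because no entries remain (PGTABLE_TORN_DOWN).
+ */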
+static pdte_tear_t
+amd_iommu_teardown_pdte(amd_iommu_t *iommu,
+ amd_iommu_page_table_t *pt, int index)
+{
+ uint8_t next_level;
+ pdte_tear_t retval;
+ uint64_t *ptdep = &(pt->pt_pgtblva[index]);
+
+ next_level = AMD_IOMMU_REG_GET64(ptdep,
+ AMD_IOMMU_PTDE_NXT_LVL);
+
+ if (AMD_IOMMU_REG_GET64(ptdep, AMD_IOMMU_PTDE_PR) == 1) {
+ if (pt->pt_level == 1) {
+ ASSERT(next_level == 0);
+ /* PTE */
+ pt->pt_pte_ref[index]--;
+ if (pt->pt_pte_ref[index] != 0) {
+ return (PDTE_NOT_TORN);
+ }
+ } else {
+ ASSERT(next_level != 0 && next_level != 7);
+ }
+ ASSERT(pt->pt_pte_ref[index] == 0);
+ ASSERT(PT_REF_VALID(pt));
+
+ AMD_IOMMU_REG_SET64(ptdep, AMD_IOMMU_PTDE_PR, 0);
+ SYNC_FORDEV(pt->pt_dma_hdl);
+ ASSERT(AMD_IOMMU_REG_GET64(ptdep,
+ AMD_IOMMU_PTDE_PR) == 0);
+ pt->pt_ref--;
+ ASSERT(PT_REF_VALID(pt));
+ retval = PDTE_TORN_DOWN;
+ } else {
+ ASSERT(0);
+ ASSERT(pt->pt_pte_ref[index] == 0);
+ ASSERT(PT_REF_VALID(pt));
+ retval = PDTE_NOT_TORN;
+ }
+
+ if (pt->pt_ref == 0) {
+ amd_iommu_free_pgtable(iommu, pt);
+ return (PGTABLE_TORN_DOWN);
+ }
+
+ return (retval);
+}
+
+static int
+amd_iommu_create_pgtables(amd_iommu_t *iommu, dev_info_t *rdip,
+ struct ddi_dma_req *dmareq, uint64_t va,
+ uint64_t pa, uint16_t deviceid, domain_id_t domainid,
+ amd_iommu_domain_t *dp, const char *path, int km_flags)
+{
+ int level;
+ uint16_t index;
+ uint16_t next_idx;
+ amd_iommu_page_table_t *pt;
+ amd_iommu_page_table_t *ppt;
+ int error;
+ const char *driver = ddi_driver_name(rdip);
+ int instance = ddi_get_instance(rdip);
+ const char *f = "amd_iommu_create_pgtables";
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+ cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, "
+ "deviceid = %u, va = %p, pa = %p, path = %s",
+ f, driver, instance,
+ iommu->aiomt_idx, domainid, deviceid,
+ (void *)(uintptr_t)va,
+ (void *)(uintptr_t)pa, path);
+ }
+
+ if (domainid == AMD_IOMMU_PASSTHRU_DOMAIN) {
+ /* No need for pagetables. Just set up device table entry */
+ goto passthru;
+ }
+
+ index = 0;
+ ppt = NULL;
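+	/*
+	 * Walk from the root (MAXLEVEL) down to level 1, creating any
+	 * missing intermediate tables; the level-1 pass installs the PTE
+	 * that actually maps va to pa.
+	 */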
+ for (level = AMD_IOMMU_PGTABLE_MAXLEVEL; level > 0;
+ level--, pt = NULL, next_idx = 0) {
+ if ((error = amd_iommu_setup_1_pgtable(iommu, rdip, dmareq,
+ domainid, dp, ppt, index, level, va, pa, &pt,
+ &next_idx, path, km_flags)) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, "
+ "deviceid=%u, va= %p, pa = %p, Failed to setup "
+ "page table(s) at level = %d, path = %s.",
+ f, driver, instance, iommu->aiomt_idx,
+ domainid, deviceid, (void *)(uintptr_t)va,
+ (void *)(uintptr_t)pa, level, path);
+ return (error);
+ }
+
+ if (level > 1) {
+ ASSERT(pt);
+ ASSERT(pt->pt_domainid == domainid);
+ ppt = pt;
+ index = next_idx;
+ } else {
+ ASSERT(level == 1);
+ ASSERT(pt == NULL);
+ ASSERT(next_idx == 0);
+ ppt = NULL;
+ index = 0;
+ }
+ }
+
+passthru:
+ if ((error = amd_iommu_set_devtbl_entry(iommu, rdip, domainid, deviceid,
+ dp, path)) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, deviceid=%u, "
+ "domainid=%d."
+ "Failed to set device table entry for path %s.",
+ f, driver, instance,
+ iommu->aiomt_idx, (void *)rdip, deviceid, domainid, path);
+ return (error);
+ }
+
+ SYNC_FORDEV(iommu->aiomt_dmahdl);
+
+ return (DDI_SUCCESS);
+}
+
+static int
+amd_iommu_destroy_pgtables(amd_iommu_t *iommu, dev_info_t *rdip,
+ uint64_t pageva, uint16_t deviceid, domain_id_t domainid,
+ amd_iommu_domain_t *dp, map_type_t type, int *domain_freed, char *path)
+{
+ int level;
+ int flags;
+ amd_iommu_cmdargs_t cmdargs = {0};
+ uint16_t index;
+ uint16_t prev_index;
+ amd_iommu_page_table_t *pt;
+ amd_iommu_page_table_t *ppt;
+ pdte_tear_t retval;
+ int tear_level;
+ int invalidate_pte;
+ int invalidate_pde;
+ int error = DDI_FAILURE;
+ const char *driver = ddi_driver_name(iommu->aiomt_dip);
+ int instance = ddi_get_instance(iommu->aiomt_dip);
+ const char *f = "amd_iommu_destroy_pgtables";
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+ cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, "
+ "deviceid = %u, va = %p, path = %s",
+ f, driver, instance,
+ iommu->aiomt_idx, domainid, deviceid,
+ (void *)(uintptr_t)pageva, path);
+ }
+
+ if (domainid == AMD_IOMMU_PASSTHRU_DOMAIN) {
+ /*
+ * there are no pagetables for the passthru domain.
+ * Just the device table entry
+ */
+ error = DDI_SUCCESS;
+ goto passthru;
+ }
+
+ ppt = NULL;
+ index = 0;
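+	/* First walk down from the root to the leaf table for this VA */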
+ for (level = AMD_IOMMU_PGTABLE_MAXLEVEL; level > 0; level--) {
+ pt = amd_iommu_lookup_pgtable(iommu, ppt, dp, level, index);
+ if (pt) {
+ ppt = pt;
+ index = AMD_IOMMU_VA_BITS(pageva, level);
+ continue;
+ }
+ break;
+ }
+
+ if (level == 0) {
+ uint64_t *ptep;
+ uint64_t pa_4K;
+
+ ASSERT(pt);
+ ASSERT(pt == ppt);
+ ASSERT(pt->pt_domainid == dp->d_domainid);
+
+ ptep = &(pt->pt_pgtblva[index]);
+
+ pa_4K = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_ADDR);
+ if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
+ ASSERT(pageva == (pa_4K << MMU_PAGESHIFT));
+ }
+ }
+
+ tear_level = -1;
+ invalidate_pde = 0;
+ invalidate_pte = 0;
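+	/*
+	 * Now unwind back up toward the root: clear the leaf PTE, and
+	 * keep clearing parent PDEs only as long as each table below
+	 * ends up empty and is freed.
+	 */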
+ for (++level; level <= AMD_IOMMU_PGTABLE_MAXLEVEL; level++) {
+ prev_index = pt->pt_index;
+ ppt = pt->pt_parent;
+ retval = amd_iommu_teardown_pdte(iommu, pt, index);
+ switch (retval) {
+ case PDTE_NOT_TORN:
+ goto invalidate;
+ case PDTE_TORN_DOWN:
+ invalidate_pte = 1;
+ goto invalidate;
+ case PGTABLE_TORN_DOWN:
+ invalidate_pte = 1;
+ invalidate_pde = 1;
+ tear_level = level;
+ break;
+ }
+ index = prev_index;
+ pt = ppt;
+ }
+
+invalidate:
+ /*
+ * Now teardown the IOMMU HW caches if applicable
+ */
+ if (invalidate_pte) {
+ cmdargs.ca_domainid = (uint16_t)domainid;
+ if (amd_iommu_pageva_inval_all) {
+ cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
+ flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
+ AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
+ } else if (invalidate_pde) {
+ cmdargs.ca_addr =
+ (uintptr_t)AMD_IOMMU_VA_INVAL(pageva, tear_level);
+ flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
+ AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
+ } else {
+ cmdargs.ca_addr = (uintptr_t)pageva;
+ flags = 0;
+ }
+ if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
+ &cmdargs, flags, 0) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, "
+ "rdip=%p. Failed to invalidate IOMMU HW cache "
+ "for %s", f, driver, instance,
+ iommu->aiomt_idx, domainid, (void *)rdip, path);
+ error = DDI_FAILURE;
+ goto out;
+ }
+ }
+
+passthru:
+ if (tear_level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
+ error = amd_iommu_clear_devtbl_entry(iommu, rdip, domainid,
+ deviceid, dp, domain_freed, path);
+ } else {
+ error = DDI_SUCCESS;
+ }
+
+out:
+ SYNC_FORDEV(iommu->aiomt_dmahdl);
+
+ return (error);
+}
+
+static int
+cvt_bind_error(int error)
+{
+ switch (error) {
+ case DDI_DMA_MAPPED:
+ case DDI_DMA_PARTIAL_MAP:
+ case DDI_DMA_NORESOURCES:
+ case DDI_DMA_NOMAPPING:
+ break;
+ default:
+ cmn_err(CE_PANIC, "Unsupported error code: %d", error);
+ /*NOTREACHED*/
+ }
+ return (error);
+}
+
+int
+amd_iommu_map_pa2va(amd_iommu_t *iommu, dev_info_t *rdip, ddi_dma_attr_t *attrp,
+ struct ddi_dma_req *dmareq, uint64_t start_pa, uint64_t pa_sz,
+ map_type_t type, uint64_t *start_vap, int km_flags)
+{
+ pfn_t pfn_start;
+ pfn_t pfn_end;
+ pfn_t pfn;
+ int alias;
+ int32_t deviceid;
+ domain_id_t domainid;
+ amd_iommu_domain_t *dp;
+ uint64_t end_pa;
+ uint64_t start_va;
+ uint64_t end_va;
+ uint64_t pg_start;
+ uint64_t pg_end;
+ uint64_t pg;
+ uint64_t va_sz;
+ char *path;
+ int error = DDI_DMA_NOMAPPING;
+ const char *driver = ddi_driver_name(iommu->aiomt_dip);
+ int instance = ddi_get_instance(iommu->aiomt_dip);
+ const char *f = "amd_iommu_map_pa2va";
+
+ ASSERT(pa_sz != 0);
+
+ *start_vap = 0;
+
+ ASSERT(rdip);
+
+ path = kmem_alloc(MAXPATHLEN, km_flags);
+ if (path == NULL) {
+ error = DDI_DMA_NORESOURCES;
+ goto out;
+ }
+ (void) ddi_pathname(rdip, path);
+
+ /*
+ * First get deviceid
+ */
+ if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
+ "Failed to get device ID for %s.", f, driver, instance,
+ iommu->aiomt_idx, (void *)rdip, path);
+ error = DDI_DMA_NOMAPPING;
+ goto out;
+ }
+
+ /*
+ * Next get the domain for this rdip
+ */
+ if (amd_iommu_get_domain(iommu, rdip, alias, deviceid, &domainid, path)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, path=%s. "
+ "Failed to get domain.", f, driver, instance,
+ iommu->aiomt_idx, (void *)rdip, path);
+ error = DDI_DMA_NOMAPPING;
+ goto out;
+ }
+
+ dp = amd_iommu_lookup_domain(iommu, domainid, type, km_flags);
+ if (dp == NULL) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, rdip=%p. "
+		    "Failed to lookup domain for %s.", f, driver, instance,
+		    iommu->aiomt_idx, domainid, (void *)rdip, path);
+ error = DDI_DMA_NORESOURCES;
+ goto out;
+ }
+
+ ASSERT(dp->d_domainid == domainid);
+
+ pfn_start = start_pa >> MMU_PAGESHIFT;
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+ cmn_err(CE_NOTE, "pa = %p, pfn_new = %p, pfn_start = %p, "
+ "pgshift = %d",
+ (void *)(uintptr_t)start_pa,
+ (void *)(uintptr_t)(start_pa >> MMU_PAGESHIFT),
+ (void *)(uintptr_t)pfn_start, MMU_PAGESHIFT);
+ }
+
+ end_pa = start_pa + pa_sz - 1;
+ pfn_end = end_pa >> MMU_PAGESHIFT;
+
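+	/*
+	 * For unity mappings DMA VA == PA. Otherwise carve a VA range
+	 * out of the domain's vmem arena, honoring the device's DMA
+	 * attributes (alignment, segment and address range limits).
+	 */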
+ if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
+ start_va = start_pa;
+ end_va = end_pa;
+ va_sz = pa_sz;
+ *start_vap = start_va;
+ } else {
+ va_sz = mmu_ptob(pfn_end - pfn_start + 1);
+ start_va = (uintptr_t)vmem_xalloc(dp->d_vmem, va_sz,
+ MAX(attrp->dma_attr_align, MMU_PAGESIZE),
+ 0,
+ attrp->dma_attr_seg + 1,
+ (void *)(uintptr_t)attrp->dma_attr_addr_lo,
+ (void *)(uintptr_t)MIN((attrp->dma_attr_addr_hi + 1),
+ AMD_IOMMU_SIZE_4G), /* XXX rollover */
+ km_flags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
+ if (start_va == 0) {
+ cmn_err(CE_WARN, "%s: No VA resources",
+ amd_iommu_modname);
+ error = DDI_DMA_NORESOURCES;
+ goto out;
+ }
+ ASSERT((start_va & MMU_PAGEOFFSET) == 0);
+ end_va = start_va + va_sz - 1;
+ *start_vap = start_va + (start_pa & MMU_PAGEOFFSET);
+ }
+
+ pg_start = start_va >> MMU_PAGESHIFT;
+ pg_end = end_va >> MMU_PAGESHIFT;
+
+ pg = pg_start;
+ for (pfn = pfn_start; pfn <= pfn_end; pfn++, pg++) {
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+ cmn_err(CE_WARN, "%s: attempting to create page tables "
+ "for pfn = %p, va = %p, path = %s",
+ f, (void *)(uintptr_t)(pfn << MMU_PAGESHIFT),
+ (void *)(uintptr_t)(pg << MMU_PAGESHIFT), path);
+ }
+
+ if (amd_iommu_unity_map || type == AMD_IOMMU_UNITY_MAP) {
+ ASSERT(pfn == pg);
+ }
+
+ if ((error = amd_iommu_create_pgtables(iommu, rdip, dmareq,
+ pg << MMU_PAGESHIFT,
+ pfn << MMU_PAGESHIFT, deviceid, domainid, dp, path,
+ km_flags)) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "Failed to create_pgtables");
+ goto out;
+ }
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_PAGE_TABLES) {
+ cmn_err(CE_WARN, "%s: successfuly created page tables "
+ "for pfn = %p, vapg = %p, path = %s",
+ f, (void *)(uintptr_t)pfn,
+ (void *)(uintptr_t)pg, path);
+ }
+ }
+ ASSERT(pg == pg_end + 1);
+
+ if (amd_iommu_debug & AMD_IOMMU_DEBUG_PA2VA) {
+ cmn_err(CE_NOTE, "pa=%p, va=%p",
+ (void *)(uintptr_t)start_pa,
+ (void *)(uintptr_t)(*start_vap));
+ }
+ error = DDI_DMA_MAPPED;
+
+out:
+ kmem_free(path, MAXPATHLEN);
+ return (cvt_bind_error(error));
+}
+
+int
+amd_iommu_unmap_va(amd_iommu_t *iommu, dev_info_t *rdip, uint64_t start_va,
+ uint64_t va_sz, map_type_t type)
+{
+ uint64_t end_va;
+ uint64_t pg_start;
+ uint64_t pg_end;
+ uint64_t pg;
+ uint64_t actual_sz;
+ char *path;
+ int pathfree;
+ int alias;
+ int32_t deviceid;
+ domain_id_t domainid;
+ amd_iommu_domain_t *dp;
+ int error;
+ int domain_freed;
+ const char *driver = ddi_driver_name(iommu->aiomt_dip);
+ int instance = ddi_get_instance(iommu->aiomt_dip);
+ const char *f = "amd_iommu_unmap_va";
+
+ if (amd_iommu_no_unmap)
+ return (DDI_SUCCESS);
+
+ path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
+ if (path) {
+ (void) ddi_pathname(rdip, path);
+ pathfree = 1;
+ } else {
+ pathfree = 0;
+ path = "<path-mem-alloc-failed>";
+ }
+
+ /*
+ * First get deviceid
+ */
+ if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
+ "Failed to get device ID for %s.", f, driver, instance,
+ iommu->aiomt_idx, (void *)rdip, path);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ /*
+ * Next get the domain for this rdip
+ */
+ if (amd_iommu_get_domain(iommu, rdip, alias, deviceid, &domainid, path)
+ != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, path=%s. "
+ "Failed to get domain.", f, driver, instance,
+ iommu->aiomt_idx, (void *)rdip, path);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ /* should never result in domain allocation/vmem_create */
+ dp = amd_iommu_lookup_domain(iommu, domainid, AMD_IOMMU_INVALID_MAP,
+ KM_NOSLEEP);
+ if (dp == NULL) {
+		cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, rdip=%p. "
+		    "Failed to lookup domain for %s.", f, driver, instance,
+		    iommu->aiomt_idx, domainid, (void *)rdip, path);
+ error = DDI_FAILURE;
+ goto out;
+ }
+
+ ASSERT(dp->d_domainid == domainid);
+
+ pg_start = start_va >> MMU_PAGESHIFT;
+ end_va = start_va + va_sz - 1;
+ pg_end = end_va >> MMU_PAGESHIFT;
+ actual_sz = (pg_end - pg_start + 1) << MMU_PAGESHIFT;
+
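+	/*
+	 * Tear down the mapping one page at a time. If the domain's last
+	 * reference goes away, that can only happen on the final page of
+	 * the range.
+	 */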
+ domain_freed = 0;
+ for (pg = pg_start; pg <= pg_end; pg++) {
+ domain_freed = 0;
+ if (amd_iommu_destroy_pgtables(iommu, rdip,
+ pg << MMU_PAGESHIFT, deviceid, domainid, dp, type,
+ &domain_freed, path) != DDI_SUCCESS) {
+ error = DDI_FAILURE;
+ goto out;
+ }
+ if (domain_freed) {
+ ASSERT(pg == pg_end);
+ break;
+ }
+ }
+
+ /*
+ * vmem_xalloc() must be paired with vmem_xfree
+ */
+ if (type == AMD_IOMMU_VMEM_MAP && !amd_iommu_unity_map) {
+ vmem_xfree(dp->d_vmem,
+ (void *)(uintptr_t)(pg_start << MMU_PAGESHIFT), actual_sz);
+ }
+
+ if (domain_freed)
+ amd_iommu_teardown_domain(iommu, dp);
+
+ error = DDI_SUCCESS;
+out:
+ if (pathfree)
+ kmem_free(path, MAXPATHLEN);
+ return (error);
+}
diff --git a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.h b/usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.h
index bd38293660..fa40bb4cb7 100644
--- a/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_page_tables.h
+++ b/usr/src/uts/intel/io/amd_iommu/amd_iommu_page_tables.h
@@ -19,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
diff --git a/usr/src/uts/intel/sys/Makefile b/usr/src/uts/intel/sys/Makefile
index d05dead3f6..3a1fa9843d 100644
--- a/usr/src/uts/intel/sys/Makefile
+++ b/usr/src/uts/intel/sys/Makefile
@@ -31,6 +31,7 @@ include ../../../Makefile.master
# from being built, so these headers are not exported (installed).
HDRS = \
+ amd_iommu.h \
archsystm.h \
asm_linkage.h \
bootconf.h \
diff --git a/usr/src/uts/intel/sys/amd_iommu.h b/usr/src/uts/intel/sys/amd_iommu.h
new file mode 100644
index 0000000000..2c3221d100
--- /dev/null
+++ b/usr/src/uts/intel/sys/amd_iommu.h
@@ -0,0 +1,56 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_AMD_IOMMU_H
+#define _SYS_AMD_IOMMU_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/sunddi.h>
+#include <sys/iommulib.h>
+
+#ifdef _KERNEL
+
+typedef struct amd_iommu_state {
+ int aioms_instance; /* instance */
+ dev_info_t *aioms_devi; /* dip */
+ struct amd_iommu *aioms_iommu_start; /* start of list of IOMMUs */
+ struct amd_iommu *aioms_iommu_end; /* end of list of IOMMUs */
+ int aioms_nunits; /* # of IOMMUs in function */
+} amd_iommu_state_t;
+
+int amd_iommu_setup(dev_info_t *dip, amd_iommu_state_t *statep);
+int amd_iommu_teardown(dev_info_t *dip, amd_iommu_state_t *statep);
+int amd_iommu_lookup_src_bdf(uint16_t bdf, uint16_t *src_bdfp);
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_AMD_IOMMU_H */