Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/cmd/fm/mcdecode/Makefile | 13
-rw-r--r--  usr/src/cmd/fm/mcdecode/mcdecode.c | 209
-rw-r--r--  usr/src/common/bitext/bitext.c | 171
-rw-r--r--  usr/src/common/mc/zen_umc/zen_fabric_utils.c | 97
-rw-r--r--  usr/src/common/mc/zen_umc/zen_umc_decode.c | 1619
-rw-r--r--  usr/src/common/mc/zen_umc/zen_umc_dump.c | 717
-rw-r--r--  usr/src/man/man9f/Makefile | 17
-rw-r--r--  usr/src/man/man9f/bitdel64.9f | 81
-rw-r--r--  usr/src/man/man9f/bitset64.9f | 186
-rw-r--r--  usr/src/man/man9f/bitx64.9f | 120
-rw-r--r--  usr/src/pkg/manifests/driver-cpu-amd-zen.p5m | 4
-rw-r--r--  usr/src/pkg/manifests/system-header.p5m | 2
-rw-r--r--  usr/src/pkg/manifests/system-kernel.man9f.inc | 10
-rw-r--r--  usr/src/pkg/manifests/system-test-ostest.p5m | 2
-rw-r--r--  usr/src/test/os-tests/runfiles/default.run | 3
-rw-r--r--  usr/src/test/os-tests/tests/Makefile | 3
-rw-r--r--  usr/src/test/os-tests/tests/imc/Makefile | 3
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/Makefile | 90
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_fabric_ids.c | 387
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test.c | 553
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test.h | 102
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test_basic.c | 357
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test_chans.c | 1729
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test_cod.c | 1354
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test_errors.c | 596
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test_hole.c | 668
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test_ilv.c | 1719
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test_multi.c | 396
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test_nps.c | 3349
-rw-r--r--  usr/src/test/os-tests/tests/zen_umc/zen_umc_test_remap.c | 746
-rw-r--r--  usr/src/uts/common/Makefile.files | 3
-rw-r--r--  usr/src/uts/common/Makefile.rules | 6
-rw-r--r--  usr/src/uts/common/sys/Makefile | 2
-rw-r--r--  usr/src/uts/common/sys/bitext.h | 48
-rw-r--r--  usr/src/uts/intel/Makefile.files | 1
-rw-r--r--  usr/src/uts/intel/Makefile.intel | 4
-rw-r--r--  usr/src/uts/intel/Makefile.rules | 5
-rw-r--r--  usr/src/uts/intel/io/amdzen/amdzen.c | 627
-rw-r--r--  usr/src/uts/intel/io/amdzen/amdzen.h | 162
-rw-r--r--  usr/src/uts/intel/io/amdzen/amdzen_client.h | 76
-rw-r--r--  usr/src/uts/intel/io/amdzen/zen_udf.c | 18
-rw-r--r--  usr/src/uts/intel/io/amdzen/zen_umc.c | 3177
-rw-r--r--  usr/src/uts/intel/io/amdzen/zen_umc.h | 634
-rw-r--r--  usr/src/uts/intel/io/imc/imc.c | 11
-rw-r--r--  usr/src/uts/intel/sys/amdzen/df.h | 896
-rw-r--r--  usr/src/uts/intel/sys/amdzen/umc.h | 390
-rw-r--r--  usr/src/uts/intel/sys/mc.h | 48
-rw-r--r--  usr/src/uts/intel/sys/x86_archext.h | 11
-rw-r--r--  usr/src/uts/intel/zen_umc/Makefile | 41
49 files changed, 21181 insertions, 282 deletions
diff --git a/usr/src/cmd/fm/mcdecode/Makefile b/usr/src/cmd/fm/mcdecode/Makefile
index 841725e4f7..18d7472ed0 100644
--- a/usr/src/cmd/fm/mcdecode/Makefile
+++ b/usr/src/cmd/fm/mcdecode/Makefile
@@ -11,12 +11,14 @@
#
# Copyright 2019 Joyent, Inc.
+# Copyright 2022 Oxide Computer Company
#
include ../../Makefile.cmd
include ../../Makefile.ctf
-SRCS += mcdecode.c imc_decode.o imc_dump.o
+SRCS += mcdecode.c imc_decode.c imc_dump.c
+SRCS += zen_fabric_utils.c zen_umc_decode.c zen_umc_dump.c bitext.c
OBJS = $(SRCS:%.c=%.o)
PROG = mcdecode
@@ -27,6 +29,7 @@ ROOTPROG = $(ROOTLIBFMD)/$(PROG)
$(NOT_RELEASE_BUILD)CPPFLAGS += -DDEBUG
CPPFLAGS += -I$(SRC)/uts/intel/io/imc
+CPPFLAGS += -I$(SRC)/uts/intel/io/amdzen -I$(SRC)/uts/intel
LDLIBS += -lnvpair
CSTD = $(CSTD_GNU99)
@@ -41,10 +44,18 @@ $(PROG): $(OBJS)
$(COMPILE.c) $<
$(POST_PROCESS_O)
+%.o: $(SRC)/common/bitext/%.c
+ $(COMPILE.c) $<
+ $(POST_PROCESS_O)
+
%.o: $(SRC)/common/mc/imc/%.c
$(COMPILE.c) $<
$(POST_PROCESS_O)
+%.o: $(SRC)/common/mc/zen_umc/%.c
+ $(COMPILE.c) $<
+ $(POST_PROCESS_O)
+
clean:
$(RM) $(OBJS) $(LINTFILES)
diff --git a/usr/src/cmd/fm/mcdecode/mcdecode.c b/usr/src/cmd/fm/mcdecode/mcdecode.c
index e6b8b62ce7..f88f4acd50 100644
--- a/usr/src/cmd/fm/mcdecode/mcdecode.c
+++ b/usr/src/cmd/fm/mcdecode/mcdecode.c
@@ -11,6 +11,7 @@
/*
* Copyright 2019 Joyent, Inc.
+ * Copyright 2022 Oxide Computer Company
*/
/*
@@ -28,9 +29,11 @@
#include <unistd.h>
#include <sys/mman.h>
#include <libnvpair.h>
+#include <sys/sysmacros.h>
#include <sys/mc.h>
#include "imc.h"
+#include "zen_umc.h"
#define MCDECODE_USAGE 2
@@ -39,27 +42,135 @@
*/
#define MCDECODE_WRITE (1024 * 32)
+typedef struct mc_backend {
+ const char *mcb_name;
+ void *(*mcb_init)(nvlist_t *, const char *);
+ void (*mcb_decode_pa)(void *, uint64_t);
+} mc_backend_t;
+
+static const mc_backend_t *mc_cur_backend = NULL;
+
static void
mcdecode_usage(void)
{
(void) fprintf(stderr,
- "Usage: mcdecode [-f infile] [-d address | -w outfile] device\n"
+ "Usage: mcdecode -d address -f infile | device\n"
+ " mcdecode -w outfile device\n"
"\n"
- "\t-d decode physical address to the correspond dimm\n"
+ "\t-d decode physical address to the corresponding dimm\n"
"\t-f use decoder image from infile\n"
"\t-w write decoder snapshot state to the specified file\n");
exit(MCDECODE_USAGE);
}
+static void *
+mcb_imc_init(nvlist_t *nvl, const char *file)
+{
+ imc_t *imc;
+
+ imc = calloc(1, sizeof (*imc));
+ if (imc == NULL) {
+ errx(EXIT_FAILURE, "failed to allocate memory for imc_t");
+ }
+
+ if (!imc_restore_decoder(nvl, imc)) {
+ errx(EXIT_FAILURE, "failed to restore memory "
+ "controller snapshot in %s", file);
+ }
+
+ return (imc);
+}
+
static void
-mcdecode_from_file(const char *file, uint64_t pa)
+mcb_imc_decode_pa(void *arg, uint64_t pa)
+{
+ const imc_t *imc = arg;
+ imc_decode_state_t dec;
+
+ bzero(&dec, sizeof (dec));
+ if (!imc_decode_pa(imc, pa, &dec)) {
+ errx(EXIT_FAILURE, "failed to decode address 0x%" PRIx64
+ " -- 0x%x, 0x%" PRIx64, pa, dec.ids_fail,
+ dec.ids_fail_data);
+ }
+
+ (void) printf("Decoded physical address 0x%" PRIx64 "\n"
+ "\tchip:\t\t\t%u\n"
+ "\tmemory controller:\t%u\n"
+ "\tchannel:\t\t%u\n"
+ "\tdimm:\t\t\t%u\n"
+ "\trank:\t\t\t%u\n",
+ pa, dec.ids_nodeid, dec.ids_tadid, dec.ids_channelid,
+ dec.ids_dimmid, dec.ids_rankid);
+}
+
+static void *
+mcb_umc_init(nvlist_t *nvl, const char *file)
+{
+ zen_umc_t *umc;
+
+ umc = calloc(1, sizeof (*umc));
+ if (umc == NULL) {
+ errx(EXIT_FAILURE, "failed to allocate memory for zen_umc_t");
+ }
+
+ if (!zen_umc_restore_decoder(nvl, umc)) {
+ errx(EXIT_FAILURE, "failed to restore memory "
+ "controller snapshot in %s", file);
+ }
+
+ return (umc);
+}
+
+
+static void
+mcb_umc_decode_pa(void *arg, uint64_t pa)
+{
+ zen_umc_t *umc = arg;
+ zen_umc_decoder_t dec;
+ uint32_t sock, die, comp;
+
+ bzero(&dec, sizeof (dec));
+ if (!zen_umc_decode_pa(umc, pa, &dec)) {
+ errx(EXIT_FAILURE, "failed to decode address 0x%" PRIx64
+ " -- 0x%x, 0x%" PRIx64, pa, dec.dec_fail,
+ dec.dec_fail_data);
+ }
+
+ zen_fabric_id_decompose(&umc->umc_decomp, dec.dec_targ_fabid, &sock,
+ &die, &comp);
+ (void) printf("Decoded physical address 0x%" PRIx64 "\n"
+ "\tsocket:\t\t\t%u\n"
+ "\tdie:\t\t\t%u\n"
+ "\tchannel:\t\t%u\n"
+ "\tchannel address\t\t0x%" PRIx64 "\n"
+ "\tdimm:\t\t\t%u\n"
+ "\trow:\t\t\t0x%x\n"
+ "\tcol:\t\t\t0x%x\n"
+ "\tbank:\t\t\t0x%x\n"
+ "\tbank group:\t\t0x%x\n"
+ "\trank mult:\t\t0x%x\n"
+ "\tchip-select:\t\t0x%x\n"
+ "\tsub-channel:\t\t0x%x\n",
+ pa, sock, die, dec.dec_umc_chan->chan_logid, dec.dec_norm_addr,
+ dec.dec_dimm->ud_dimmno, dec.dec_dimm_row, dec.dec_dimm_col,
+ dec.dec_dimm_bank, dec.dec_dimm_bank_group, dec.dec_dimm_rm,
+ dec.dec_dimm_csno, dec.dec_dimm_subchan);
+
+}
+
+static const mc_backend_t mc_backends[] = {
+ { "imc", mcb_imc_init, mcb_imc_decode_pa },
+ { "zen_umc", mcb_umc_init, mcb_umc_decode_pa, }
+};
+
+static void *
+mcdecode_from_file(const char *file)
{
int fd, ret;
struct stat st;
void *addr;
nvlist_t *nvl;
- imc_t imc;
- imc_decode_state_t dec;
char *driver;
if ((fd = open(file, O_RDONLY)) < 0) {
@@ -93,31 +204,18 @@ mcdecode_from_file(const char *file, uint64_t pa)
file);
}
- if (strcmp(driver, "imc") != 0) {
- errx(EXIT_FAILURE, "unknown driver dump source %s\n", driver);
- }
-
- if (!imc_restore_decoder(nvl, &imc)) {
- errx(EXIT_FAILURE, "failed to restore memory controller "
- "snapshot in %s", file);
- }
-
- bzero(&dec, sizeof (dec));
+ for (uint_t i = 0; i < ARRAY_SIZE(mc_backends); i++) {
+ if (strcmp(driver, mc_backends[i].mcb_name) == 0) {
+ void *data;
- if (!imc_decode_pa(&imc, pa, &dec)) {
- errx(EXIT_FAILURE, "failed to decode address 0x%" PRIx64, pa);
+ mc_cur_backend = &mc_backends[i];
+ data = mc_cur_backend->mcb_init(nvl, file);
+ nvlist_free(nvl);
+ return (data);
+ }
}
- (void) printf("Decoded physical address 0x%" PRIx64 "\n"
- "\tchip:\t\t\t%u\n"
- "\tmemory controller:\t%u\n"
- "\tchannel:\t\t%u\n"
- "\tdimm:\t\t\t%u\n"
- "\trank:\t\t\t%u\n",
- pa, dec.ids_nodeid, dec.ids_tadid, dec.ids_channelid,
- dec.ids_dimmid, dec.ids_rankid);
-
- nvlist_free(nvl);
+ errx(EXIT_FAILURE, "unknown driver dump source %s\n", driver);
}
static void
@@ -145,12 +243,44 @@ mcdecode_pa(const char *device, uint64_t pa)
(void) printf("Decoded physical address 0x%" PRIx64 "\n"
"\tchip:\t\t\t%u\n"
+ "\tdie:\t\t\t%u\n"
"\tmemory controller:\t%u\n"
"\tchannel:\t\t%u\n"
- "\tdimm:\t\t\t%u\n"
- "\trank:\t\t\t%u\n",
- pa, ioc.mcei_chip, ioc.mcei_mc, ioc.mcei_chan, ioc.mcei_dimm,
- ioc.mcei_rank);
+ "\tchannel address\t\t0x%" PRIx64"\n"
+ "\tdimm:\t\t\t%u\n",
+ pa, ioc.mcei_chip, ioc.mcei_die, ioc.mcei_mc, ioc.mcei_chan,
+ ioc.mcei_chan_addr, ioc.mcei_dimm);
+ if (ioc.mcei_rank != UINT8_MAX) {
+ (void) printf("\trank:\t\t\t%u\n", ioc.mcei_rank);
+ }
+
+ if (ioc.mcei_row != UINT32_MAX) {
+ (void) printf("\trow:\t\t\t0x%x\n", ioc.mcei_row);
+ }
+
+ if (ioc.mcei_column != UINT32_MAX) {
+ (void) printf("\tcol:\t\t\t0x%x\n", ioc.mcei_column);
+ }
+
+ if (ioc.mcei_bank != UINT8_MAX) {
+ (void) printf("\tbank:\t\t\t0x%x\n", ioc.mcei_bank);
+ }
+
+ if (ioc.mcei_bank_group != UINT8_MAX) {
+ (void) printf("\tbank group:\t\t0x%x\n", ioc.mcei_bank_group);
+ }
+
+ if (ioc.mcei_rm != UINT8_MAX) {
+ (void) printf("\trank mult:\t\t0x%x\n", ioc.mcei_rm);
+ }
+
+ if (ioc.mcei_cs != UINT8_MAX) {
+ (void) printf("\tchip-select:\t\t0x%x\n", ioc.mcei_cs);
+ }
+
+ if (ioc.mcei_subchan != UINT8_MAX) {
+ (void) printf("\tsub-channel:\t\t0x%x\n", ioc.mcei_subchan);
+ }
(void) close(fd);
}
@@ -217,6 +347,7 @@ main(int argc, char *argv[])
uint64_t pa = UINT64_MAX;
const char *outfile = NULL;
const char *infile = NULL;
+ void *backend;
while ((c = getopt(argc, argv, "d:f:w:")) != -1) {
char *eptr;
@@ -270,15 +401,17 @@ main(int argc, char *argv[])
errx(EXIT_FAILURE, "missing device argument");
}
-
- if (pa != UINT64_MAX) {
- if (infile != NULL) {
- mcdecode_from_file(infile, pa);
- } else {
+ if (infile == NULL) {
+ if (pa != UINT64_MAX) {
mcdecode_pa(argv[0], pa);
+ } else {
+ mcdecode_dump(argv[0], outfile);
}
- } else {
- mcdecode_dump(argv[0], outfile);
+
+ return (0);
}
+
+ backend = mcdecode_from_file(infile);
+ mc_cur_backend->mcb_decode_pa(backend, pa);
return (0);
}
diff --git a/usr/src/common/bitext/bitext.c b/usr/src/common/bitext/bitext.c
new file mode 100644
index 0000000000..cfd74b6925
--- /dev/null
+++ b/usr/src/common/bitext/bitext.c
@@ -0,0 +1,171 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * Various functions for manipulating regions of bits in standard sized
+ * integers. Meant to be a replacement for the extant BITX macro and provide
+ * additional functionality. See bitx64(9F), bitdel64(9F), and bitset64(9f) for
+ * more information.
+ */
+
+#include <sys/debug.h>
+#include <sys/stdint.h>
+
+uint8_t
+bitx8(uint8_t reg, uint_t high, uint_t low)
+{
+ uint8_t mask;
+
+ ASSERT3U(high, >=, low);
+ ASSERT3U(high, <, 8);
+ ASSERT3U(low, <, 8);
+
+ mask = (1 << (high - low + 1)) - 1;
+ return ((reg >> low) & mask);
+}
+
+uint16_t
+bitx16(uint16_t reg, uint_t high, uint_t low)
+{
+ uint16_t mask;
+
+ ASSERT3U(high, >=, low);
+ ASSERT3U(high, <, 16);
+ ASSERT3U(low, <, 16);
+
+ mask = (1 << (high - low + 1)) - 1;
+ return ((reg >> low) & mask);
+}
+
+
+uint32_t
+bitx32(uint32_t reg, uint_t high, uint_t low)
+{
+ uint32_t mask;
+
+ ASSERT3U(high, >=, low);
+ ASSERT3U(high, <, 32);
+ ASSERT3U(low, <, 32);
+
+ mask = (1UL << (high - low + 1)) - 1;
+
+ return ((reg >> low) & mask);
+}
+
+uint64_t
+bitx64(uint64_t reg, uint_t high, uint_t low)
+{
+ uint64_t mask;
+
+ ASSERT3U(high, >=, low);
+ ASSERT3U(high, <, 64);
+ ASSERT3U(low, <, 64);
+
+ mask = (1ULL << (high - low + 1)) - 1ULL;
+ return ((reg >> low) & mask);
+}
+
+uint8_t
+bitset8(uint8_t reg, uint_t high, uint_t low, uint8_t val)
+{
+ uint8_t mask;
+
+ ASSERT3U(high, >=, low);
+ ASSERT3U(high, <, 8);
+ ASSERT3U(low, <, 8);
+
+ mask = (1 << (high - low + 1)) - 1;
+ ASSERT0(~mask & val);
+
+ reg &= ~(mask << low);
+ reg |= val << low;
+
+ return (reg);
+}
+
+uint16_t
+bitset16(uint16_t reg, uint_t high, uint_t low, uint16_t val)
+{
+ uint16_t mask;
+
+ ASSERT3U(high, >=, low);
+ ASSERT3U(high, <, 16);
+ ASSERT3U(low, <, 16);
+
+ mask = (1 << (high - low + 1)) - 1;
+ ASSERT0(~mask & val);
+
+ reg &= ~(mask << low);
+ reg |= val << low;
+
+ return (reg);
+}
+
+uint32_t
+bitset32(uint32_t reg, uint_t high, uint_t low, uint32_t val)
+{
+ uint32_t mask;
+
+ ASSERT3U(high, >=, low);
+ ASSERT3U(high, <, 32);
+ ASSERT3U(low, <, 32);
+
+ mask = (1UL << (high - low + 1)) - 1;
+ ASSERT0(~mask & val);
+
+ reg &= ~(mask << low);
+ reg |= val << low;
+
+ return (reg);
+}
+
+uint64_t
+bitset64(uint64_t reg, uint_t high, uint_t low, uint64_t val)
+{
+ uint64_t mask;
+
+ ASSERT3U(high, >=, low);
+ ASSERT3U(high, <, 64);
+ ASSERT3U(low, <, 64);
+
+ mask = (1ULL << (high - low + 1)) - 1ULL;
+ ASSERT0(~mask & val);
+
+ reg &= ~(mask << low);
+ reg |= val << low;
+
+ return (reg);
+}
+
+uint64_t
+bitdel64(uint64_t val, uint_t high, uint_t low)
+{
+ uint64_t high_val = 0;
+ uint64_t low_val = 0;
+
+ ASSERT3U(high, >=, low);
+ ASSERT3U(high, <, 64);
+ ASSERT3U(low, <, 64);
+
+ if (low != 0) {
+ low_val = bitx64(val, low - 1, 0);
+ }
+
+ if (high != 63) {
+ high_val = bitx64(val, 63, high + 1);
+ }
+
+ return ((high_val << low) | low_val);
+}
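
Taken together these routines provide a masked read, a masked write, and a bit-range delete. A minimal usage sketch follows; the register value and the [15:8] field position are arbitrary example choices, and it assumes the prototypes are visible via the sys/bitext.h header added in this change:

	#include <sys/bitext.h>

	void
	bitext_example(void)
	{
		uint64_t reg = 0xabcd1234ULL;

		/* Extract bits [15:8]: yields 0x12. */
		uint64_t field = bitx64(reg, 15, 8);

		/* Overwrite bits [15:8] with 0x7f: reg becomes 0xabcd7f34. */
		reg = bitset64(reg, 15, 8, 0x7f);

		/* Delete bits [15:8], shifting the upper bits down: 0xabcd34. */
		reg = bitdel64(reg, 15, 8);
	}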
diff --git a/usr/src/common/mc/zen_umc/zen_fabric_utils.c b/usr/src/common/mc/zen_umc/zen_fabric_utils.c
new file mode 100644
index 0000000000..d4db7875f2
--- /dev/null
+++ b/usr/src/common/mc/zen_umc/zen_fabric_utils.c
@@ -0,0 +1,97 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * A collection of utility functions for interacting with fabric IDs.
+ */
+
+#include "zen_umc.h"
+
+/*
+ * Validate whether a fabric ID actually represents a valid ID for a given data
+ * fabric.
+ */
+boolean_t
+zen_fabric_id_valid_fabid(const df_fabric_decomp_t *decomp,
+ const uint32_t fabid)
+{
+ uint32_t mask = decomp->dfd_node_mask | decomp->dfd_comp_mask;
+ return ((fabid & ~mask) == 0);
+}
+
+/*
+ * Validate whether the parts of a fabric ID (e.g. the socket, die, and
+ * component) are in fact valid for a given data fabric.
+ */
+boolean_t
+zen_fabric_id_valid_parts(const df_fabric_decomp_t *decomp, const uint32_t sock,
+ const uint32_t die, const uint32_t comp)
+{
+ uint32_t node;
+
+ if (((sock << decomp->dfd_sock_shift) & ~decomp->dfd_sock_mask) != 0) {
+ return (B_FALSE);
+ }
+ if (((die << decomp->dfd_die_shift) & ~decomp->dfd_die_mask) != 0) {
+ return (B_FALSE);
+ }
+ if ((comp & ~decomp->dfd_comp_mask) != 0) {
+ return (B_FALSE);
+ }
+
+ node = die << decomp->dfd_die_shift;
+ node |= sock << decomp->dfd_sock_shift;
+
+ if (((node << decomp->dfd_node_shift) & ~decomp->dfd_node_mask) != 0) {
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Take apart a fabric ID into its constituent parts. The decomposition
+ * information has the die and socket information relative to the node ID.
+ */
+void
+zen_fabric_id_decompose(const df_fabric_decomp_t *decomp, const uint32_t fabid,
+ uint32_t *sockp, uint32_t *diep, uint32_t *compp)
+{
+ uint32_t node;
+
+ ASSERT(zen_fabric_id_valid_fabid(decomp, fabid));
+
+ *compp = (fabid & decomp->dfd_comp_mask) >> decomp->dfd_comp_shift;
+ node = (fabid & decomp->dfd_node_mask) >> decomp->dfd_node_shift;
+ *diep = (node & decomp->dfd_die_mask) >> decomp->dfd_die_shift;
+ *sockp = (node & decomp->dfd_sock_mask) >> decomp->dfd_sock_shift;
+}
+
+/*
+ * Compose a fabric ID from its constituent parts: the socket, die, and fabric.
+ */
+void
+zen_fabric_id_compose(const df_fabric_decomp_t *decomp, const uint32_t sock,
+ const uint32_t die, const uint32_t comp, uint32_t *fabidp)
+{
+ uint32_t node;
+
+ ASSERT(zen_fabric_id_valid_parts(decomp, sock, die, comp));
+
+ node = die << decomp->dfd_die_shift;
+ node |= sock << decomp->dfd_sock_shift;
+ *fabidp = (node << decomp->dfd_node_shift) |
+ (comp << decomp->dfd_comp_shift);
+}
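
A small sketch of the compose/decompose round trip. The df_fabric_decomp_t values below describe a purely hypothetical layout (component in fabric ID bits [3:0], node in bits [5:4] with the die in node bit 0 and the socket in node bit 1), not any real DF configuration:

	static void
	fabric_id_example(void)
	{
		const df_fabric_decomp_t decomp = {
			.dfd_sock_mask = 0x2, .dfd_sock_shift = 1,
			.dfd_die_mask = 0x1, .dfd_die_shift = 0,
			.dfd_node_mask = 0x30, .dfd_node_shift = 4,
			.dfd_comp_mask = 0xf, .dfd_comp_shift = 0
		};
		uint32_t fabid, sock, die, comp;

		/* Socket 1, die 1, component 5 composes to fabric ID 0x35. */
		zen_fabric_id_compose(&decomp, 1, 1, 5, &fabid);

		/* Decomposing 0x35 recovers sock = 1, die = 1, comp = 5. */
		zen_fabric_id_decompose(&decomp, fabid, &sock, &die, &comp);
	}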
diff --git a/usr/src/common/mc/zen_umc/zen_umc_decode.c b/usr/src/common/mc/zen_umc/zen_umc_decode.c
new file mode 100644
index 0000000000..acf03868cd
--- /dev/null
+++ b/usr/src/common/mc/zen_umc/zen_umc_decode.c
@@ -0,0 +1,1619 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * Zen UMC Decoding logic. See zen_umc.c for an overview of everything. This
+ * implements shared userland/kernel decoding.
+ */
+
+#include "zen_umc.h"
+
+#ifndef _KERNEL
+#include <strings.h>
+#endif
+
+/*
+ * Address constants.
+ */
+#define ZEN_UMC_TOM2_START 0x100000000ULL
+#define ZEN_UMC_TOM2_RSVD_BEGIN 0xfd00000000ULL
+#define ZEN_UMC_TOM2_RSVD_END 0x10000000000ULL
+
+/*
+ * COD based hashing constants.
+ */
+#define ZEN_UMC_COD_NBITS 3
+#define ZEN_UMC_NPS_MOD_NBITS 3
+
+/*
+ * We want to apply some initial heuristics to determine if a physical address
+ * is DRAM before we proceed, because of the MMIO hole and related reserved
+ * regions. The DRAM ranges can overlap with these reserved ranges, so we have
+ * to check them manually. Effectively this means we have a few valid ranges:
+ *
+ * o [ 0, TOM )
+ * o [ 4 GiB, TOM2 )
+ *
+ * However, the above 4 GiB runs into trouble depending on size. There is a 12
+ * GiB system reserved address region right below 1 TiB. So it really turns
+ * into the following when we have more than 1 TiB of DRAM:
+ *
+ * o [ 0, TOM )
+ * o [ 4 GiB, 1 TiB - 12 GiB )
+ * o [ 1 TiB, TOM2 )
+ *
+ * Note, this does not currently scan MTRRs or MMIO rules for what might be
+ * redirected to MMIO.
+ */
+static boolean_t
+zen_umc_decode_is_dram(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ if (dec->dec_pa < umc->umc_tom) {
+ return (B_TRUE);
+ }
+
+ if (dec->dec_pa >= umc->umc_tom2) {
+ dec->dec_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM;
+ return (B_FALSE);
+ }
+
+ /*
+ * If the address is in the reserved hole around 1 TiB, do not proceed.
+ */
+ if (dec->dec_pa >= ZEN_UMC_TOM2_RSVD_BEGIN &&
+ dec->dec_pa < ZEN_UMC_TOM2_RSVD_END) {
+ dec->dec_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM;
+ return (B_FALSE);
+ }
+
+ /*
+ * Now that we've validated we're not in the hole, check to see if we're
+ * actually in a valid region for TOM2.
+ */
+ if (dec->dec_pa >= ZEN_UMC_TOM2_START &&
+ dec->dec_pa < umc->umc_tom2) {
+ return (B_TRUE);
+ }
+
+ /*
+ * At this point we have eliminated all known DRAM regions described by
+ * TOM and TOM2, so we have to conclude that whatever we're looking at
+ * is now not part of DRAM.
+ */
+ dec->dec_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM;
+ return (B_FALSE);
+}
+
+/*
+ * Our first stop in decoding is to take a physical address and figure out
+ * which initial DF rule applies to it. This rule will then be used to figure
+ * out which target on the data fabric we should be going to and what
+ * interleaving rules apply.
+ *
+ * Our DRAM rule may reflect that the DRAM hole is active. In this case the
+ * specified range in the rule will be larger than the actual amount of DRAM
+ * present. MMIO accesses take priority over DRAM accesses in the core and
+ * therefore the MMIO portion of the rule is not actually decoded. When trying
+ * to match a rule we do not need to worry about that and can just look whether
+ * our physical address matches a rule. We will take into account whether
+ * hoisting should adjust the address when we translate from a system address to
+ * a normal address (e.g. an address in the channel) which will be done in a
+ * subsequent step. If an address is in the hole, that has already been
+ * accounted for.
+ *
+ * While gathering information, we have all the DRAM rules for a given CCM that
+ * corresponds to a CPU core. This allows us to review all DRAM rules in one
+ * place rather than walking through what's been assigned to each UMC instance,
+ * which only has the rules that are directed towards that particular channel
+ * and matter for determining channel offsets.
+ */
+static boolean_t
+zen_umc_decode_find_df_rule(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ const zen_umc_df_t *df = &umc->umc_dfs[0];
+
+ for (uint_t i = 0; i < df->zud_dram_nrules; i++) {
+ const df_dram_rule_t *rule = &df->zud_rules[i];
+
+ /*
+ * If this rule is not enabled, skip it.
+ */
+ if ((rule->ddr_flags & DF_DRAM_F_VALID) == 0)
+ continue;
+
+ if (dec->dec_pa >= rule->ddr_base &&
+ dec->dec_pa < rule->ddr_limit) {
+ dec->dec_df_ruleno = i;
+ dec->dec_df_rule = rule;
+ dec->dec_df_rulesrc = df;
+ return (B_TRUE);
+ }
+ }
+
+ dec->dec_fail = ZEN_UMC_DECODE_F_NO_DF_RULE;
+ return (B_FALSE);
+}
+
+/*
+ * This function takes care of the common logic of adjusting an address by the
+ * base value in the rule and determining if we need to apply the DRAM hole or
+ * not. This function is used in two different places:
+ *
+ * o As part of adjusting the system address to construct the interleave
+ * address for DFv4 and Zen 3 based 6-channel hashing (see
+ * zen_umc_determine_ileave_addr() below).
+ * o As part of adjusting the system address at the beginning of normalization
+ * to a channel address.
+ *
+ * One thing to highlight is that the same adjustment we make in the first case
+ * applies to a subset of things for interleaving; however, it applies to
+ * everything when normalizing.
+ */
+static boolean_t
+zen_umc_adjust_dram_addr(const zen_umc_t *umc, zen_umc_decoder_t *dec,
+ uint64_t *addrp, zen_umc_decode_failure_t errno)
+{
+ const uint64_t init_addr = *addrp;
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+ const zen_umc_df_t *df = dec->dec_df_rulesrc;
+ uint64_t mod_addr = init_addr;
+
+ ASSERT3U(init_addr, >=, rule->ddr_base);
+ ASSERT3U(init_addr, <, rule->ddr_limit);
+ mod_addr -= rule->ddr_base;
+
+ /*
+ * Determine if the hole applies to this rule.
+ */
+ if ((rule->ddr_flags & DF_DRAM_F_HOLE) != 0 &&
+ (df->zud_flags & ZEN_UMC_DF_F_HOLE_VALID) != 0 &&
+ init_addr >= ZEN_UMC_TOM2_START) {
+ uint64_t hole_size;
+ hole_size = ZEN_UMC_TOM2_START -
+ umc->umc_dfs[0].zud_hole_base;
+ if (mod_addr < hole_size) {
+ dec->dec_fail = errno;
+ dec->dec_fail_data = dec->dec_df_ruleno;
+ return (B_FALSE);
+ }
+
+ mod_addr -= hole_size;
+ }
+
+ *addrp = mod_addr;
+ return (B_TRUE);
+}
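
To make the hole adjustment concrete, a hypothetical example: with zud_hole_base at 0xc0000000 (3 GiB) and a rule whose ddr_base is 0,

	hole_size = 0x100000000 - 0xc0000000 = 0x40000000 (1 GiB)
	PA 0x140000000 (5 GiB) -> 0x140000000 - 0 - 0x40000000 = 0x100000000
	PA 0x80000000 (2 GiB)  -> unchanged (below 4 GiB, hole not applied)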
+
+/*
+ * Take care of constructing the address we need to use for determining the
+ * interleaving target fabric id. See the big theory statement in zen_umc.c for
+ * more on this.
+ */
+static boolean_t
+zen_umc_determine_ileave_addr(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+
+ if (umc->umc_df_rev <= DF_REV_3 &&
+ rule->ddr_chan_ileave != DF_CHAN_ILEAVE_6CH) {
+ dec->dec_ilv_pa = dec->dec_pa;
+ return (B_TRUE);
+ }
+
+ dec->dec_ilv_pa = dec->dec_pa;
+ if (!zen_umc_adjust_dram_addr(umc, dec, &dec->dec_ilv_pa,
+ ZEN_UMC_DECODE_F_ILEAVE_UNDERFLOW)) {
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * This is a simple interleaving case where we simply extract bits. No hashing
+ * required! Per zen_umc.c, from lowest to highest, we have channel, die, and
+ * then socket bits.
+ */
+static boolean_t
+zen_umc_decode_ileave_nohash(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint32_t nchan_bit, ndie_bit, nsock_bit, addr_bit;
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+
+ nsock_bit = rule->ddr_sock_ileave_bits;
+ ndie_bit = rule->ddr_die_ileave_bits;
+ switch (rule->ddr_chan_ileave) {
+ case DF_CHAN_ILEAVE_1CH:
+ nchan_bit = 0;
+ break;
+ case DF_CHAN_ILEAVE_2CH:
+ nchan_bit = 1;
+ break;
+ case DF_CHAN_ILEAVE_4CH:
+ nchan_bit = 2;
+ break;
+ case DF_CHAN_ILEAVE_8CH:
+ nchan_bit = 3;
+ break;
+ case DF_CHAN_ILEAVE_16CH:
+ nchan_bit = 4;
+ break;
+ case DF_CHAN_ILEAVE_32CH:
+ nchan_bit = 5;
+ break;
+ default:
+ dec->dec_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP;
+ dec->dec_fail_data = rule->ddr_chan_ileave;
+ return (B_FALSE);
+ }
+
+ /*
+ * Zero all of these out in case no bits are dedicated to this purpose.
+	 * In those cases, the value for this is always zero.
+ */
+ dec->dec_ilv_sock = dec->dec_ilv_die = dec->dec_ilv_chan = 0;
+ addr_bit = rule->ddr_addr_start;
+ if (nchan_bit > 0) {
+ dec->dec_ilv_chan = bitx64(dec->dec_ilv_pa,
+ addr_bit + nchan_bit - 1, addr_bit);
+ addr_bit += nchan_bit;
+ }
+
+ if (ndie_bit > 0) {
+ dec->dec_ilv_die = bitx64(dec->dec_ilv_pa,
+ addr_bit + ndie_bit - 1, addr_bit);
+ addr_bit += ndie_bit;
+ }
+
+ if (nsock_bit > 0) {
+ dec->dec_ilv_sock = bitx64(dec->dec_ilv_pa,
+ addr_bit + nsock_bit - 1, addr_bit);
+ addr_bit += nsock_bit;
+ }
+
+ return (B_TRUE);
+}
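
A worked example with made-up rule settings: ddr_addr_start of 8, an 8-channel interleave (three channel bits), one socket bit, and no die bits. For a physical address of 0x12345:

	dec_ilv_chan = bitx64(0x12345, 10, 8)  = 0x3
	dec_ilv_sock = bitx64(0x12345, 11, 11) = 0x0
	dec_ilv_die  = 0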
+
+/*
+ * Perform the Zen 2/Zen 3 "COD" based hashing. See the zen_umc.c interleaving
+ * section of the big theory statement for an overview of how this works.
+ */
+static boolean_t
+zen_umc_decode_ileave_cod(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint32_t nchan_bit;
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+ /*
+ * The order of bits here is defined by AMD. Yes, we do use the rule's
+ * address bit first and then skip to bit 12 for the second hash bit.
+ */
+ const uint32_t addr_bits[3] = { rule->ddr_addr_start, 12, 13 };
+
+ if (rule->ddr_sock_ileave_bits != 0 || rule->ddr_die_ileave_bits != 0) {
+ dec->dec_fail = ZEN_UMC_DECODE_F_COD_BAD_ILEAVE;
+ dec->dec_fail_data = dec->dec_df_ruleno;
+ return (B_FALSE);
+ }
+
+ switch (rule->ddr_chan_ileave) {
+ case DF_CHAN_ILEAVE_COD4_2CH:
+ nchan_bit = 1;
+ break;
+ case DF_CHAN_ILEAVE_COD2_4CH:
+ nchan_bit = 2;
+ break;
+ case DF_CHAN_ILEAVE_COD1_8CH:
+ nchan_bit = 3;
+ break;
+ default:
+ dec->dec_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP;
+ dec->dec_fail_data = rule->ddr_chan_ileave;
+ return (B_FALSE);
+ }
+
+ dec->dec_ilv_sock = dec->dec_ilv_die = dec->dec_ilv_chan = 0;
+
+ /*
+ * Proceed to calculate the address hash based on the number of bits
+ * that we have been told to use based on the DF rule. Use the flags in
+ * the rule to determine which additional address ranges to hash in.
+ */
+ for (uint_t i = 0; i < nchan_bit; i++) {
+ uint8_t hash = 0;
+
+ hash = bitx64(dec->dec_ilv_pa, addr_bits[i], addr_bits[i]);
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_16_18) != 0) {
+ uint8_t val = bitx64(dec->dec_ilv_pa, 16 + i, 16 + i);
+ hash ^= val;
+ }
+
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_21_23) != 0) {
+ uint8_t val = bitx64(dec->dec_ilv_pa, 21 + i, 21 + i);
+ hash ^= val;
+ }
+
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_30_32) != 0) {
+ uint8_t val = bitx64(dec->dec_ilv_pa, 30 + i, 30 + i);
+ hash ^= val;
+ }
+
+ dec->dec_ilv_chan |= hash << i;
+ }
+
+ return (B_TRUE);
+}
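
A worked example with hypothetical settings: DF_CHAN_ILEAVE_COD1_8CH (three hash bits), ddr_addr_start of 8, and only DF_DRAM_F_HASH_16_18 set. For a physical address of 0x12345 (bits 21-23 and 30-32 are all clear, so the other hash flags would not change the result):

	hash[0] = bit 8  ^ bit 16 = 1 ^ 1 = 0
	hash[1] = bit 12 ^ bit 17 = 0 ^ 0 = 0
	hash[2] = bit 13 ^ bit 18 = 1 ^ 0 = 1
	dec_ilv_chan = 0b100 = 4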
+
+/*
+ * This implements the standard NPS hash for power of 2 based channel
+ * configurations that is found in DFv4. For more information, please see the
+ * interleaving portion of the zen_umc.c big theory statement.
+ */
+static boolean_t
+zen_umc_decode_ileave_nps(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint32_t nchan_bit, nsock_bit;
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+ /*
+	 * The order of bits here is defined by AMD. Yes, we do start with the
+	 * defined address bit and then skip to bit 12.
+ */
+ const uint32_t addr_bits[4] = { rule->ddr_addr_start, 12, 13, 14 };
+
+ if (rule->ddr_die_ileave_bits != 0) {
+ dec->dec_fail = ZEN_UMC_DECODE_F_NPS_BAD_ILEAVE;
+ dec->dec_fail_data = dec->dec_df_ruleno;
+ return (B_FALSE);
+ }
+
+ nsock_bit = rule->ddr_sock_ileave_bits;
+ switch (rule->ddr_chan_ileave) {
+ case DF_CHAN_ILEAVE_NPS4_2CH:
+ nchan_bit = 1;
+ break;
+ case DF_CHAN_ILEAVE_NPS2_4CH:
+ nchan_bit = 2;
+ break;
+ case DF_CHAN_ILEAVE_NPS1_8CH:
+ nchan_bit = 3;
+ break;
+ default:
+ dec->dec_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP;
+ dec->dec_fail_data = rule->ddr_chan_ileave;
+ return (B_FALSE);
+ }
+
+ ASSERT3U(nchan_bit + nsock_bit, <=, 4);
+ dec->dec_ilv_sock = dec->dec_ilv_die = dec->dec_ilv_chan = 0;
+
+ for (uint_t i = 0; i < nchan_bit + nsock_bit; i++) {
+ uint8_t hash = 0;
+
+ hash = bitx64(dec->dec_ilv_pa, addr_bits[i], addr_bits[i]);
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_16_18) != 0) {
+ uint8_t val = bitx64(dec->dec_ilv_pa, 16 + i, 16 + i);
+ hash ^= val;
+ }
+
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_21_23) != 0) {
+ uint8_t val = bitx64(dec->dec_ilv_pa, 21 + i, 21 + i);
+ hash ^= val;
+ }
+
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_30_32) != 0) {
+ uint8_t val = bitx64(dec->dec_ilv_pa, 30 + i, 30 + i);
+ hash ^= val;
+ }
+
+ /*
+ * If this is the first bit and we're not doing socket
+ * interleaving, then we need to add bit 14 to the running hash.
+ */
+ if (i == 0 && nsock_bit == 0) {
+ uint8_t val = bitx64(dec->dec_ilv_pa, 14, 14);
+ hash ^= val;
+ }
+
+ /*
+ * If socket interleaving is going on we need to store the first
+ * bit as the socket hash and then redirect the remaining bits
+ * to the channel, taking into account that the shift will be
+ * adjusted as a result.
+ */
+ if (nsock_bit > 0) {
+ if (i == 0) {
+ dec->dec_ilv_sock = hash;
+ } else {
+ dec->dec_ilv_chan |= hash << (i - 1);
+ }
+ } else {
+ dec->dec_ilv_chan |= hash << i;
+ }
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * This implements the logic to perform the Zen 3 6ch special hash. It's worth
+ * calling out that unlike all other hash functions, this does not support the
+ * use of the DF_DRAM_F_HASH_16_18 flag.
+ */
+static void
+zen_umc_decode_hash_zen3_6ch(const df_dram_rule_t *rule, uint64_t pa,
+ uint8_t hashes[3])
+{
+ uint32_t addr_bit = rule->ddr_addr_start;
+ /*
+ * Yes, we use these in a weird order. No, there is no 64K.
+ */
+ const uint32_t bits_2M[3] = { 23, 21, 22 };
+ const uint32_t bits_1G[3] = { 32, 30, 31 };
+
+ hashes[0] = hashes[1] = hashes[2] = 0;
+ for (uint_t i = 0; i < ZEN_UMC_COD_NBITS; i++) {
+ hashes[i] = bitx64(pa, addr_bit + i, addr_bit + i);
+ if (i == 0) {
+ uint8_t val = bitx64(pa, addr_bit + 3, addr_bit + 3);
+ hashes[i] ^= val;
+ }
+
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_21_23) != 0) {
+ uint8_t val = bitx64(pa, bits_2M[i], bits_2M[i]);
+ hashes[i] ^= val;
+ }
+
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_30_32) != 0) {
+ uint8_t val = bitx64(pa, bits_1G[i], bits_1G[i]);
+ hashes[i] ^= val;
+ }
+ }
+}
+
+/*
+ * Perform Zen 3 6-channel hashing. This is pretty weird compared to others. See
+ * the zen_umc.c big theory statement for the thorny details.
+ */
+static boolean_t
+zen_umc_decode_ileave_zen3_6ch(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint8_t hashes[3] = { 0 };
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+ uint32_t addr_bit = rule->ddr_addr_start;
+
+ if (rule->ddr_sock_ileave_bits != 0 || rule->ddr_die_ileave_bits != 0) {
+ dec->dec_fail = ZEN_UMC_DECODE_F_COD_BAD_ILEAVE;
+ dec->dec_fail_data = dec->dec_df_ruleno;
+ return (B_FALSE);
+ }
+
+ zen_umc_decode_hash_zen3_6ch(rule, dec->dec_ilv_pa, hashes);
+ dec->dec_ilv_sock = dec->dec_ilv_die = dec->dec_ilv_chan = 0;
+ dec->dec_ilv_chan = hashes[0];
+ if (hashes[1] == 1 && hashes[2] == 1) {
+ uint64_t mod_addr = dec->dec_ilv_pa >> (addr_bit + 3);
+ dec->dec_ilv_chan |= (mod_addr % 3) << 1;
+ } else {
+ dec->dec_ilv_chan |= hashes[1] << 1;
+ dec->dec_ilv_chan |= hashes[2] << 2;
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * This is the standard hash function for the non-power of two based NPS hashes.
+ * See the big theory statement for more information. Unlike the normal NPS hash
+ * which uses bit 14 conditionally based on socket interleaving, here it is
+ * always used.
+ */
+static void
+zen_umc_decode_hash_nps_mod(const df_dram_rule_t *rule, uint64_t pa,
+ uint8_t hashes[3])
+{
+ const uint32_t addr_bits[3] = { rule->ddr_addr_start, 12, 13 };
+
+ for (uint_t i = 0; i < ZEN_UMC_NPS_MOD_NBITS; i++) {
+ hashes[i] = bitx64(pa, addr_bits[i], addr_bits[i]);
+ if (i == 0) {
+ uint8_t val = bitx64(pa, 14, 14);
+ hashes[i] ^= val;
+ }
+
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_16_18) != 0) {
+ uint8_t val = bitx64(pa, 16 + i, 16 + i);
+ hashes[i] ^= val;
+ }
+
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_21_23) != 0) {
+ uint8_t val = bitx64(pa, 21 + i, 21 + i);
+ hashes[i] ^= val;
+ }
+
+ if ((rule->ddr_flags & DF_DRAM_F_HASH_30_32) != 0) {
+ uint8_t val = bitx64(pa, 30 + i, 30 + i);
+ hashes[i] ^= val;
+ }
+ }
+}
+
+/*
+ * See the big theory statement in zen_umc.c which describes the rules for this
+ * computation. This is a little less weird than the Zen 3 one, but still,
+ * unique.
+ */
+static boolean_t
+zen_umc_decode_ileave_nps_mod(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint8_t hashes[3] = { 0 };
+ uint32_t nsock_bit, chan_mod;
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+
+ if (rule->ddr_die_ileave_bits != 0) {
+ dec->dec_fail = ZEN_UMC_DECODE_F_NPS_BAD_ILEAVE;
+ dec->dec_fail_data = dec->dec_df_ruleno;
+ return (B_FALSE);
+ }
+
+ nsock_bit = rule->ddr_sock_ileave_bits;
+ switch (rule->ddr_chan_ileave) {
+ case DF_CHAN_ILEAVE_NPS4_3CH:
+ case DF_CHAN_ILEAVE_NPS2_6CH:
+ case DF_CHAN_ILEAVE_NPS1_12CH:
+ chan_mod = 3;
+ break;
+ case DF_CHAN_ILEAVE_NPS2_5CH:
+ case DF_CHAN_ILEAVE_NPS1_10CH:
+ chan_mod = 5;
+ break;
+ default:
+ dec->dec_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP;
+ dec->dec_fail_data = rule->ddr_chan_ileave;
+ return (B_FALSE);
+ }
+
+ dec->dec_ilv_sock = dec->dec_ilv_die = dec->dec_ilv_chan = 0;
+ zen_umc_decode_hash_nps_mod(rule, dec->dec_ilv_pa, hashes);
+
+ if (nsock_bit > 0) {
+ ASSERT3U(nsock_bit, ==, 1);
+ dec->dec_ilv_sock = hashes[0];
+ }
+
+ dec->dec_ilv_chan = bitx64(dec->dec_ilv_pa, 63, 14) % chan_mod;
+ if (hashes[0] == 1) {
+ dec->dec_ilv_chan = (dec->dec_ilv_chan + 1) % chan_mod;
+ }
+
+ /*
+ * Use the remaining hash bits based on the number of channels. There is
+ * nothing else to do for 3/5 channel configs.
+ */
+ switch (rule->ddr_chan_ileave) {
+ case DF_CHAN_ILEAVE_NPS4_3CH:
+ case DF_CHAN_ILEAVE_NPS2_5CH:
+ break;
+ case DF_CHAN_ILEAVE_NPS2_6CH:
+ case DF_CHAN_ILEAVE_NPS1_10CH:
+ dec->dec_ilv_chan += hashes[2] * chan_mod;
+ break;
+ case DF_CHAN_ILEAVE_NPS1_12CH:
+ dec->dec_ilv_chan += ((hashes[2] << 1) | hashes[1]) * chan_mod;
+ break;
+ default:
+ dec->dec_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP;
+ dec->dec_fail_data = rule->ddr_chan_ileave;
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
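
A worked example with hypothetical settings: DF_CHAN_ILEAVE_NPS4_3CH, no socket interleaving, ddr_addr_start of 8, and no hash flags set. For a physical address of 0x20000 (only bit 17 set):

	hash[0] = bit 8 ^ bit 14 = 0 ^ 0 = 0
	dec_ilv_chan = bitx64(0x20000, 63, 14) % 3 = 8 % 3 = 2

	hash[0] is 0, so there is no +1 rotation, and the 3-channel case adds no
	further hash bits, leaving channel 2 on socket 0.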
+
+/*
+ * Our next task is to attempt to translate the PA and the DF rule from a system
+ * address into a normalized address and a particular DRAM channel that it's
+ * targeting. There are several things that we need to take into account here
+ * when performing interleaving and translation:
+ *
+ * o The DRAM Hole modifying our base address
+ * o The various interleave bits
+ * o Potentially hashing based on channel and global settings
+ * o Potential CS re-targeting registers (only on some systems)
+ * o Finally, the question of how to adjust for the DRAM hole and the base
+ * address changes based on the DF generation and channel configuration. This
+ * influences what address we start interleaving with.
+ *
+ * Note, this phase does not actually construct the normalized (e.g. channel)
+ * address. That's done in a subsequent step. For more background, please see
+ * the 'Data Fabric Interleaving' section of the zen_umc.c big theory statement.
+ */
+static boolean_t
+zen_umc_decode_sysaddr_to_csid(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint32_t sock, die, chan, remap_ruleset;
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+ const zen_umc_cs_remap_t *remap;
+
+ /*
+ * First, we must determine what the actual address used for
+ * interleaving is. This varies based on the interleaving and DF
+ * generation.
+ */
+ if (!zen_umc_determine_ileave_addr(umc, dec)) {
+ return (B_FALSE);
+ }
+
+ switch (rule->ddr_chan_ileave) {
+ case DF_CHAN_ILEAVE_1CH:
+ case DF_CHAN_ILEAVE_2CH:
+ case DF_CHAN_ILEAVE_4CH:
+ case DF_CHAN_ILEAVE_8CH:
+ case DF_CHAN_ILEAVE_16CH:
+ case DF_CHAN_ILEAVE_32CH:
+ if (!zen_umc_decode_ileave_nohash(umc, dec)) {
+ return (B_FALSE);
+ }
+ break;
+ case DF_CHAN_ILEAVE_COD4_2CH:
+ case DF_CHAN_ILEAVE_COD2_4CH:
+ case DF_CHAN_ILEAVE_COD1_8CH:
+ if (!zen_umc_decode_ileave_cod(umc, dec)) {
+ return (B_FALSE);
+ }
+ break;
+ case DF_CHAN_ILEAVE_NPS4_2CH:
+ case DF_CHAN_ILEAVE_NPS2_4CH:
+ case DF_CHAN_ILEAVE_NPS1_8CH:
+ if (!zen_umc_decode_ileave_nps(umc, dec)) {
+ return (B_FALSE);
+ }
+ break;
+ case DF_CHAN_ILEAVE_6CH:
+ if (!zen_umc_decode_ileave_zen3_6ch(umc, dec)) {
+ return (B_FALSE);
+ }
+ break;
+ case DF_CHAN_ILEAVE_NPS4_3CH:
+ case DF_CHAN_ILEAVE_NPS2_6CH:
+ case DF_CHAN_ILEAVE_NPS1_12CH:
+ case DF_CHAN_ILEAVE_NPS2_5CH:
+ case DF_CHAN_ILEAVE_NPS1_10CH:
+ if (!zen_umc_decode_ileave_nps_mod(umc, dec)) {
+ return (B_FALSE);
+ }
+ break;
+ default:
+ dec->dec_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP;
+ dec->dec_fail_data = rule->ddr_chan_ileave;
+ return (B_FALSE);
+ }
+
+ /*
+ * At this point we have dealt with decoding the interleave into the
+ * logical elements that it contains. We need to transform that back
+ * into a fabric ID, so we can add it to the base fabric ID in our rule.
+ * After that, we need to see if there is any CS remapping going on. If
+ * there is, we will replace the component part of the decomposed fabric
+ * ID. With that done, we can then transform the components back into
+ * our target fabric ID, which indicates which UMC we're after.
+ */
+ zen_fabric_id_compose(&umc->umc_decomp, dec->dec_ilv_sock,
+ dec->dec_ilv_die, dec->dec_ilv_chan, &dec->dec_ilv_fabid);
+ dec->dec_log_fabid = dec->dec_ilv_fabid + rule->ddr_dest_fabid;
+
+ /*
+ * If there's no remapping to do, then we're done. Simply assign the
+ * logical ID as our target.
+ */
+ zen_fabric_id_decompose(&umc->umc_decomp, dec->dec_log_fabid, &sock,
+ &die, &chan);
+ if ((rule->ddr_flags & DF_DRAM_F_REMAP_EN) == 0) {
+ dec->dec_targ_fabid = dec->dec_log_fabid;
+ return (B_TRUE);
+ }
+
+ /*
+ * The DF contains multiple remapping tables. We must figure out which
+ * of these to actually use. There are two different ways that this can
+ * work. The first way is the one added in DFv4 and is used since then.
+ * In that case, the DRAM rule includes both that remapping was enabled
+ * and which of the multiple mapping tables to use.
+ *
+ * This feature also exists prior to DFv4, but only in Milan. In that
+ * world, indicated by the DF_DRAM_F_REMAP_SOCK flag, there is one table
+ * in each DF per-socket. Based on the destination socket from the data
+ * fabric ID, you pick the actual table to use.
+ *
+ * Once the table has been selected, we maintain the socket and die
+ * portions of the fabric ID as constants and replace the component with
+ * the one the remapping table indicates.
+ *
+ * Technically each DF has its own copy of the remapping tables. To make
+ * this work we rely on the following assumption: a given DF node has to
+ * be able to fully route all DRAM rules to a target. That is, a given
+ * DF node doesn't really forward a system address to the remote die for
+ * further interleave processing and therefore we must have enough
+ * information here to map it totally from the same DF that we got the
+ * CCM rules from in the first place, DF 0.
+ */
+ if ((rule->ddr_flags & DF_DRAM_F_REMAP_SOCK) != 0) {
+ remap_ruleset = sock;
+ } else {
+ remap_ruleset = rule->ddr_remap_ent;
+ }
+
+ if (remap_ruleset >= dec->dec_df_rulesrc->zud_cs_nremap) {
+ dec->dec_fail = ZEN_UMC_DECODE_F_BAD_REMAP_SET;
+ dec->dec_fail_data = remap_ruleset;
+ return (B_FALSE);
+ }
+
+ remap = &dec->dec_df_rulesrc->zud_remap[remap_ruleset];
+ if (chan >= remap->csr_nremaps) {
+ dec->dec_fail = ZEN_UMC_DECODE_F_BAD_REMAP_ENTRY;
+ dec->dec_fail_data = chan;
+ return (B_FALSE);
+ }
+
+ dec->dec_remap_comp = remap->csr_remaps[chan];
+ if ((dec->dec_remap_comp & ~umc->umc_decomp.dfd_comp_mask) != 0) {
+ dec->dec_fail = ZEN_UMC_DECODE_F_REMAP_HAS_BAD_COMP;
+ dec->dec_fail_data = dec->dec_remap_comp;
+ return (B_FALSE);
+ }
+
+ zen_fabric_id_compose(&umc->umc_decomp, sock, die, dec->dec_remap_comp,
+ &dec->dec_targ_fabid);
+
+ return (B_TRUE);
+}
+
+/*
+ * Our next step here is to actually take our target ID and find the
+ * corresponding DF, UMC, and actual rule that was used. Note, we don't
+ * decompose the ID and look things up that way for a few reasons. While each
+ * UMC should map linearly to its instance/component ID, there are suggestions
+ * that they can be renumbered. This makes it simplest to just walk over
+ * everything (and there aren't that many things to walk over either).
+ */
+static boolean_t
+zen_umc_decode_find_umc_rule(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ for (uint_t dfno = 0; dfno < umc->umc_ndfs; dfno++) {
+ const zen_umc_df_t *df = &umc->umc_dfs[dfno];
+ for (uint_t umcno = 0; umcno < df->zud_nchan; umcno++) {
+ const zen_umc_chan_t *chan = &df->zud_chan[umcno];
+
+ if (chan->chan_fabid != dec->dec_targ_fabid) {
+ continue;
+ }
+
+ /*
+ * At this point we have found the UMC that we were
+ * looking for. Snapshot that and then figure out which
+ * rule index of it corresponds to our mapping so we can
+ * properly determine an offset. We will still use the
+ * primary CCM rule for all other calculations.
+ */
+ dec->dec_umc_chan = chan;
+ for (uint32_t ruleno = 0; ruleno < chan->chan_nrules;
+ ruleno++) {
+ const df_dram_rule_t *rule =
+ &chan->chan_rules[ruleno];
+ if ((rule->ddr_flags & DF_DRAM_F_VALID) == 0) {
+ continue;
+ }
+
+ if (dec->dec_pa >= rule->ddr_base &&
+ dec->dec_pa < rule->ddr_limit) {
+ dec->dec_umc_ruleno = ruleno;
+ return (B_TRUE);
+ }
+ }
+
+ dec->dec_fail = ZEN_UMC_DECODE_F_UMC_DOESNT_HAVE_PA;
+ return (B_FALSE);
+ }
+ }
+
+ dec->dec_fail = ZEN_UMC_DECODE_F_CANNOT_MAP_FABID;
+ return (B_FALSE);
+}
+
+/*
+ * Non-hashing interleave modes system address normalization logic. See the
+ * zen_umc.c big theory statement for more information.
+ */
+static boolean_t
+zen_umc_decode_normalize_nohash(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint_t nbits = 0;
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+
+ nbits += rule->ddr_sock_ileave_bits;
+ nbits += rule->ddr_die_ileave_bits;
+ switch (rule->ddr_chan_ileave) {
+ case DF_CHAN_ILEAVE_1CH:
+ break;
+ case DF_CHAN_ILEAVE_2CH:
+ nbits += 1;
+ break;
+ case DF_CHAN_ILEAVE_4CH:
+ nbits += 2;
+ break;
+ case DF_CHAN_ILEAVE_8CH:
+ nbits += 3;
+ break;
+ case DF_CHAN_ILEAVE_16CH:
+ nbits += 4;
+ break;
+ case DF_CHAN_ILEAVE_32CH:
+ nbits += 5;
+ break;
+ default:
+ dec->dec_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP;
+ dec->dec_fail_data = rule->ddr_chan_ileave;
+ return (B_FALSE);
+ }
+
+ /*
+ * If we have a really simple configuration (e.g. no interleaving at
+ * all), then make sure that we do not actually do anything here.
+ */
+ if (nbits > 0) {
+ dec->dec_norm_addr = bitdel64(dec->dec_norm_addr,
+ rule->ddr_addr_start + nbits - 1, rule->ddr_addr_start);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * COD/NPS system address normalization logic. See the zen_umc.c big theory
+ * statement for more information.
+ */
+static boolean_t
+zen_umc_decode_normalize_hash(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint_t nbits = 0;
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+
+ /*
+ * NPS hashes allow for socket interleaving, COD hashes do not. Add
+ * socket interleaving, skip die.
+ */
+ nbits += rule->ddr_sock_ileave_bits;
+ switch (rule->ddr_chan_ileave) {
+ case DF_CHAN_ILEAVE_COD4_2CH:
+ case DF_CHAN_ILEAVE_NPS4_2CH:
+ nbits += 1;
+ break;
+ case DF_CHAN_ILEAVE_COD2_4CH:
+ case DF_CHAN_ILEAVE_NPS2_4CH:
+ nbits += 2;
+ break;
+ case DF_CHAN_ILEAVE_COD1_8CH:
+ case DF_CHAN_ILEAVE_NPS1_8CH:
+ nbits += 3;
+ break;
+	default:
+		dec->dec_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP;
+		dec->dec_fail_data = rule->ddr_chan_ileave;
+		return (B_FALSE);
+	}
+
+ /*
+ * Always remove high order bits before low order bits so we don't have
+ * to adjust the bits we need to remove.
+ */
+ if (nbits > 1) {
+ uint_t start = 12;
+ uint_t end = start + (nbits - 2);
+ dec->dec_norm_addr = bitdel64(dec->dec_norm_addr, end, start);
+ }
+
+ dec->dec_norm_addr = bitdel64(dec->dec_norm_addr, rule->ddr_addr_start,
+ rule->ddr_addr_start);
+ return (B_TRUE);
+}
+
+/*
+ * Now it's time to perform normalization of our favorite interleaving type.
+ * Please see the comments in zen_umc.c on this to understand what we're doing
+ * here and why.
+ */
+static boolean_t
+zen_umc_decode_normalize_zen3_6ch(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint8_t hashes[3] = { 0 };
+ uint_t start, end;
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+
+ /*
+ * As per the theory statement, we always remove the hash bits here from
+ * the starting address. Because this is a 6-channel config, that turns
+ * into 3. Perform the hash again first.
+ */
+ zen_umc_decode_hash_zen3_6ch(rule, dec->dec_norm_addr, hashes);
+ start = rule->ddr_addr_start;
+ end = rule->ddr_addr_start + ZEN_UMC_COD_NBITS - 1;
+ dec->dec_norm_addr = bitdel64(dec->dec_norm_addr, end, start);
+
+ /*
+ * This is the case the theory statement warned about. This gets
+ * normalized to the top of the DIMM's range (its two upper most bits
+ * are set).
+ */
+ if (hashes[1] == 1 && hashes[2] == 1) {
+ uint_t start = 14 - ZEN_UMC_COD_NBITS +
+ dec->dec_umc_chan->chan_np2_space0;
+ dec->dec_norm_addr = bitset64(dec->dec_norm_addr, start + 1,
+ start, 0x3);
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * Based on the algorithm of sorts described in zen_umc.c, we have a few
+ * different phases of extraction and combination. This isn't quite like the
+ * others where we simply delete bits.
+ */
+static boolean_t
+zen_umc_decode_normalize_nps_mod(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint64_t low, high, mid;
+ uint_t nbits, chan_mod, sock_bits, nmid_bits;
+ uint_t mid_start, mid_end;
+ uint8_t hashes[3] = { 0 };
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+
+ sock_bits = rule->ddr_sock_ileave_bits;
+ switch (rule->ddr_chan_ileave) {
+ case DF_CHAN_ILEAVE_NPS4_3CH:
+ chan_mod = 3;
+ nbits = 1;
+ break;
+ case DF_CHAN_ILEAVE_NPS2_5CH:
+ chan_mod = 5;
+ nbits = 1;
+ break;
+ case DF_CHAN_ILEAVE_NPS2_6CH:
+ chan_mod = 3;
+ nbits = 2;
+ break;
+ case DF_CHAN_ILEAVE_NPS1_10CH:
+ chan_mod = 5;
+ nbits = 2;
+ break;
+ case DF_CHAN_ILEAVE_NPS1_12CH:
+ chan_mod = 3;
+ nbits = 3;
+ break;
+ default:
+ dec->dec_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP;
+ dec->dec_fail_data = rule->ddr_chan_ileave;
+ return (B_FALSE);
+ }
+
+ /*
+ * First extract the low bit range that we're using which is everything
+ * below the starting interleave address. We also always extract the
+ * high bits, which are always [63:14] and divide it by the modulus.
+ * Note, we apply the hash after any such division if needed. It becomes
+ * the new least significant bit.
+ */
+ low = bitx64(dec->dec_norm_addr, rule->ddr_addr_start - 1, 0);
+ high = bitx64(dec->dec_norm_addr, 63, 14) / chan_mod;
+ zen_umc_decode_hash_nps_mod(rule, dec->dec_norm_addr, hashes);
+ if (sock_bits == 0) {
+ high = (high << 1) | hashes[0];
+ }
+
+ /*
+ * Now for the weirdest bit here, extracting the middle bits. Recall
+ * this hash uses bit 8, then 13, then 12 (the hash order is still 8,
+ * 12, 13, but it uses the hashes[2] before hashes[1] in
+ * zen_umc_decode_ileave_nps_mod()). So if we're only using 1 interleave
+ * bit, we just remove bit 8 (assuming that is our starting address) and
+ * our range is [13:9]. If we're using two, our range becomes [12:9],
+ * and if three, [11:9]. The 6 - nbits below comes from the fact that in
+ * a 1 bit interleave we have 5 bits. Because our mid_start/mid_end
+ * range is inclusive, we subtract one at the end from mid_end.
+ */
+ nmid_bits = 6 - nbits;
+ mid_start = rule->ddr_addr_start + 1;
+ mid_end = mid_start + nmid_bits - 1;
+ mid = bitx64(dec->dec_norm_addr, mid_end, mid_start);
+
+ /*
+ * Because we've been removing bits, we don't use any of the start and
+ * ending ranges we calculated above for shifts, as that was what we
+ * needed from the original address.
+ */
+ dec->dec_norm_addr = low | (mid << rule->ddr_addr_start) | (high <<
+ (rule->ddr_addr_start + nmid_bits));
+
+ return (B_TRUE);
+}
+
+/*
+ * Now we need to go through and try to construct a normalized address using all
+ * the information that we've gathered to date. To do this we need to take into
+ * account all of the following transformations on the address that need to
+ * occur. We apply modifications to the address in the following order:
+ *
+ * o The base address of the rule
+ * o DRAM hole changes
+ * o Normalization of the address due to interleaving (more fun)
+ * o The DRAM offset register of the rule
+ */
+static boolean_t
+zen_umc_decode_sysaddr_to_norm(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ const zen_umc_chan_t *chan = dec->dec_umc_chan;
+ const df_dram_rule_t *rule = dec->dec_df_rule;
+
+ dec->dec_norm_addr = dec->dec_pa;
+ if (!zen_umc_adjust_dram_addr(umc, dec, &dec->dec_norm_addr,
+ ZEN_UMC_DECODE_F_CALC_NORM_UNDERFLOW)) {
+ return (B_FALSE);
+ }
+
+ /*
+ * Now for the most annoying part of this whole thing, normalizing based
+ * on our actual interleave format. The reason for this is that when
+	 * interleaving is going on, it is removing bits that are just being
+	 * used to direct it somewhere; however, generally speaking it is the
+	 * same value in each location. See the big theory
+ * statement in zen_umc.c for more information.
+ */
+ switch (rule->ddr_chan_ileave) {
+ case DF_CHAN_ILEAVE_1CH:
+ case DF_CHAN_ILEAVE_2CH:
+ case DF_CHAN_ILEAVE_4CH:
+ case DF_CHAN_ILEAVE_8CH:
+ case DF_CHAN_ILEAVE_16CH:
+ case DF_CHAN_ILEAVE_32CH:
+ if (!zen_umc_decode_normalize_nohash(umc, dec)) {
+ return (B_FALSE);
+ }
+ break;
+ case DF_CHAN_ILEAVE_COD4_2CH:
+ case DF_CHAN_ILEAVE_COD2_4CH:
+ case DF_CHAN_ILEAVE_COD1_8CH:
+ case DF_CHAN_ILEAVE_NPS4_2CH:
+ case DF_CHAN_ILEAVE_NPS2_4CH:
+ case DF_CHAN_ILEAVE_NPS1_8CH:
+ if (!zen_umc_decode_normalize_hash(umc, dec)) {
+ return (B_FALSE);
+ }
+ break;
+ case DF_CHAN_ILEAVE_6CH:
+ if (!zen_umc_decode_normalize_zen3_6ch(umc, dec)) {
+ return (B_FALSE);
+ }
+ break;
+ case DF_CHAN_ILEAVE_NPS4_3CH:
+ case DF_CHAN_ILEAVE_NPS2_6CH:
+ case DF_CHAN_ILEAVE_NPS1_12CH:
+ case DF_CHAN_ILEAVE_NPS2_5CH:
+ case DF_CHAN_ILEAVE_NPS1_10CH:
+ if (!zen_umc_decode_normalize_nps_mod(umc, dec)) {
+ return (B_FALSE);
+ }
+ break;
+ default:
+ dec->dec_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP;
+ dec->dec_fail_data = rule->ddr_chan_ileave;
+ return (B_FALSE);
+ }
+
+ /*
+ * Determine if this rule has an offset to apply. Note, there is never
+ * an offset for rule 0, hence the index into this is one less than the
+ * actual rule number. Unlike other transformations these offsets
+ * describe the start of a normalized range. Therefore we need to
+ * actually add this value instead of subtract.
+ */
+ if (dec->dec_umc_ruleno > 0) {
+ uint32_t offno = dec->dec_umc_ruleno - 1;
+ const chan_offset_t *offset = &chan->chan_offsets[offno];
+
+ if (offset->cho_valid) {
+ dec->dec_norm_addr += offset->cho_offset;
+ }
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * This applies the formula that determines whether a chip-select actually
+ * matches, which is defined as (address & ~mask) == (base & ~mask) in the PPR.
+ * There is both a primary and secondary mask here. We need to pay attention to
+ * which one is used (if any) for later on.
+ */
+static boolean_t
+zen_umc_decoder_cs_matches(const umc_cs_t *cs, const uint64_t norm,
+ boolean_t *matched_sec)
+{
+ if (cs->ucs_base.udb_valid != 0) {
+ uint64_t imask = ~cs->ucs_base_mask;
+ if ((norm & imask) == (cs->ucs_base.udb_base & imask)) {
+ *matched_sec = B_FALSE;
+ return (B_TRUE);
+ }
+ }
+
+ if (cs->ucs_sec.udb_valid != 0) {
+ uint64_t imask = ~cs->ucs_sec_mask;
+ if ((norm & imask) == (cs->ucs_sec.udb_base & imask)) {
+ *matched_sec = B_TRUE;
+ return (B_TRUE);
+ }
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * Go through with our normalized address and map it to a given chip-select.
+ * This as a side effect indicates which DIMM we're going out on as well. Note,
+ * the final DIMM can change due to chip-select hashing; however, we use this
+ * DIMM for determining all of the actual address translations.
+ */
+static boolean_t
+zen_umc_decode_find_cs(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ const zen_umc_chan_t *chan = dec->dec_umc_chan;
+
+ for (uint_t dimmno = 0; dimmno < ZEN_UMC_MAX_DIMMS; dimmno++) {
+ const umc_dimm_t *dimm = &chan->chan_dimms[dimmno];
+
+ if ((dimm->ud_flags & UMC_DIMM_F_VALID) == 0)
+ continue;
+
+ for (uint_t csno = 0; csno < ZEN_UMC_MAX_CS_PER_DIMM; csno++) {
+ const umc_cs_t *cs = &dimm->ud_cs[csno];
+ boolean_t is_sec = B_FALSE;
+
+ if (zen_umc_decoder_cs_matches(cs, dec->dec_norm_addr,
+ &is_sec)) {
+ dec->dec_dimm = dimm;
+ dec->dec_cs = cs;
+ dec->dec_log_csno = dimmno * ZEN_UMC_MAX_DIMMS +
+ csno;
+ dec->dec_cs_sec = is_sec;
+ return (B_TRUE);
+ }
+ }
+ }
+
+ dec->dec_fail = ZEN_UMC_DECODE_F_NO_CS_BASE_MATCH;
+ return (B_FALSE);
+}
+
+/*
+ * Extract the column from the address. For once, something that is almost
+ * straightforward.
+ */
+static boolean_t
+zen_umc_decode_cols(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint32_t cols = 0;
+ const umc_cs_t *cs = dec->dec_cs;
+
+ for (uint_t i = 0; i < cs->ucs_ncol; i++) {
+ uint32_t index;
+
+ index = cs->ucs_col_bits[i];
+ cols |= bitx64(dec->dec_norm_addr, index, index) << i;
+ }
+
+ dec->dec_dimm_col = cols;
+ return (B_TRUE);
+}
+
+/*
+ * The row is split into two different regions. There's a low and high value,
+ * though the high value is only present in DDR4. Unlike the column, where each
+ * bit is spelled out, each set of row bits are contiguous (low and high are
+ * independent).
+ */
+static boolean_t
+zen_umc_decode_rows(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint32_t row = 0;
+	uint32_t inv;
+ const umc_cs_t *cs = dec->dec_cs;
+ const uint_t total_bits = cs->ucs_nrow_lo + cs->ucs_nrow_hi;
+ const uint_t lo_end = cs->ucs_nrow_lo + cs->ucs_row_low_bit - 1;
+
+ row = bitx64(dec->dec_norm_addr, lo_end, cs->ucs_row_low_bit);
+ if (cs->ucs_nrow_hi > 0) {
+ const uint_t hi_end = cs->ucs_nrow_hi + cs->ucs_row_hi_bit - 1;
+ const uint32_t hi = bitx64(dec->dec_norm_addr, hi_end,
+ cs->ucs_row_hi_bit);
+
+ row |= hi << cs->ucs_nrow_lo;
+ }
+
+ if (dec->dec_cs_sec) {
+ inv = cs->ucs_inv_msbs_sec;
+ } else {
+ inv = cs->ucs_inv_msbs;
+ }
+
+ /*
+ * We need to potentially invert the top two bits of the row address
+ * based on the low two bits of the inverted register below. Note, inv
+ * only has two valid bits below. So we shift them into place to perform
+ * the XOR. See the big theory statement in zen_umc.c for more on why
+ * this works.
+ */
+ inv = inv << (total_bits - 2);
+ row = row ^ inv;
+
+ dec->dec_dimm_row = row;
+ return (B_TRUE);
+}
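
A worked example of the inversion, assuming a hypothetical chip-select with 16 total row bits and ucs_inv_msbs set to 0x3 (both MSBs inverted), for a raw row value of 0x1234:

	inv << (16 - 2) = 0x3 << 14 = 0xc000
	row = 0x1234 ^ 0xc000 = 0xd234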
+
+/*
+ * Several of the hash schemes ask us to go through and xor all the bits that
+ * are in an address to transform it into a single bit. This implements that for
+ * a uint32_t. This is basically a bitwise XOR reduce.
+ */
+static uint8_t
+zen_umc_running_xor32(const uint32_t in)
+{
+ uint8_t run = 0;
+
+ for (uint_t i = 0; i < sizeof (in) * NBBY; i++) {
+ run ^= bitx32(in, i, i);
+ }
+
+ return (run);
+}
+
+static uint8_t
+zen_umc_running_xor64(const uint64_t in)
+{
+ uint8_t run = 0;
+
+ for (uint_t i = 0; i < sizeof (in) * NBBY; i++) {
+ run ^= bitx64(in, i, i);
+ }
+
+ return (run);
+}
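
In other words, each of these returns the parity of its input, e.g. (arbitrary values):

	zen_umc_running_xor32(0x8001) = 0	(two bits set)
	zen_umc_running_xor32(0x8003) = 1	(three bits set)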
+
+/*
+ * Our goal here is to extract the bank and bank group bits that are
+ * used, if any.
+ */
+static boolean_t
+zen_umc_decode_banks(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint8_t bank = 0;
+ const umc_cs_t *cs = dec->dec_cs;
+ const umc_chan_hash_t *hash = &dec->dec_umc_chan->chan_hash;
+
+ /*
+ * Get an initial bank address bit and then perform any hashing if
+ * bank hashing is enabled. Note, the memory controller's nbanks is the
+ * total number of bank and bank group bits, hence why it's used for
+ * the loop counter.
+ */
+ for (uint_t i = 0; i < cs->ucs_nbanks; i++) {
+ uint32_t row_hash, col_hash;
+ uint8_t row_xor, col_xor;
+ uint_t targ = cs->ucs_bank_bits[i];
+ uint8_t val = bitx64(dec->dec_norm_addr, targ, targ);
+ const umc_bank_hash_t *bank_hash = &hash->uch_bank_hashes[i];
+
+ if ((hash->uch_flags & UMC_CHAN_HASH_F_BANK) == 0 ||
+ !hash->uch_bank_hashes[i].ubh_en) {
+ bank |= val << i;
+ continue;
+ }
+
+ /*
+ * See the big theory statement for more on this. Short form,
+ * bit-wise AND the row and column, then XOR shenanigans.
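+		 * For example, if ubh_row_xor selects two row bits and both
+		 * happen to be set, their XOR reduce is 0 and that term
+		 * leaves the bank bit untouched.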
+ */
+ row_hash = dec->dec_dimm_row & bank_hash->ubh_row_xor;
+ col_hash = dec->dec_dimm_col & bank_hash->ubh_col_xor;
+ row_xor = zen_umc_running_xor32(row_hash);
+ col_xor = zen_umc_running_xor32(col_hash);
+ bank |= (row_xor ^ col_xor ^ val) << i;
+ }
+
+ /*
+ * The bank and bank group are conjoined in the register and bit
+ * definitions. Once we've calculated that, extract it.
+ */
+ dec->dec_dimm_bank_group = bitx8(bank, cs->ucs_nbank_groups - 1, 0);
+ dec->dec_dimm_bank = bitx8(bank, cs->ucs_nbanks, cs->ucs_nbank_groups);
+ return (B_TRUE);
+}
+
+/*
+ * Extract the sub-channel. If not a DDR5 based device, simply set it to zero
+ * and return. We can't forget to hash this if required.
+ */
+static boolean_t
+zen_umc_decode_subchan(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint8_t subchan;
+ uint32_t row_hash, col_hash, bank_hash;
+ uint8_t row_xor, col_xor, bank_xor;
+ const umc_cs_t *cs = dec->dec_cs;
+ const umc_chan_hash_t *hash = &dec->dec_umc_chan->chan_hash;
+
+ switch (dec->dec_dimm->ud_type) {
+ case UMC_DIMM_T_DDR5:
+ case UMC_DIMM_T_LPDDR5:
+ break;
+ default:
+ dec->dec_dimm_subchan = 0;
+ return (B_TRUE);
+ }
+
+ subchan = bitx64(dec->dec_norm_addr, cs->ucs_subchan, cs->ucs_subchan);
+ if ((hash->uch_flags & UMC_CHAN_HASH_F_PC) == 0 ||
+ !hash->uch_pc_hash.uph_en) {
+ dec->dec_dimm_subchan = subchan;
+ return (B_TRUE);
+ }
+
+ row_hash = dec->dec_dimm_row & hash->uch_pc_hash.uph_row_xor;
+ col_hash = dec->dec_dimm_col & hash->uch_pc_hash.uph_col_xor;
+ bank_hash = dec->dec_dimm_bank & hash->uch_pc_hash.uph_bank_xor;
+ row_xor = zen_umc_running_xor32(row_hash);
+ col_xor = zen_umc_running_xor32(col_hash);
+ bank_xor = zen_umc_running_xor32(bank_hash);
+
+ dec->dec_dimm_subchan = subchan ^ row_xor ^ col_xor ^ bank_xor;
+ return (B_TRUE);
+}
+
+/*
+ * Note that we have normalized the RM bits between the primary and secondary
+ * base/mask registers. That way, even though the DDR5 controller always uses
+ * the same RM selection bits, this works uniformly for both DDR4 and DDR5.
+ */
+static boolean_t
+zen_umc_decode_rank_mul(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint8_t rm = 0;
+ const umc_cs_t *cs = dec->dec_cs;
+ const umc_chan_hash_t *hash = &dec->dec_umc_chan->chan_hash;
+
+ for (uint_t i = 0; i < cs->ucs_nrm; i++) {
+ uint8_t index = cs->ucs_rm_bits[i];
+ uint8_t bit = bitx64(dec->dec_norm_addr, index, index);
+
+ if ((hash->uch_flags & UMC_CHAN_HASH_F_RM) != 0 &&
+ hash->uch_rm_hashes[i].uah_en) {
+ uint64_t norm_mask = dec->dec_norm_addr &
+ hash->uch_rm_hashes[i].uah_addr_xor;
+ uint8_t norm_hash = zen_umc_running_xor64(norm_mask);
+ bit = bit ^ norm_hash;
+ }
+
+ rm |= bit << i;
+ }
+
+ dec->dec_dimm_rm = rm;
+ return (B_TRUE);
+}
+
+/*
+ * Go through and determine the actual chip-select activated. This is subject to
+ * hashing. Note, we first constructed a logical chip-select value based on
+ * which of the four base/mask registers in the UMC we activated for the
+ * channel. That basically seeded the two bit value we start with.
+ */
+static boolean_t
+zen_umc_decode_chipsel(const zen_umc_t *umc, zen_umc_decoder_t *dec)
+{
+ uint8_t csno = 0;
+ const umc_cs_t *cs = dec->dec_cs;
+ const umc_chan_hash_t *hash = &dec->dec_umc_chan->chan_hash;
+
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CS_BITS; i++) {
+ uint8_t bit = bitx8(dec->dec_log_csno, i, i);
+ if ((hash->uch_flags & UMC_CHAN_HASH_F_CS) != 0 &&
+ hash->uch_cs_hashes[i].uah_en) {
+ uint64_t mask = dec->dec_norm_addr &
+ hash->uch_cs_hashes[i].uah_addr_xor;
+ uint8_t rxor = zen_umc_running_xor64(mask);
+ bit = bit ^ rxor;
+ }
+ csno |= bit << i;
+ }
+
+ /*
+ * It is not entirely clear what the circumstances are that we need to
+ * apply the chip-select xor. Right now we always apply it. This only
+ * exists on a few DDR5 SoCs, it seems, and we zero out other cases to
+ * try and have a uniform and reasonable path. This tells us what the
+ * absolute chip-select is in the channel. We record this for debugging
+ * purposes and to derive the DIMM and CS.
+ */
+ dec->dec_chan_csno = (csno ^ cs->ucs_cs_xor) & 0x3;
+
+ /*
+	 * Now that we actually know which chip-select we're targeting, go back
+	 * and indicate which DIMM we'll go out to and which chip-select it is
+	 * relative to that DIMM. This may have changed due to CS hashing, so
+	 * we must snapshot our final DIMM and chip-select here.
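+	 * For example, channel chip-select 3 maps to DIMM 1, chip-select 1.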
+ */
+ dec->dec_dimm_no = dec->dec_chan_csno >> 1;
+ dec->dec_dimm_csno = dec->dec_chan_csno % 2;
+ return (B_TRUE);
+}
+
+/*
+ * Initialize the decoder state. We do this by first zeroing it all and then
+ * setting various result addresses to the UINTXX_MAX that is appropriate. These
+ * work as better sentinel values than zero; however, we always zero the
+ * structure to be defensive, cover pointers, etc.
+ */
+static void
+zen_umc_decoder_init(zen_umc_decoder_t *dec)
+{
+ bzero(dec, sizeof (*dec));
+
+ dec->dec_pa = dec->dec_ilv_pa = UINT64_MAX;
+ dec->dec_df_ruleno = UINT32_MAX;
+ dec->dec_ilv_sock = dec->dec_ilv_die = dec->dec_ilv_chan =
+ dec->dec_ilv_fabid = dec->dec_log_fabid = dec->dec_remap_comp =
+ dec->dec_targ_fabid = UINT32_MAX;
+ dec->dec_umc_ruleno = UINT32_MAX;
+ dec->dec_norm_addr = UINT64_MAX;
+ dec->dec_dimm_col = dec->dec_dimm_row = UINT32_MAX;
+ dec->dec_log_csno = dec->dec_dimm_bank = dec->dec_dimm_bank_group =
+ dec->dec_dimm_subchan = dec->dec_dimm_rm = dec->dec_chan_csno =
+ dec->dec_dimm_no = dec->dec_dimm_csno = UINT8_MAX;
+}
+
+boolean_t
+zen_umc_decode_pa(const zen_umc_t *umc, const uint64_t pa,
+ zen_umc_decoder_t *dec)
+{
+ zen_umc_decoder_init(dec);
+ dec->dec_pa = pa;
+
+ /*
+ * Before we proceed through decoding, the first thing we should try to
+ * do is verify that this is even something that could be DRAM.
+ */
+ if (!zen_umc_decode_is_dram(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ /*
+ * The very first thing that we need to do is find a data fabric rule
+ * that corresponds to this memory address. This will be used to
+ * determine which set of rules for interleave and related we actually
+ * should then use.
+ */
+ if (!zen_umc_decode_find_df_rule(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ /*
+ * Now that we have a DF rule, we must take a more involved step of
+ * mapping to a given CS, e.g. a specific UMC channel. This will tell us
+ * the socket and die as well. This takes care of all the interleaving
+ * and remapping and produces a target fabric ID.
+ */
+ if (!zen_umc_decode_sysaddr_to_csid(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ /*
+ * With that target ID known, now actually map this to a corresponding
+ * UMC.
+ */
+ if (!zen_umc_decode_find_umc_rule(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ /*
+ * With the target and corresponding rules and offset information,
+ * actually perform normalization.
+ */
+ if (!zen_umc_decode_sysaddr_to_norm(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ /*
+ * Finally, we somehow managed to actually construct a normalized
+ * address. Now we must begin the act of transforming this channel
+	 * address into something that makes sense to address a DIMM. To start
+	 * with, we determine which logical chip-select matches, which tells us
+	 * where we will source our data from.
+ */
+ if (!zen_umc_decode_find_cs(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ /*
+	 * Now that we have matched the logical chip-select that we're sourcing
+	 * our data from, the next step is a bit more involved: we need to
+ * extract the row, column, rank/rank multiplication, bank, and bank
+ * group out of all this, while taking into account all of our hashes.
+ *
+ * To do this, we begin by first calculating the row and column as those
+ * will be needed to determine some of our other values here.
+ */
+ if (!zen_umc_decode_rows(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ if (!zen_umc_decode_cols(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ /*
+ * Now that we have the rows and columns we can go through and determine
+ * the bank and bank group. This depends on the above.
+ */
+ if (!zen_umc_decode_banks(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ /*
+ * If we have a DDR5 generation DIMM then we need to consider the
+ * subchannel. This doesn't exist in DDR4 systems (the function handles
+ * this reality). Because of potential hashing, this needs to come after
+ * the row, column, and bank have all been determined.
+ */
+ if (!zen_umc_decode_subchan(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ /*
+ * Time for the last two pieces here: the actual chip select used and
+ * then figuring out which rank, taking into account rank
+ * multiplication. Don't worry, these both have hashing opportunities.
+ */
+ if (!zen_umc_decode_rank_mul(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ if (!zen_umc_decode_chipsel(umc, dec)) {
+ ASSERT3U(dec->dec_fail, !=, ZEN_UMC_DECODE_F_NONE);
+ return (B_FALSE);
+ }
+
+ /*
+ * Somehow, that's it.
+ */
+ return (B_TRUE);
+}
diff --git a/usr/src/common/mc/zen_umc/zen_umc_dump.c b/usr/src/common/mc/zen_umc/zen_umc_dump.c
new file mode 100644
index 0000000000..da3c2cc095
--- /dev/null
+++ b/usr/src/common/mc/zen_umc/zen_umc_dump.c
@@ -0,0 +1,717 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * Dump and restore logic for external processing. Dump generally runs in
+ * kernel context from a well-formed structure created by the driver. Restore
+ * is used in userland as part of testing and related tooling.
+ *
+ * Note, there are a lot of fields in these structures that are not serialized
+ * because they are not used as part of the decoder (e.g. the various raw values
+ * which are captured to aid future debugging).
+ */
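+
+/*
+ * As a rough usage sketch (the buffer and address names here are
+ * hypothetical), a userland consumer that has a packed copy of a dump can
+ * restore it and then decode addresses roughly as follows:
+ *
+ *	nvlist_t *nvl;
+ *	zen_umc_t umc;
+ *	zen_umc_decoder_t dec;
+ *
+ *	if (nvlist_unpack(buf, buflen, &nvl, 0) == 0 &&
+ *	    zen_umc_restore_decoder(nvl, &umc)) {
+ *		(void) zen_umc_decode_pa(&umc, pa, &dec);
+ *	}
+ */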
+
+#include "zen_umc.h"
+#ifndef _KERNEL
+#include <string.h>
+#include <strings.h>
+#include <libnvpair.h>
+#endif
+
+static nvlist_t *
+zen_umc_dump_dram_rule(df_dram_rule_t *rule)
+{
+ nvlist_t *nvl;
+
+ nvl = fnvlist_alloc();
+ fnvlist_add_uint32(nvl, "ddr_flags", rule->ddr_flags);
+ fnvlist_add_uint64(nvl, "ddr_base", rule->ddr_base);
+ fnvlist_add_uint64(nvl, "ddr_limit", rule->ddr_limit);
+ fnvlist_add_uint16(nvl, "ddr_dest_fabid", rule->ddr_dest_fabid);
+ fnvlist_add_uint8(nvl, "ddr_sock_ileave_bits",
+ rule->ddr_sock_ileave_bits);
+ fnvlist_add_uint8(nvl, "ddr_die_ileave_bits",
+ rule->ddr_die_ileave_bits);
+ fnvlist_add_uint8(nvl, "ddr_addr_start", rule->ddr_addr_start);
+ fnvlist_add_uint8(nvl, "ddr_remap_ent", rule->ddr_remap_ent);
+ fnvlist_add_uint32(nvl, "ddr_chan_ileave", rule->ddr_chan_ileave);
+
+ return (nvl);
+}
+
+static nvlist_t *
+zen_umc_dump_cs(umc_cs_t *cs)
+{
+ nvlist_t *nvl = fnvlist_alloc();
+ nvlist_t *base = fnvlist_alloc();
+ nvlist_t *sec = fnvlist_alloc();
+
+ fnvlist_add_uint64(base, "udb_base", cs->ucs_base.udb_base);
+ fnvlist_add_uint8(base, "udb_valid", cs->ucs_base.udb_valid);
+ fnvlist_add_nvlist(nvl, "ucs_base", base);
+ nvlist_free(base);
+ fnvlist_add_uint64(sec, "udb_base", cs->ucs_sec.udb_base);
+ fnvlist_add_uint8(sec, "udb_valid", cs->ucs_sec.udb_valid);
+ fnvlist_add_nvlist(nvl, "ucs_sec", sec);
+ nvlist_free(sec);
+ fnvlist_add_uint64(nvl, "ucs_base_mask", cs->ucs_base_mask);
+ fnvlist_add_uint64(nvl, "ucs_sec_mask", cs->ucs_sec_mask);
+ fnvlist_add_uint8(nvl, "ucs_nrow_lo", cs->ucs_nrow_lo);
+ fnvlist_add_uint8(nvl, "ucs_nrow_hi", cs->ucs_nrow_hi);
+ fnvlist_add_uint8(nvl, "ucs_nbank_groups", cs->ucs_nbank_groups);
+ fnvlist_add_uint8(nvl, "ucs_cs_xor", cs->ucs_cs_xor);
+ fnvlist_add_uint8(nvl, "ucs_row_hi_bit", cs->ucs_row_hi_bit);
+ fnvlist_add_uint8(nvl, "ucs_row_low_bit", cs->ucs_row_low_bit);
+ fnvlist_add_uint8_array(nvl, "ucs_bank_bits", cs->ucs_bank_bits,
+ cs->ucs_nbanks);
+ fnvlist_add_uint8_array(nvl, "ucs_col_bits", cs->ucs_col_bits,
+ cs->ucs_ncol);
+ fnvlist_add_uint8(nvl, "ucs_inv_msbs", cs->ucs_inv_msbs);
+ fnvlist_add_uint8_array(nvl, "ucs_rm_bits", cs->ucs_rm_bits,
+ cs->ucs_nrm);
+ fnvlist_add_uint8(nvl, "ucs_inv_msbs_sec", cs->ucs_inv_msbs_sec);
+ fnvlist_add_uint8_array(nvl, "ucs_rm_bits_sec", cs->ucs_rm_bits_sec,
+ cs->ucs_nrm);
+ fnvlist_add_uint8(nvl, "ucs_subchan", cs->ucs_subchan);
+
+ return (nvl);
+}
+
+static nvlist_t *
+zen_umc_dump_dimm(umc_dimm_t *dimm)
+{
+ nvlist_t *nvl = fnvlist_alloc();
+ nvlist_t *cs[ZEN_UMC_MAX_CS_PER_DIMM];
+
+ fnvlist_add_uint32(nvl, "ud_flags", dimm->ud_flags);
+ fnvlist_add_uint32(nvl, "ud_width", dimm->ud_width);
+ fnvlist_add_uint32(nvl, "ud_type", dimm->ud_type);
+ fnvlist_add_uint32(nvl, "ud_kind", dimm->ud_kind);
+ fnvlist_add_uint32(nvl, "ud_dimmno", dimm->ud_dimmno);
+
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CS_PER_DIMM; i++) {
+ cs[i] = zen_umc_dump_cs(&dimm->ud_cs[i]);
+ }
+ fnvlist_add_nvlist_array(nvl, "ud_cs", cs, ZEN_UMC_MAX_CS_PER_DIMM);
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CS_PER_DIMM; i++) {
+ nvlist_free(cs[i]);
+ }
+
+ return (nvl);
+}
+
+static nvlist_t *
+zen_umc_dump_chan_hash(umc_chan_hash_t *hash)
+{
+ nvlist_t *nvl = fnvlist_alloc();
+
+ fnvlist_add_uint32(nvl, "uch_flags", hash->uch_flags);
+
+ if (hash->uch_flags & UMC_CHAN_HASH_F_BANK) {
+ nvlist_t *banks[ZEN_UMC_MAX_CHAN_BANK_HASH];
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CHAN_BANK_HASH; i++) {
+ banks[i] = fnvlist_alloc();
+
+ fnvlist_add_uint32(banks[i], "ubh_row_xor",
+ hash->uch_bank_hashes[i].ubh_row_xor);
+ fnvlist_add_uint32(banks[i], "ubh_col_xor",
+ hash->uch_bank_hashes[i].ubh_col_xor);
+ fnvlist_add_boolean_value(banks[i], "ubh_en",
+ hash->uch_bank_hashes[i].ubh_en);
+ }
+ fnvlist_add_nvlist_array(nvl, "uch_bank_hashes", banks,
+ ZEN_UMC_MAX_CHAN_BANK_HASH);
+
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CHAN_BANK_HASH; i++) {
+ nvlist_free(banks[i]);
+ }
+ }
+
+ if (hash->uch_flags & UMC_CHAN_HASH_F_RM) {
+ nvlist_t *rm[ZEN_UMC_MAX_CHAN_RM_HASH];
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CHAN_RM_HASH; i++) {
+ rm[i] = fnvlist_alloc();
+
+ fnvlist_add_uint64(rm[i], "uah_addr_xor",
+ hash->uch_rm_hashes[i].uah_addr_xor);
+ fnvlist_add_boolean_value(rm[i], "uah_en",
+ hash->uch_rm_hashes[i].uah_en);
+ }
+ fnvlist_add_nvlist_array(nvl, "uch_rm_hashes", rm,
+ ZEN_UMC_MAX_CHAN_RM_HASH);
+
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CHAN_RM_HASH; i++) {
+ nvlist_free(rm[i]);
+ }
+ }
+
+ if (hash->uch_flags & UMC_CHAN_HASH_F_CS) {
+ nvlist_t *cs[ZEN_UMC_MAX_CHAN_CS_HASH];
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CHAN_CS_HASH; i++) {
+ cs[i] = fnvlist_alloc();
+
+			fnvlist_add_uint64(cs[i], "uah_addr_xor",
+			    hash->uch_cs_hashes[i].uah_addr_xor);
+			fnvlist_add_boolean_value(cs[i], "uah_en",
+			    hash->uch_cs_hashes[i].uah_en);
+ }
+ fnvlist_add_nvlist_array(nvl, "uch_cs_hashes", cs,
+ ZEN_UMC_MAX_CHAN_CS_HASH);
+
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CHAN_CS_HASH; i++) {
+ nvlist_free(cs[i]);
+ }
+ }
+
+ if (hash->uch_flags & UMC_CHAN_HASH_F_PC) {
+ nvlist_t *pc = fnvlist_alloc();
+
+ fnvlist_add_uint32(pc, "uph_row_xor",
+ hash->uch_pc_hash.uph_row_xor);
+ fnvlist_add_uint32(pc, "uph_col_xor",
+ hash->uch_pc_hash.uph_col_xor);
+ fnvlist_add_uint8(pc, "uph_bank_xor",
+ hash->uch_pc_hash.uph_bank_xor);
+ fnvlist_add_boolean_value(pc, "uph_en",
+ hash->uch_pc_hash.uph_en);
+
+ fnvlist_add_nvlist(nvl, "uch_pch_hash", pc);
+ fnvlist_free(pc);
+
+ }
+
+ return (nvl);
+}
+
+static nvlist_t *
+zen_umc_dump_chan(zen_umc_chan_t *chan)
+{
+ nvlist_t *nvl, *hash;
+ nvlist_t *rules[ZEN_UMC_MAX_CS_RULES];
+ nvlist_t *offsets[ZEN_UMC_MAX_DRAM_OFFSET];
+ nvlist_t *dimms[ZEN_UMC_MAX_DIMMS];
+
+ nvl = fnvlist_alloc();
+ fnvlist_add_uint32(nvl, "chan_flags", chan->chan_flags);
+ fnvlist_add_uint32(nvl, "chan_fabid", chan->chan_fabid);
+ fnvlist_add_uint32(nvl, "chan_instid", chan->chan_instid);
+ fnvlist_add_uint32(nvl, "chan_logid", chan->chan_logid);
+ fnvlist_add_uint32(nvl, "chan_np2_space0", chan->chan_np2_space0);
+
+ for (uint_t i = 0; i < chan->chan_nrules; i++) {
+ rules[i] = zen_umc_dump_dram_rule(&chan->chan_rules[i]);
+ }
+
+ for (uint_t i = 0; i < chan->chan_nrules - 1; i++) {
+ offsets[i] = fnvlist_alloc();
+ fnvlist_add_boolean_value(offsets[i], "cho_valid",
+ chan->chan_offsets[i].cho_valid);
+ fnvlist_add_uint64(offsets[i], "cho_offset",
+ chan->chan_offsets[i].cho_offset);
+ }
+
+ for (uint_t i = 0; i < ZEN_UMC_MAX_DIMMS; i++) {
+ dimms[i] = zen_umc_dump_dimm(&chan->chan_dimms[i]);
+ }
+
+ fnvlist_add_nvlist_array(nvl, "chan_rules", rules, chan->chan_nrules);
+ fnvlist_add_nvlist_array(nvl, "chan_offsets", offsets,
+ chan->chan_nrules - 1);
+ fnvlist_add_nvlist_array(nvl, "chan_dimms", dimms, ZEN_UMC_MAX_DIMMS);
+ hash = zen_umc_dump_chan_hash(&chan->chan_hash);
+ fnvlist_add_nvlist(nvl, "chan_hash", hash);
+
+ for (uint_t i = 0; i < chan->chan_nrules; i++) {
+ nvlist_free(rules[i]);
+ }
+
+ for (uint_t i = 0; i < chan->chan_nrules - 1; i++) {
+ nvlist_free(offsets[i]);
+ }
+
+ for (uint_t i = 0; i < ZEN_UMC_MAX_DIMMS; i++) {
+ nvlist_free(dimms[i]);
+ }
+
+ nvlist_free(hash);
+
+ return (nvl);
+}
+
+static nvlist_t *
+zen_umc_dump_df(zen_umc_df_t *df)
+{
+ nvlist_t *nvl;
+ nvlist_t *rules[ZEN_UMC_MAX_DRAM_RULES];
+ nvlist_t *remap[ZEN_UMC_MAX_CS_REMAPS];
+ nvlist_t *chan[ZEN_UMC_MAX_UMCS];
+
+ nvl = fnvlist_alloc();
+ fnvlist_add_uint32(nvl, "zud_flags", df->zud_flags);
+ fnvlist_add_uint32(nvl, "zud_dfno", df->zud_dfno);
+ fnvlist_add_uint32(nvl, "zud_ccm_inst", df->zud_ccm_inst);
+ fnvlist_add_uint64(nvl, "zud_hole_base", df->zud_hole_base);
+
+ for (uint_t i = 0; i < df->zud_dram_nrules; i++) {
+ rules[i] = zen_umc_dump_dram_rule(&df->zud_rules[i]);
+ }
+
+ for (uint_t i = 0; i < df->zud_cs_nremap; i++) {
+ remap[i] = fnvlist_alloc();
+ fnvlist_add_uint16_array(remap[i], "csr_remaps",
+ df->zud_remap[i].csr_remaps, df->zud_remap[i].csr_nremaps);
+ }
+
+ for (uint_t i = 0; i < df->zud_nchan; i++) {
+ chan[i] = zen_umc_dump_chan(&df->zud_chan[i]);
+ }
+
+ fnvlist_add_nvlist_array(nvl, "zud_rules", rules, df->zud_dram_nrules);
+ fnvlist_add_nvlist_array(nvl, "zud_remap", remap, df->zud_cs_nremap);
+ fnvlist_add_nvlist_array(nvl, "zud_chan", chan, df->zud_nchan);
+
+ for (uint_t i = 0; i < df->zud_dram_nrules; i++) {
+ nvlist_free(rules[i]);
+ }
+
+ for (uint_t i = 0; i < df->zud_cs_nremap; i++) {
+ nvlist_free(remap[i]);
+ }
+
+ for (uint_t i = 0; i < df->zud_nchan; i++) {
+ nvlist_free(chan[i]);
+ }
+
+ return (nvl);
+}
+
+nvlist_t *
+zen_umc_dump_decoder(zen_umc_t *umc)
+{
+ nvlist_t *nvl, *umc_nvl, *decomp;
+ nvlist_t *dfs[ZEN_UMC_MAX_DFS];
+
+ nvl = fnvlist_alloc();
+ fnvlist_add_uint32(nvl, "mc_dump_version", 0);
+ fnvlist_add_string(nvl, "mc_dump_driver", "zen_umc");
+
+ umc_nvl = fnvlist_alloc();
+ fnvlist_add_uint64(umc_nvl, "umc_tom", umc->umc_tom);
+ fnvlist_add_uint64(umc_nvl, "umc_tom2", umc->umc_tom2);
+ fnvlist_add_uint32(umc_nvl, "umc_family", umc->umc_family);
+ fnvlist_add_uint32(umc_nvl, "umc_df_rev", umc->umc_df_rev);
+
+ decomp = fnvlist_alloc();
+ fnvlist_add_uint32(decomp, "dfd_sock_mask",
+ umc->umc_decomp.dfd_sock_mask);
+ fnvlist_add_uint32(decomp, "dfd_die_mask",
+ umc->umc_decomp.dfd_die_mask);
+ fnvlist_add_uint32(decomp, "dfd_node_mask",
+ umc->umc_decomp.dfd_node_mask);
+ fnvlist_add_uint32(decomp, "dfd_comp_mask",
+ umc->umc_decomp.dfd_comp_mask);
+ fnvlist_add_uint8(decomp, "dfd_sock_shift",
+ umc->umc_decomp.dfd_sock_shift);
+ fnvlist_add_uint8(decomp, "dfd_die_shift",
+ umc->umc_decomp.dfd_die_shift);
+ fnvlist_add_uint8(decomp, "dfd_node_shift",
+ umc->umc_decomp.dfd_node_shift);
+ fnvlist_add_uint8(decomp, "dfd_comp_shift",
+ umc->umc_decomp.dfd_comp_shift);
+ fnvlist_add_nvlist(umc_nvl, "umc_decomp", decomp);
+ nvlist_free(decomp);
+
+ for (uint_t i = 0; i < umc->umc_ndfs; i++) {
+ dfs[i] = zen_umc_dump_df(&umc->umc_dfs[i]);
+ }
+
+ fnvlist_add_nvlist_array(umc_nvl, "umc_dfs", dfs, umc->umc_ndfs);
+ fnvlist_add_nvlist(nvl, "zen_umc", umc_nvl);
+ for (uint_t i = 0; i < umc->umc_ndfs; i++) {
+ nvlist_free(dfs[i]);
+ }
+
+ return (nvl);
+}
+
+static boolean_t
+zen_umc_restore_dram_rule(nvlist_t *nvl, df_dram_rule_t *rule)
+{
+ return (nvlist_lookup_pairs(nvl, 0,
+ "ddr_flags", DATA_TYPE_UINT32, &rule->ddr_flags,
+ "ddr_base", DATA_TYPE_UINT64, &rule->ddr_base,
+ "ddr_limit", DATA_TYPE_UINT64, &rule->ddr_limit,
+ "ddr_dest_fabid", DATA_TYPE_UINT16, &rule->ddr_dest_fabid,
+ "ddr_sock_ileave_bits", DATA_TYPE_UINT8,
+ &rule->ddr_sock_ileave_bits,
+ "ddr_die_ileave_bits", DATA_TYPE_UINT8, &rule->ddr_die_ileave_bits,
+ "ddr_addr_start", DATA_TYPE_UINT8, &rule->ddr_addr_start,
+ "ddr_remap_ent", DATA_TYPE_UINT8, &rule->ddr_remap_ent,
+ "ddr_chan_ileave", DATA_TYPE_UINT32, &rule->ddr_chan_ileave,
+ NULL) == 0);
+}
+
+static boolean_t
+zen_umc_restore_cs(nvlist_t *nvl, umc_cs_t *cs)
+{
+ nvlist_t *base, *sec;
+ uint8_t *bank_bits, *col_bits, *rm_bits, *rm_bits_sec;
+ uint_t nbanks, ncols, nrm, nrm_sec;
+
+ if (nvlist_lookup_pairs(nvl, 0,
+ "ucs_base", DATA_TYPE_NVLIST, &base,
+ "ucs_sec", DATA_TYPE_NVLIST, &sec,
+ "ucs_base_mask", DATA_TYPE_UINT64, &cs->ucs_base_mask,
+ "ucs_sec_mask", DATA_TYPE_UINT64, &cs->ucs_sec_mask,
+ "ucs_nrow_lo", DATA_TYPE_UINT8, &cs->ucs_nrow_lo,
+ "ucs_nrow_hi", DATA_TYPE_UINT8, &cs->ucs_nrow_hi,
+ "ucs_nbank_groups", DATA_TYPE_UINT8, &cs->ucs_nbank_groups,
+ "ucs_cs_xor", DATA_TYPE_UINT8, &cs->ucs_cs_xor,
+ "ucs_row_hi_bit", DATA_TYPE_UINT8, &cs->ucs_row_hi_bit,
+ "ucs_row_low_bit", DATA_TYPE_UINT8, &cs->ucs_row_low_bit,
+ "ucs_bank_bits", DATA_TYPE_UINT8_ARRAY, &bank_bits, &nbanks,
+ "ucs_col_bits", DATA_TYPE_UINT8_ARRAY, &col_bits, &ncols,
+ "ucs_inv_msbs", DATA_TYPE_UINT8, &cs->ucs_inv_msbs,
+ "ucs_rm_bits", DATA_TYPE_UINT8_ARRAY, &rm_bits, &nrm,
+ "ucs_inv_msbs_sec", DATA_TYPE_UINT8, &cs->ucs_inv_msbs_sec,
+ "ucs_rm_bits_sec", DATA_TYPE_UINT8_ARRAY, &rm_bits_sec, &nrm_sec,
+ "ucs_subchan", DATA_TYPE_UINT8, &cs->ucs_subchan,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+
+ if (nbanks > ZEN_UMC_MAX_BANK_BITS ||
+ ncols > ZEN_UMC_MAX_COL_BITS ||
+ nrm > ZEN_UMC_MAX_RM_BITS ||
+ nrm != nrm_sec) {
+ return (B_FALSE);
+ }
+
+ cs->ucs_nbanks = nbanks;
+ cs->ucs_ncol = ncols;
+ cs->ucs_nrm = nrm;
+
+ bcopy(bank_bits, cs->ucs_bank_bits, cs->ucs_nbanks *
+ sizeof (uint8_t));
+ bcopy(col_bits, cs->ucs_col_bits, cs->ucs_ncol * sizeof (uint8_t));
+ bcopy(rm_bits, cs->ucs_rm_bits, cs->ucs_nrm * sizeof (uint8_t));
+ bcopy(rm_bits_sec, cs->ucs_rm_bits_sec, cs->ucs_nrm *
+ sizeof (uint8_t));
+
+ if (nvlist_lookup_pairs(base, 0,
+ "udb_base", DATA_TYPE_UINT64, &cs->ucs_base.udb_base,
+ "udb_valid", DATA_TYPE_UINT8, &cs->ucs_base.udb_valid,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+
+ if (nvlist_lookup_pairs(sec, 0,
+ "udb_base", DATA_TYPE_UINT64, &cs->ucs_sec.udb_base,
+ "udb_valid", DATA_TYPE_UINT8, &cs->ucs_sec.udb_valid,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+static boolean_t
+zen_umc_restore_dimm(nvlist_t *nvl, umc_dimm_t *dimm)
+{
+ nvlist_t **cs;
+ uint_t ncs;
+
+ if (nvlist_lookup_pairs(nvl, 0,
+ "ud_flags", DATA_TYPE_UINT32, &dimm->ud_flags,
+ "ud_width", DATA_TYPE_UINT32, &dimm->ud_width,
+ "ud_type", DATA_TYPE_UINT32, &dimm->ud_type,
+ "ud_kind", DATA_TYPE_UINT32, &dimm->ud_kind,
+ "ud_dimmno", DATA_TYPE_UINT32, &dimm->ud_dimmno,
+ "ud_cs", DATA_TYPE_NVLIST_ARRAY, &cs, &ncs,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+
+ if (ncs != ZEN_UMC_MAX_CS_PER_DIMM) {
+ return (B_FALSE);
+ }
+
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CS_PER_DIMM; i++) {
+ if (!zen_umc_restore_cs(cs[i], &dimm->ud_cs[i])) {
+ return (B_FALSE);
+ }
+ }
+
+ return (B_TRUE);
+}
+
+static boolean_t
+zen_umc_restore_hash(nvlist_t *nvl, umc_chan_hash_t *hash)
+{
+ if (nvlist_lookup_uint32(nvl, "uch_flags", &hash->uch_flags) != 0) {
+ return (B_FALSE);
+ }
+
+ if (hash->uch_flags & UMC_CHAN_HASH_F_BANK) {
+ nvlist_t **banks;
+ uint_t nbanks;
+
+ if (nvlist_lookup_nvlist_array(nvl, "uch_bank_hashes", &banks,
+ &nbanks) != 0) {
+ return (B_FALSE);
+ }
+
+ if (nbanks != ZEN_UMC_MAX_CHAN_BANK_HASH) {
+ return (B_FALSE);
+ }
+
+ for (uint_t i = 0; i < nbanks; i++) {
+ if (nvlist_lookup_pairs(banks[i], 0,
+ "ubh_row_xor", DATA_TYPE_UINT32,
+ &hash->uch_bank_hashes[i].ubh_row_xor,
+ "ubh_col_xor", DATA_TYPE_UINT32,
+ &hash->uch_bank_hashes[i].ubh_col_xor,
+ "ubh_en", DATA_TYPE_BOOLEAN_VALUE,
+ &hash->uch_bank_hashes[i].ubh_en,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+ }
+ }
+
+ if (hash->uch_flags & UMC_CHAN_HASH_F_RM) {
+ nvlist_t **rm;
+ uint_t nrm;
+
+ if (nvlist_lookup_nvlist_array(nvl, "uch_rm_hashes", &rm,
+ &nrm) != 0) {
+ return (B_FALSE);
+ }
+
+ if (nrm != ZEN_UMC_MAX_CHAN_RM_HASH) {
+ return (B_FALSE);
+ }
+
+ for (uint_t i = 0; i < nrm; i++) {
+ if (nvlist_lookup_pairs(rm[i], 0,
+ "uah_addr_xor", DATA_TYPE_UINT64,
+ &hash->uch_rm_hashes[i].uah_addr_xor,
+ "uah_en", DATA_TYPE_BOOLEAN_VALUE,
+ &hash->uch_rm_hashes[i].uah_en,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+ }
+ }
+
+ if (hash->uch_flags & UMC_CHAN_HASH_F_CS) {
+ nvlist_t **cs;
+ uint_t ncs;
+
+ if (nvlist_lookup_nvlist_array(nvl, "uch_cs_hashes", &cs,
+ &ncs) != 0) {
+ return (B_FALSE);
+ }
+
+ if (ncs != ZEN_UMC_MAX_CHAN_CS_HASH) {
+ return (B_FALSE);
+ }
+
+ for (uint_t i = 0; i < ncs; i++) {
+ if (nvlist_lookup_pairs(cs[i], 0,
+ "uah_addr_xor", DATA_TYPE_UINT64,
+ &hash->uch_cs_hashes[i].uah_addr_xor,
+ "uah_en", DATA_TYPE_BOOLEAN_VALUE,
+ &hash->uch_cs_hashes[i].uah_en,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+ }
+ }
+
+ if (hash->uch_flags & UMC_CHAN_HASH_F_PC) {
+ nvlist_t *pc;
+
+ if (nvlist_lookup_nvlist(nvl, "uch_pch_hash", &pc) != 0) {
+ return (B_FALSE);
+ }
+
+ if (nvlist_lookup_pairs(pc, 0,
+ "uph_row_xor", DATA_TYPE_UINT32,
+ &hash->uch_pc_hash.uph_row_xor,
+ "uph_col_xor", DATA_TYPE_UINT32,
+ &hash->uch_pc_hash.uph_col_xor,
+ "uph_bank_xor", DATA_TYPE_UINT32,
+ &hash->uch_pc_hash.uph_bank_xor,
+ "uph_en", DATA_TYPE_BOOLEAN_VALUE,
+ &hash->uch_pc_hash.uph_en,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+ }
+ return (B_TRUE);
+}
+
+static boolean_t
+zen_umc_restore_chan(nvlist_t *nvl, zen_umc_chan_t *chan)
+{
+ uint_t noffsets, ndimms;
+ nvlist_t **rules, **offsets, **dimms, *hash;
+
+ if (nvlist_lookup_pairs(nvl, 0,
+ "chan_flags", DATA_TYPE_UINT32, &chan->chan_flags,
+ "chan_fabid", DATA_TYPE_UINT32, &chan->chan_fabid,
+ "chan_instid", DATA_TYPE_UINT32, &chan->chan_instid,
+ "chan_logid", DATA_TYPE_UINT32, &chan->chan_logid,
+ "chan_rules", DATA_TYPE_NVLIST_ARRAY, &rules, &chan->chan_nrules,
+ "chan_np2_space0", DATA_TYPE_UINT32, &chan->chan_np2_space0,
+ "chan_offsets", DATA_TYPE_NVLIST_ARRAY, &offsets, &noffsets,
+ "chan_dimms", DATA_TYPE_NVLIST_ARRAY, &dimms, &ndimms,
+ "chan_hash", DATA_TYPE_NVLIST, &hash,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+
+ if (chan->chan_nrules > ZEN_UMC_MAX_CS_RULES ||
+ noffsets != chan->chan_nrules - 1 || ndimms != ZEN_UMC_MAX_DIMMS) {
+ return (B_FALSE);
+ }
+
+ for (uint_t i = 0; i < chan->chan_nrules; i++) {
+ if (!zen_umc_restore_dram_rule(rules[i],
+ &chan->chan_rules[i])) {
+ return (B_FALSE);
+ }
+ }
+
+ for (uint_t i = 0; i < chan->chan_nrules - 1; i++) {
+ chan_offset_t *coff = &chan->chan_offsets[i];
+
+ if (nvlist_lookup_pairs(offsets[i], 0,
+ "cho_valid", DATA_TYPE_BOOLEAN_VALUE, &coff->cho_valid,
+ "cho_offset", DATA_TYPE_UINT64, &coff->cho_offset,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+ }
+
+ for (uint_t i = 0; i < ZEN_UMC_MAX_DIMMS; i++) {
+ if (!zen_umc_restore_dimm(dimms[i], &chan->chan_dimms[i])) {
+ return (B_FALSE);
+ }
+ }
+
+ if (!zen_umc_restore_hash(hash, &chan->chan_hash)) {
+ return (B_FALSE);
+ }
+
+ return (B_TRUE);
+}
+
+static boolean_t
+zen_umc_restore_df(nvlist_t *nvl, zen_umc_df_t *df)
+{
+ nvlist_t **rules, **chan, **remap;
+
+ if (nvlist_lookup_pairs(nvl, 0,
+ "zud_flags", DATA_TYPE_UINT32, &df->zud_flags,
+ "zud_dfno", DATA_TYPE_UINT32, &df->zud_dfno,
+ "zud_ccm_inst", DATA_TYPE_UINT32, &df->zud_ccm_inst,
+ "zud_hole_base", DATA_TYPE_UINT64, &df->zud_hole_base,
+ "zud_rules", DATA_TYPE_NVLIST_ARRAY, &rules, &df->zud_dram_nrules,
+ "zud_remap", DATA_TYPE_NVLIST_ARRAY, &remap, &df->zud_cs_nremap,
+ "zud_chan", DATA_TYPE_NVLIST_ARRAY, &chan, &df->zud_nchan,
+	    NULL) != 0 ||
+ df->zud_dram_nrules > ZEN_UMC_MAX_DRAM_RULES ||
+ df->zud_cs_nremap > ZEN_UMC_MAX_CS_REMAPS ||
+ df->zud_nchan > ZEN_UMC_MAX_UMCS) {
+ return (B_FALSE);
+ }
+
+ for (uint_t i = 0; i < df->zud_dram_nrules; i++) {
+ if (!zen_umc_restore_dram_rule(rules[i], &df->zud_rules[i])) {
+ return (B_FALSE);
+ }
+ }
+
+ for (uint_t i = 0; i < df->zud_cs_nremap; i++) {
+ uint16_t *u16p;
+ if (nvlist_lookup_uint16_array(remap[i], "csr_remaps", &u16p,
+ &df->zud_remap[i].csr_nremaps) != 0 ||
+ df->zud_remap[i].csr_nremaps > ZEN_UMC_MAX_REMAP_ENTS) {
+ return (B_FALSE);
+ }
+		bcopy(u16p, df->zud_remap[i].csr_remaps,
+		    df->zud_remap[i].csr_nremaps * sizeof (uint16_t));
+ }
+
+ for (uint_t i = 0; i < df->zud_nchan; i++) {
+ if (!zen_umc_restore_chan(chan[i], &df->zud_chan[i])) {
+ return (B_FALSE);
+ }
+ }
+
+ return (B_TRUE);
+}
+
+boolean_t
+zen_umc_restore_decoder(nvlist_t *nvl, zen_umc_t *umc)
+{
+ uint32_t vers;
+ char *driver;
+ nvlist_t *umc_nvl, *decomp, **dfs;
+ bzero(umc, sizeof (zen_umc_t));
+
+ if (nvlist_lookup_pairs(nvl, 0,
+ "mc_dump_version", DATA_TYPE_UINT32, &vers,
+ "mc_dump_driver", DATA_TYPE_STRING, &driver,
+ NULL) != 0 || vers != 0 || strcmp(driver, "zen_umc") != 0 ||
+ nvlist_lookup_nvlist(nvl, "zen_umc", &umc_nvl) != 0) {
+ return (B_FALSE);
+ }
+
+ if (nvlist_lookup_pairs(umc_nvl, 0,
+ "umc_tom", DATA_TYPE_UINT64, &umc->umc_tom,
+ "umc_tom2", DATA_TYPE_UINT64, &umc->umc_tom2,
+ "umc_family", DATA_TYPE_UINT32, &umc->umc_family,
+ "umc_df_rev", DATA_TYPE_UINT32, &umc->umc_df_rev,
+ "umc_decomp", DATA_TYPE_NVLIST, &decomp,
+ "umc_dfs", DATA_TYPE_NVLIST_ARRAY, &dfs, &umc->umc_ndfs,
+ NULL) != 0 || umc->umc_ndfs > ZEN_UMC_MAX_DFS) {
+ return (B_FALSE);
+ }
+
+ if (nvlist_lookup_pairs(decomp, 0,
+ "dfd_sock_mask", DATA_TYPE_UINT32, &umc->umc_decomp.dfd_sock_mask,
+ "dfd_die_mask", DATA_TYPE_UINT32, &umc->umc_decomp.dfd_die_mask,
+ "dfd_node_mask", DATA_TYPE_UINT32, &umc->umc_decomp.dfd_node_mask,
+ "dfd_comp_mask", DATA_TYPE_UINT32, &umc->umc_decomp.dfd_comp_mask,
+ "dfd_sock_shift", DATA_TYPE_UINT8, &umc->umc_decomp.dfd_sock_shift,
+ "dfd_die_shift", DATA_TYPE_UINT8, &umc->umc_decomp.dfd_die_shift,
+ "dfd_node_shift", DATA_TYPE_UINT8, &umc->umc_decomp.dfd_node_shift,
+ "dfd_comp_shift", DATA_TYPE_UINT8, &umc->umc_decomp.dfd_comp_shift,
+ NULL) != 0) {
+ return (B_FALSE);
+ }
+
+ for (uint_t i = 0; i < umc->umc_ndfs; i++) {
+ if (!zen_umc_restore_df(dfs[i], &umc->umc_dfs[i])) {
+ return (B_FALSE);
+ }
+ }
+
+ return (B_TRUE);
+}
diff --git a/usr/src/man/man9f/Makefile b/usr/src/man/man9f/Makefile
index f220a3f404..bb3b85026c 100644
--- a/usr/src/man/man9f/Makefile
+++ b/usr/src/man/man9f/Makefile
@@ -16,6 +16,7 @@
# Copyright 2020-2021 Tintri by DDN, Inc. All rights reserved.
# Copyright 2016 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
# Copyright 2022 RackTop Systems, Inc.
+# Copyright 2022 Oxide Computer Company
#
include $(SRC)/Makefile.master
@@ -54,6 +55,9 @@ MANFILES= ASSERT.9f \
bioreset.9f \
biosize.9f \
biowait.9f \
+ bitdel64.9f \
+ bitset64.9f \
+ bitx64.9f \
bp_copyin.9f \
bp_copyout.9f \
bp_mapin.9f \
@@ -693,6 +697,12 @@ MANLINKS= AVL_NEXT.9f \
avl_remove.9f \
avl_swap.9f \
bcanputnext.9f \
+ bitset8.9f \
+ bitset16.9f \
+ bitset32.9f \
+ bitx8.9f \
+ bitx16.9f \
+ bitx32.9f \
crdup.9f \
crfree.9f \
crget.9f \
@@ -1416,6 +1426,13 @@ avl_swap.9f := LINKSRC = avl.9f
AVL_NEXT.9f := LINKSRC = avl.9f
AVL_PREV.9f := LINKSRC = avl.9f
+bitset8.9f := LINKSRC = bitset64.9f
+bitset16.9f := LINKSRC = bitset64.9f
+bitset32.9f := LINKSRC = bitset64.9f
+bitx8.9f := LINKSRC = bitx64.9f
+bitx16.9f := LINKSRC = bitx64.9f
+bitx32.9f := LINKSRC = bitx64.9f
+
dev_err.9f := LINKSRC = cmn_err.9f
vcmn_err.9f := LINKSRC = cmn_err.9f
vzcmn_err.9f := LINKSRC = cmn_err.9f
diff --git a/usr/src/man/man9f/bitdel64.9f b/usr/src/man/man9f/bitdel64.9f
new file mode 100644
index 0000000000..f4cd3705e8
--- /dev/null
+++ b/usr/src/man/man9f/bitdel64.9f
@@ -0,0 +1,81 @@
+.\"
+.\" This file and its contents are supplied under the terms of the
+.\" Common Development and Distribution License ("CDDL"), version 1.0.
+.\" You may only use this file in accordance with the terms of version
+.\" 1.0 of the CDDL.
+.\"
+.\" A full copy of the text of the CDDL should have accompanied this
+.\" source. A copy of the CDDL is also available via the Internet at
+.\" http://www.illumos.org/license/CDDL.
+.\"
+.\"
+.\" Copyright 2022 Oxide Computer Company
+.\"
+.Dd April 12, 2022
+.Dt BITDEL64 9F
+.Os
+.Sh NAME
+.Nm bitdel64
+.Nd delete bits from an integer
+.Sh SYNOPSIS
+.In sys/bitext.h
+.Ft uint64_t
+.Fo bitdel64
+.Fa "uint64_t value"
+.Fa "uint_t high"
+.Fa "uint_t low"
+.Fc
+.Sh INTERFACE LEVEL
+.Sy Volatile -
+This interface is still evolving in illumos.
+API and ABI stability is not guaranteed.
+.Sh PARAMETERS
+.Bl -tag -width Fa
+.It Fa value
+An integer to remove bits from.
+.It Fa high
+The high end, inclusive, of the bit range to delete from
+.Fa value .
+.It Fa low
+The low end, inclusive, of the bit range to delete from
+.Fa value .
+.El
+.Sh DESCRIPTION
+The
+.Fn bitdel64
+function removes bits from an integer,
+.Fa value .
+The act of removing a bit range not only removes all the bits in the
+range specified by
+.Fa low
+and
+.Fa high ,
+but also causes all remaining bits to be shifted over to start at
+.Fa low .
+.Pp
+For example, consider the binary value 0b11_1101_0011
+.Pq 0x3d3 .
+If we deleted bits 4 through 7, the resulting value would be 0b11_0011
+.Pq 0x33 .
+.Pp
+.Fa high
+and
+.Fa low
+are an inclusive range
+.Po
+.Pf [ Fa low ,
+.Fa high ]
+.Pc
+and the value of
+.Fa low
+cannot be greater than
+.Fa high ,
+nor may either exceed 63.
+.Sh RETURN VALUES
+Upon successful completion, the
+.Fn bitdel64
+function returns the modified integer with the appropriate bits removed.
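+.Sh EXAMPLES
+.Sy Example 1 -
+Deleting a bit range from a value.
+.Pp
+The following minimal sketch mirrors the description above; the helper
+function name is purely illustrative.
+Deleting bits 4 through 7 from 0x3d3 yields 0x33.
+.Bd -literal
+#include <sys/bitext.h>
+
+uint64_t
+strip_middle_bits(void)
+{
+	/* 0x3d3 is 0b11_1101_0011; removing bits [7:4] leaves 0b11_0011. */
+	return (bitdel64(0x3d3, 7, 4));
+}
+.Ed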
+.Sh SEE ALSO
+.Xr bitset64 9F ,
+.Xr bitx64 9F
diff --git a/usr/src/man/man9f/bitset64.9f b/usr/src/man/man9f/bitset64.9f
new file mode 100644
index 0000000000..14bf0858c7
--- /dev/null
+++ b/usr/src/man/man9f/bitset64.9f
@@ -0,0 +1,186 @@
+.\"
+.\" This file and its contents are supplied under the terms of the
+.\" Common Development and Distribution License ("CDDL"), version 1.0.
+.\" You may only use this file in accordance with the terms of version
+.\" 1.0 of the CDDL.
+.\"
+.\" A full copy of the text of the CDDL should have accompanied this
+.\" source. A copy of the CDDL is also available via the Internet at
+.\" http://www.illumos.org/license/CDDL.
+.\"
+.\"
+.\" Copyright 2022 Oxide Computer Company
+.\"
+.Dd April 12, 2022
+.Dt BITSET64 9F
+.Os
+.Sh NAME
+.Nm bitset8 ,
+.Nm bitset16 ,
+.Nm bitset32 ,
+.Nm bitset64
+.Nd set bitfield values in an integer
+.Sh SYNOPSIS
+.In sys/bitext.h
+.Ft uint8_t
+.Fo bitset8
+.Fa "uint8_t base"
+.Fa "uint_t high"
+.Fa "uint_t low"
+.Fa "uint8_t value"
+.Fc
+.Ft uint16_t
+.Fo bitset16
+.Fa "uint16_t base"
+.Fa "uint_t high"
+.Fa "uint_t low"
+.Fa "uint16_t value"
+.Fc
+.Ft uint32_t
+.Fo bitset32
+.Fa "uint32_t base"
+.Fa "uint_t high"
+.Fa "uint_t low"
+.Fa "uint32_t value"
+.Fc
+.Ft uint64_t
+.Fo bitset64
+.Fa "uint64_t base"
+.Fa "uint_t high"
+.Fa "uint_t low"
+.Fa "uint64_t value"
+.Fc
+.Sh INTERFACE LEVEL
+.Sy Volatile -
+This interface is still evolving in illumos.
+API and ABI stability is not guaranteed.
+.Sh PARAMETERS
+.Bl -tag -width Fa
+.It Fa base
+The starting integer that will have a value ORed into it.
+.It Fa high
+The high end, inclusive, of the bit range to insert
+.Fa value
+into
+.Fa base .
+.It Fa low
+The low end, inclusive, of the bit range to insert
+.Fa value
+into
+.Fa base .
+.It Fa value
+A value to insert into
+.Fa base .
+.El
+.Sh DESCRIPTION
+The
+.Fn bitset8 ,
+.Fn bitset16 ,
+.Fn bitset32 ,
+and
+.Fn bitset64
+functions are used to logically bitwise-OR in the integer
+.Fa value
+into a specified bit position in
+.Fa base .
+Effectively, the function zeros out the bit range in
+.Fa base ,
+described by
+.Fa high
+and
+.Fa low
+and then performs a bitwise-OR of
+.Fa value
+which has been adjusted to start at
+.Fa low .
+.Pp
+The
+.Fa high
+and
+.Fa low
+arguments describe an inclusive bit range
+.Po
+.Pf [ Fa low ,
+.Fa high ]
+.Pc
+which describes where
+.Fa value
+should be inserted.
+It is illegal
+for
+.Fa low
+to be greater than
+.Fa high ,
+for
+.Fa low
+or
+.Fa high
+to exceed the integer's bit range
+.Po
+e.g. neither can be greater than 7 for
+.Fn bitset8
+.Pc ,
+and
+.Fa value
+must not exceed the described bit range.
+That is, if
+.Fa high
+was 2
+and
+.Fa low
+was 1,
+.Fa value
+could not be larger than a 2-bit value.
+.Pp
+Note, these functions do not modify either
+.Fa base
+or
+.Fa value .
+.Sh RETURN VALUES
+Upon successful completion, the
+.Fn bitset8 ,
+.Fn bitset16 ,
+.Fn bitset32 ,
+and
+.Fn bitset64
+functions all return a new value that has first cleared the specified
+bit range from
+.Fa base
+and then replaced it with
+.Fa value .
+.Sh EXAMPLES
+.Sy Example 1 -
+Using the
+.Fn bitset32
+function to build up a register value.
+.Pp
+A common use case for these functions is to help deal with registers
+that are defined as a series of bit values.
+The following example shows a register's bit definitions and then how
+they are used to construct a value to write.
+.Bd -literal
+/*
+ * This represents a token register definition. It is normally a
+ * uint32_t.
+ */
+#define DF_IO_BASE_V2_SET_BASE(r, v) bitset32(r, 24, 12, v)
+#define DF_IO_BASE_V2_SET_IE(r, v) bitset32(r, 5, 5, v)
+#define DF_IO_BASE_V2_SET_WE(r, v) bitset32(r, 1, 1, v)
+#define DF_IO_BASE_V2_SET_RE(r, v) bitset32(r, 0, 0, v)
+
+void
+setup_register(uint32_t base)
+{
+ uint32_t reg = 0;
+
+ /*
+ * Set read enable, write enable, and the base. Then write the
+ * hardware register.
+ */
+ reg = DF_IO_BASE_V2_SET_RE(reg, 1);
+ reg = DF_IO_BASE_V2_SET_WE(reg, 1);
+ reg = DF_IO_BASE_V2_SET_BASE(reg, base);
+ write_register(XXX, reg);
+}
+.Ed
+.Sh SEE ALSO
+.Xr bitdel64 9F ,
+.Xr bitx64 9F
diff --git a/usr/src/man/man9f/bitx64.9f b/usr/src/man/man9f/bitx64.9f
new file mode 100644
index 0000000000..bf15ec5ff7
--- /dev/null
+++ b/usr/src/man/man9f/bitx64.9f
@@ -0,0 +1,120 @@
+.\"
+.\" This file and its contents are supplied under the terms of the
+.\" Common Development and Distribution License ("CDDL"), version 1.0.
+.\" You may only use this file in accordance with the terms of version
+.\" 1.0 of the CDDL.
+.\"
+.\" A full copy of the text of the CDDL should have accompanied this
+.\" source. A copy of the CDDL is also available via the Internet at
+.\" http://www.illumos.org/license/CDDL.
+.\"
+.\"
+.\" Copyright 2022 Oxide Computer Company
+.\"
+.Dd April 12, 2022
+.Dt BITX64 9F
+.Os
+.Sh NAME
+.Nm bitx8 ,
+.Nm bitx16 ,
+.Nm bitx32 ,
+.Nm bitx64
+.Nd extract bits from an integer
+.Sh SYNOPSIS
+.In sys/bitext.h
+.Ft uint8_t
+.Fo bitx8
+.Fa "uint8_t value"
+.Fa "uint_t high"
+.Fa "uint_t low"
+.Fc
+.Ft uint16_t
+.Fo bitx16
+.Fa "uint16_t value"
+.Fa "uint_t high"
+.Fa "uint_t low"
+.Fc
+.Ft uint32_t
+.Fo bitx32
+.Fa "uint32_t value"
+.Fa "uint_t high"
+.Fa "uint_t low"
+.Fc
+.Ft uint64_t
+.Fo bitx64
+.Fa "uint64_t value"
+.Fa "uint_t high"
+.Fa "uint_t low"
+.Fc
+.Sh INTERFACE LEVEL
+.Sy Volatile -
+This interface is still evolving in illumos.
+API and ABI stability is not guaranteed.
+.Sh PARAMETERS
+.Bl -tag -width Fa
+.It Fa value
+An integer to extract a value from.
+.It Fa high
+The high end, inclusive, of the bit range to extract from
+.Fa value .
+.It Fa low
+The low end, inclusive, of the bit range to extract from
+.Fa value .
+.El
+.Sh DESCRIPTION
+The
+.Fn bitx8 ,
+.Fn bitx16 ,
+.Fn bitx32 ,
+and
+.Fn bitx64
+functions are used to extract a range of bits from an 8, 16, 32, or
+64-bit value respectively.
+These functions are all implementations of a classical application of a
+bitwise-AND of a mask and a logical right shift.
+More specifically, the arguments
+.Fa high
+and
+.Fa low
+describe an inclusive range of bits
+.Po
+.Pf [ Fa low ,
+.Fa high ]
+.Pc
+to extract from
+.Fa value .
+The extracted bits are all shifted right such that the resulting value
+starts at bit 0
+.Po that is, shifted over by
+.Fa low
+.Pc .
+.Pp
+Each of the variants here operates on a specifically sized integer.
+.Fa high
+and
+.Fa low
+must fit within the bit range that the integer implies.
+For example, the valid range for
+.Fn bitx32
+is from 0 to 31.
+.Fa high
+must not be less than
+.Fa low .
+.Sh CONTEXT
+These functions may be called in all contexts,
+.Sy user ,
+.Sy kernel ,
+and
+.Sy interrupt .
+.Sh RETURN VALUES
+Upon successful completion, the
+.Fn bitx8 ,
+.Fn bitx16 ,
+.Fn bitx32 ,
+and
+.Fn bitx64
+functions all return the indicated portion of
+.Fa value .
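+.Sh EXAMPLES
+.Sy Example 1 -
+Extracting a field from a register value.
+.Pp
+This is a minimal sketch that assumes a hypothetical 32-bit register
+whose bits 11 through 4 hold a device identifier.
+.Bd -literal
+#include <sys/bitext.h>
+
+uint32_t
+reg_to_devid(uint32_t reg)
+{
+	/* Bits [11:4] are shifted down so the result starts at bit 0. */
+	return (bitx32(reg, 11, 4));
+}
+.Ed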
+.Sh SEE ALSO
+.Xr bitdel64 9F ,
+.Xr bitset64 9F
diff --git a/usr/src/pkg/manifests/driver-cpu-amd-zen.p5m b/usr/src/pkg/manifests/driver-cpu-amd-zen.p5m
index a8500fac4f..1fcd24c4bf 100644
--- a/usr/src/pkg/manifests/driver-cpu-amd-zen.p5m
+++ b/usr/src/pkg/manifests/driver-cpu-amd-zen.p5m
@@ -10,7 +10,7 @@
#
#
-# Copyright 2021 Oxide Computer Company
+# Copyright 2022 Oxide Computer Company
#
<include global_zone_only_component>
@@ -24,6 +24,7 @@ dir path=kernel/drv group=sys
dir path=kernel/drv/$(ARCH64) group=sys
file path=kernel/drv/$(ARCH64)/amdzen group=sys
file path=kernel/drv/$(ARCH64)/amdzen_stub group=sys
+file path=kernel/drv/$(ARCH64)/zen_umc group=sys
file path=kernel/drv/amdzen.conf group=sys
dir path=usr/share/man
dir path=usr/share/man/man4d
@@ -104,4 +105,5 @@ driver name=amdzen_stub \
alias=pci1022,166f,p \
alias=pci1022,1670,p \
alias=pci1022,1671,p
+driver name=zen_umc
license lic_CDDL license=lic_CDDL
diff --git a/usr/src/pkg/manifests/system-header.p5m b/usr/src/pkg/manifests/system-header.p5m
index 338e3181c7..7789ee796a 100644
--- a/usr/src/pkg/manifests/system-header.p5m
+++ b/usr/src/pkg/manifests/system-header.p5m
@@ -29,6 +29,7 @@
# Copyright 2020 Joyent, Inc.
# Copyright 2019 Peter Tribble.
# Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
+# Copyright 2022 Oxide Computer company
#
set name=pkg.fmri value=pkg:/system/header@$(PKGVERS)
@@ -702,6 +703,7 @@ file path=usr/include/sys/av/iec61883.h
file path=usr/include/sys/avintr.h
file path=usr/include/sys/avl.h
file path=usr/include/sys/avl_impl.h
+file path=usr/include/sys/bitext.h
file path=usr/include/sys/bitmap.h
file path=usr/include/sys/bitset.h
file path=usr/include/sys/bl.h
diff --git a/usr/src/pkg/manifests/system-kernel.man9f.inc b/usr/src/pkg/manifests/system-kernel.man9f.inc
index dbdea73f17..e6af4d104b 100644
--- a/usr/src/pkg/manifests/system-kernel.man9f.inc
+++ b/usr/src/pkg/manifests/system-kernel.man9f.inc
@@ -16,6 +16,7 @@
# Copyright 2016 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
# Copyright 2019 Joyent, Inc.
# Copyright 2022 RackTop Systems, Inc.
+# Copyright 2022 Oxide Computer Company
#
file path=usr/share/man/man9f/ASSERT.9f
@@ -187,6 +188,15 @@ file path=usr/share/man/man9f/biomodified.9f
file path=usr/share/man/man9f/bioreset.9f
file path=usr/share/man/man9f/biosize.9f
file path=usr/share/man/man9f/biowait.9f
+file path=usr/share/man/man9f/bitdel64.9f
+link path=usr/share/man/man9f/bitset16.9f target=bitset64.9f
+link path=usr/share/man/man9f/bitset32.9f target=bitset64.9f
+file path=usr/share/man/man9f/bitset64.9f
+link path=usr/share/man/man9f/bitset8.9f target=bitset64.9f
+link path=usr/share/man/man9f/bitx16.9f target=bitx64.9f
+link path=usr/share/man/man9f/bitx32.9f target=bitx64.9f
+file path=usr/share/man/man9f/bitx64.9f
+link path=usr/share/man/man9f/bitx8.9f target=bitx64.9f
file path=usr/share/man/man9f/bp_copyin.9f
file path=usr/share/man/man9f/bp_copyout.9f
file path=usr/share/man/man9f/bp_mapin.9f
diff --git a/usr/src/pkg/manifests/system-test-ostest.p5m b/usr/src/pkg/manifests/system-test-ostest.p5m
index 1f89619630..a235dff463 100644
--- a/usr/src/pkg/manifests/system-test-ostest.p5m
+++ b/usr/src/pkg/manifests/system-test-ostest.p5m
@@ -15,6 +15,7 @@
# Copyright 2020 Joyent, Inc.
# Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
# Copyright 2021 Tintri by DDN, Inc. All rights reserved.
+# Copyright 2022 Oxide Computer Company
#
set name=pkg.fmri value=pkg:/system/test/ostest@$(PKGVERS)
@@ -157,6 +158,7 @@ file path=opt/os-tests/tests/uccid/yk-poll mode=0555
file path=opt/os-tests/tests/uccid/yk-readonly mode=0555
file path=opt/os-tests/tests/writev.32 mode=0555
file path=opt/os-tests/tests/writev.64 mode=0555
+file path=opt/os-tests/tests/zen_umc_test mode=0555
license cr_Sun license=cr_Sun
license lic_CDDL license=lic_CDDL
depend type=require fmri=developer/dtrace
diff --git a/usr/src/test/os-tests/runfiles/default.run b/usr/src/test/os-tests/runfiles/default.run
index dcf65fceed..c96c159c30 100644
--- a/usr/src/test/os-tests/runfiles/default.run
+++ b/usr/src/test/os-tests/runfiles/default.run
@@ -14,6 +14,7 @@
# Copyright 2020 Joyent, Inc.
# Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
# Copyright 2021 Tintri by DDN, Inc. All rights reserved.
+# Copyright 2022 Oxide Computer Company
#
[DEFAULT]
@@ -133,3 +134,5 @@ tests = ['stackalign.32', 'stackalign.64']
user = root
pre = core_prereqs
tests = ['coretests']
+
+[/opt/os-tests/tests/zen_umc_test]
diff --git a/usr/src/test/os-tests/tests/Makefile b/usr/src/test/os-tests/tests/Makefile
index 8b5a45ab04..67e6bbfb69 100644
--- a/usr/src/test/os-tests/tests/Makefile
+++ b/usr/src/test/os-tests/tests/Makefile
@@ -14,9 +14,10 @@
# Copyright 2020 Joyent, Inc.
# Copyright 2021 Tintri by DDN, Inc. All rights reserved.
# Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
+# Copyright 2022 Oxide Computer Company
#
-SUBDIRS_i386 = i386 imc
+SUBDIRS_i386 = i386 imc zen_umc
SUBDIRS = \
cores \
diff --git a/usr/src/test/os-tests/tests/imc/Makefile b/usr/src/test/os-tests/tests/imc/Makefile
index cbf3d81654..861626ce15 100644
--- a/usr/src/test/os-tests/tests/imc/Makefile
+++ b/usr/src/test/os-tests/tests/imc/Makefile
@@ -56,6 +56,9 @@ clean:
$(CMDS): $(TESTDIR) $(PROG)
+$(TESTDIR):
+ $(INS.dir)
+
$(TESTDIR)/%: %
$(INS.file)
diff --git a/usr/src/test/os-tests/tests/zen_umc/Makefile b/usr/src/test/os-tests/tests/zen_umc/Makefile
new file mode 100644
index 0000000000..e8587df39f
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/Makefile
@@ -0,0 +1,90 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2022 Oxide Computer Company
+#
+
+ROOTOPTPKG = $(ROOT)/opt/os-tests
+TESTDIR = $(ROOTOPTPKG)/tests
+
+#
+# Test objects
+#
+OBJS = zen_umc_test.o \
+ zen_umc_fabric_ids.o \
+ zen_umc_test_basic.o \
+ zen_umc_test_chans.o \
+ zen_umc_test_cod.o \
+ zen_umc_test_errors.o \
+ zen_umc_test_hole.o \
+ zen_umc_test_ilv.o \
+ zen_umc_test_multi.o \
+ zen_umc_test_nps.o \
+ zen_umc_test_remap.o
+
+#
+# Common objects that we need.
+#
+OBJS += zen_fabric_utils.o zen_umc_decode.o bitext.o
+
+PROG = zen_umc_test
+
+include $(SRC)/cmd/Makefile.cmd
+include $(SRC)/test/Makefile.com
+include $(SRC)/cmd/Makefile.ctf
+
+CSTD = $(CSTD_GNU99)
+#
+# Ensure we always build with asserts. The first -I gives us access to
+# the zen_umc.h pieces while the second gives us the registers that are
+# required (dependency of the zen_umc.h header).
+#
+CPPFLAGS += -DDEBUG
+CPPFLAGS += -I$(SRC)/uts/intel/io/amdzen
+CPPFLAGS += -I$(SRC)/uts/intel/
+
+CMDS = $(PROG:%=$(TESTDIR)/%)
+$(CMDS) := FILEMODE = 0555
+
+all: $(PROG)
+
+install: all $(CMDS)
+
+clobber: clean
+ -$(RM) $(PROG)
+
+clean:
+ -$(RM) *.o
+
+$(CMDS): $(TESTDIR) $(PROG)
+
+$(TESTDIR):
+ $(INS.dir)
+
+$(TESTDIR)/%: %
+ $(INS.file)
+
+$(PROG): $(OBJS)
+ $(LINK.c) -o $@ $(OBJS) $(LDLIBS)
+ $(POST_PROCESS)
+
+%.o: %.c
+ $(COMPILE.c) $<
+ $(POST_PROCESS_O)
+
+%.o: $(SRC)/common/bitext/%.c
+ $(COMPILE.c) $<
+ $(POST_PROCESS_O)
+
+%.o: $(SRC)/common/mc/zen_umc/%.c
+ $(COMPILE.c) $<
+ $(POST_PROCESS_O)
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_fabric_ids.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_fabric_ids.c
new file mode 100644
index 0000000000..592edec331
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_fabric_ids.c
@@ -0,0 +1,387 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * Test basic fabric ID composition and decomposition across a couple of
+ * different styles of fabric decomposition schemes.
+ */
+
+#include "zen_umc_test.h"
+
+const df_fabric_decomp_t naples_decomp_cpu = {
+ .dfd_sock_mask = 0x04,
+ .dfd_die_mask = 0x03,
+ .dfd_node_mask = 0xe0,
+ .dfd_comp_mask = 0x07,
+ .dfd_sock_shift = 2,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+};
+
+const df_fabric_decomp_t naples_decomp_apu = {
+ .dfd_sock_mask = 0x0,
+ .dfd_die_mask = 0x0,
+ .dfd_node_mask = 0x0,
+ .dfd_comp_mask = 0xf,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 0,
+ .dfd_comp_shift = 0
+};
+
+const df_fabric_decomp_t milan_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+};
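+
+/*
+ * As an illustration of the Milan scheme above: the node ID sits in bit 5 and
+ * the component ID in bits 4:0, so fabric ID 0x33 decomposes to socket 1,
+ * die 0, component 0x13. This matches the "Milan Roundtrip (2)" case below.
+ */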
+
+static const df_fabric_decomp_t contig_decomp = {
+ .dfd_sock_mask = 0x1c,
+ .dfd_die_mask = 0x3,
+ .dfd_node_mask = 0xf80,
+ .dfd_comp_mask = 0x07f,
+ .dfd_sock_shift = 2,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 7,
+ .dfd_comp_shift = 0
+};
+
+const umc_fabric_test_t zen_umc_test_fabric_ids[] = { {
+ .uft_desc = "Naples CPU (0)",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0
+}, {
+ .uft_desc = "Naples CPU Socket 1 (0)",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x81,
+ .uft_sock_id = 1,
+ .uft_die_id = 0,
+ .uft_comp_id = 1
+}, {
+ .uft_desc = "Naples CPU Socket 1 (1)",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x87,
+ .uft_sock_id = 1,
+ .uft_die_id = 0,
+ .uft_comp_id = 7
+}, {
+ .uft_desc = "Naples Die (0)",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0xa7,
+ .uft_sock_id = 1,
+ .uft_die_id = 1,
+ .uft_comp_id = 7
+}, {
+ .uft_desc = "Naples Die (1)",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0xe4,
+ .uft_sock_id = 1,
+ .uft_die_id = 3,
+ .uft_comp_id = 4
+}, {
+ .uft_desc = "Naples CPU Invalid Socket (0)",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 11,
+ .uft_die_id = 3,
+ .uft_comp_id = 4
+}, {
+ .uft_desc = "Naples CPU Invalid Socket (1)",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 2,
+ .uft_die_id = 3,
+ .uft_comp_id = 4
+}, {
+ .uft_desc = "Naples CPU Invalid Socket (2)",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_FALSE,
+ .uft_fabric_id = 0x91,
+}, {
+ .uft_desc = "Naples CPU Invalid Die",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0,
+ .uft_die_id = 4,
+ .uft_comp_id = 4
+}, {
+ .uft_desc = "Naples CPU Invalid Component (0)",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0x34
+}, {
+ .uft_desc = "Naples CPU Invalid Component (1)",
+ .uft_decomp = &naples_decomp_cpu,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_FALSE,
+ .uft_fabric_id = 0x88,
+}, {
+ .uft_desc = "Naples APU Invalid Socket (0)",
+ .uft_decomp = &naples_decomp_apu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 1,
+ .uft_die_id = 0,
+ .uft_comp_id = 4
+}, {
+ .uft_desc = "Naples APU Invalid Socket (1)",
+ .uft_decomp = &naples_decomp_apu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0x22,
+ .uft_die_id = 0,
+ .uft_comp_id = 4
+}, {
+ .uft_desc = "Naples APU Invalid Die (0)",
+ .uft_decomp = &naples_decomp_apu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0,
+ .uft_die_id = 1,
+ .uft_comp_id = 4
+}, {
+ .uft_desc = "Naples APU Invalid Die (1)",
+ .uft_decomp = &naples_decomp_apu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0,
+ .uft_die_id = 3,
+ .uft_comp_id = 4
+}, {
+ .uft_desc = "Naples APU Invalid Components (0)",
+ .uft_decomp = &naples_decomp_apu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0x10
+}, {
+ .uft_desc = "Naples APU Invalid Components (1)",
+ .uft_decomp = &naples_decomp_apu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0x13
+}, {
+ .uft_desc = "Naples APU Roundtrip (0)",
+ .uft_decomp = &naples_decomp_apu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x03,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 3
+}, {
+ .uft_desc = "Naples APU Roundtrip (1)",
+ .uft_decomp = &naples_decomp_apu,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x00,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0
+}, {
+ .uft_desc = "Naples APU Roundtrip (2)",
+ .uft_decomp = &naples_decomp_apu,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x0f,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0xf
+}, {
+ .uft_desc = "Milan Roundtrip (0)",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x00,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0
+}, {
+ .uft_desc = "Milan Roundtrip (1)",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x13,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0x13
+}, {
+ .uft_desc = "Milan Roundtrip (2)",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x33,
+ .uft_sock_id = 1,
+ .uft_die_id = 0,
+ .uft_comp_id = 0x13
+}, {
+ .uft_desc = "Milan Roundtrip (3)",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x20,
+ .uft_sock_id = 1,
+ .uft_die_id = 0,
+ .uft_comp_id = 0
+}, {
+ .uft_desc = "Milan Invalid Component (0)",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0x20
+}, {
+ .uft_desc = "Milan Invalid Component (1)",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0x2f
+}, {
+ .uft_desc = "Milan Invalid Die",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0,
+ .uft_die_id = 1,
+ .uft_comp_id = 0xf
+}, {
+ .uft_desc = "Milan Invalid Socket (0)",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 2,
+ .uft_die_id = 0,
+ .uft_comp_id = 0xf
+}, {
+ .uft_desc = "Milan Invalid Socket (1)",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 3,
+ .uft_die_id = 0,
+ .uft_comp_id = 0xf
+}, {
+ .uft_desc = "Milan Invalid Socket (2)",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_FALSE,
+ .uft_fabric_id = 0x40
+}, {
+ .uft_desc = "Milan Invalid Socket (3)",
+ .uft_decomp = &milan_decomp,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_FALSE,
+ .uft_fabric_id = 0x8f
+}, {
+ .uft_desc = "Contig Multi-Die Roundtrip (0)",
+ .uft_decomp = &contig_decomp,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0,
+ .uft_sock_id = 0,
+ .uft_die_id = 0,
+ .uft_comp_id = 0
+}, {
+ .uft_desc = "Contig Multi-Die Roundtrip (1)",
+ .uft_decomp = &contig_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0xfff,
+ .uft_sock_id = 0x7,
+ .uft_die_id = 0x3,
+ .uft_comp_id = 0x7f
+}, {
+ .uft_desc = "Contig Multi-Die Roundtrip (2)",
+ .uft_decomp = &contig_decomp,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x121,
+ .uft_sock_id = 0x0,
+ .uft_die_id = 0x2,
+ .uft_comp_id = 0x21
+}, {
+ .uft_desc = "Contig Multi-Die Roundtrip (3)",
+ .uft_decomp = &contig_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_TRUE,
+ .uft_fabric_id = 0x7f7,
+ .uft_sock_id = 0x3,
+ .uft_die_id = 0x3,
+ .uft_comp_id = 0x77
+}, {
+ .uft_desc = "Contig Multi-Die Bad Socket",
+ .uft_decomp = &contig_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0x8,
+ .uft_die_id = 0x1,
+ .uft_comp_id = 0x23
+}, {
+ .uft_desc = "Contig Multi-Die Bad Die",
+ .uft_decomp = &contig_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0x2,
+ .uft_die_id = 0x5,
+ .uft_comp_id = 0x23
+}, {
+ .uft_desc = "Contig Multi-Die Bad Component",
+ .uft_decomp = &contig_decomp,
+ .uft_compose = B_TRUE,
+ .uft_valid = B_FALSE,
+ .uft_sock_id = 0x2,
+ .uft_die_id = 0x1,
+ .uft_comp_id = 0xff
+}, {
+ .uft_desc = "Contig Multi-Die Bad Fabric",
+ .uft_decomp = &contig_decomp,
+ .uft_compose = B_FALSE,
+ .uft_valid = B_FALSE,
+ .uft_fabric_id = 0x1000
+}, {
+ .uft_desc = NULL
+} };
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test.c
new file mode 100644
index 0000000000..3cb487af87
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test.c
@@ -0,0 +1,553 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * Test the memory decoding and normalization features at the heart of the
+ * zen_umc(4D) driver.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <err.h>
+#include <stdlib.h>
+#include <sys/sysmacros.h>
+
+#include "zen_umc_test.h"
+
+static const char *
+zen_umc_test_strerror(zen_umc_decode_failure_t fail)
+{
+ switch (fail) {
+ case ZEN_UMC_DECODE_F_NONE:
+ return ("Actually succeeded");
+ case ZEN_UMC_DECODE_F_OUTSIDE_DRAM:
+ return ("Address outside of DRAM");
+ case ZEN_UMC_DECODE_F_NO_DF_RULE:
+ return ("Address didn't find a DF rule that matched");
+ case ZEN_UMC_DECODE_F_ILEAVE_UNDERFLOW:
+ return ("Interleave adjustments caused PA to underflow");
+ case ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP:
+ return ("Unsupported channel interleave");
+ case ZEN_UMC_DECODE_F_COD_BAD_ILEAVE:
+ return ("Unsupported interleave settings for COD hash");
+ case ZEN_UMC_DECODE_F_NPS_BAD_ILEAVE:
+ return ("Unsupported interleave settings for NPS hash");
+ case ZEN_UMC_DECODE_F_BAD_REMAP_SET:
+ return ("Remap ruleset was invalid");
+ case ZEN_UMC_DECODE_F_BAD_REMAP_ENTRY:
+ return ("Remap entry was invalid");
+ case ZEN_UMC_DECODE_F_REMAP_HAS_BAD_COMP:
+ return ("Remap entry is not a valid component ID");
+ case ZEN_UMC_DECODE_F_CANNOT_MAP_FABID:
+ return ("Failed to find target fabric ID");
+ case ZEN_UMC_DECODE_F_UMC_DOESNT_HAVE_PA:
+ return ("Target UMC does not have a DRAM rule for PA");
+ case ZEN_UMC_DECODE_F_CALC_NORM_UNDERFLOW:
+ return ("Address normalization underflowed");
+ case ZEN_UMC_DECODE_F_NO_CS_BASE_MATCH:
+ return ("No chip-select matched normal address");
+ default:
+ return ("<unknown>");
+ }
+}
+
+static const char *
+zen_umc_test_strenum(zen_umc_decode_failure_t fail)
+{
+ switch (fail) {
+ case ZEN_UMC_DECODE_F_NONE:
+ return ("ZEN_UMC_DECODE_F_NONE");
+ case ZEN_UMC_DECODE_F_OUTSIDE_DRAM:
+ return ("ZEN_UMC_DECODE_F_OUTSIDE_DRAM");
+ case ZEN_UMC_DECODE_F_NO_DF_RULE:
+ return ("ZEN_UMC_DECODE_F_NO_DF_RULE");
+ case ZEN_UMC_DECODE_F_ILEAVE_UNDERFLOW:
+ return ("ZEN_UMC_DECODE_F_ILEAVE_UNDERFLOW");
+ case ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP:
+ return ("ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP");
+ case ZEN_UMC_DECODE_F_COD_BAD_ILEAVE:
+ return ("ZEN_UMC_DECODE_F_COD_BAD_ILEAVE");
+ case ZEN_UMC_DECODE_F_NPS_BAD_ILEAVE:
+ return ("ZEN_UMC_DECODE_F_NPS_BAD_ILEAVE");
+ case ZEN_UMC_DECODE_F_BAD_REMAP_SET:
+ return ("ZEN_UMC_DECODE_F_BAD_REMAP_SET");
+ case ZEN_UMC_DECODE_F_BAD_REMAP_ENTRY:
+ return ("ZEN_UMC_DECODE_F_BAD_REMAP_ENTRY");
+ case ZEN_UMC_DECODE_F_REMAP_HAS_BAD_COMP:
+ return ("ZEN_UMC_DECODE_F_REMAP_HAS_BAD_COMP");
+ case ZEN_UMC_DECODE_F_CANNOT_MAP_FABID:
+ return ("ZEN_UMC_DECODE_F_CANNOT_MAP_FABID");
+ case ZEN_UMC_DECODE_F_UMC_DOESNT_HAVE_PA:
+ return ("ZEN_UMC_DECODE_F_UMC_DOESNT_HAVE_PA");
+ case ZEN_UMC_DECODE_F_CALC_NORM_UNDERFLOW:
+ return ("ZEN_UMC_DECODE_F_CALC_NORM_UNDERFLOW");
+ case ZEN_UMC_DECODE_F_NO_CS_BASE_MATCH:
+ return ("ZEN_UMC_DECODE_F_NO_CS_BASE_MATCH");
+ default:
+ return ("<unknown>");
+ }
+}
+
+static boolean_t
+zen_umc_test_fabric_one(const umc_fabric_test_t *test)
+{
+ boolean_t ret = B_TRUE;
+
+ (void) printf("Running test: %s\n", test->uft_desc);
+ if (test->uft_compose) {
+ uint32_t fab, sock, die, comp;
+ boolean_t rtt = B_TRUE;
+ boolean_t valid;
+
+ valid = zen_fabric_id_valid_parts(test->uft_decomp,
+ test->uft_sock_id, test->uft_die_id, test->uft_comp_id);
+ if (!valid) {
+ if (test->uft_valid) {
+ (void) printf("\tInvalid fabric ID parts "
+ "found\n");
+ return (B_FALSE);
+ }
+
+ (void) printf("\tTEST PASSED: Invalid Fabric parts "
+ "detected\n");
+ return (B_TRUE);
+ } else {
+ if (!test->uft_valid) {
+ (void) printf("\tFabric ID parts validated, "
+ "but expected failure\n");
+ return (B_FALSE);
+ }
+ }
+ zen_fabric_id_compose(test->uft_decomp, test->uft_sock_id,
+ test->uft_die_id, test->uft_comp_id, &fab);
+ if (fab != test->uft_fabric_id) {
+ (void) printf("\tFabric ID mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->uft_fabric_id, fab);
+ ret = B_FALSE;
+ } else {
+ (void) printf("\tTEST PASSED: Fabric ID composition\n");
+ }
+
+ zen_fabric_id_decompose(test->uft_decomp, fab, &sock, &die,
+ &comp);
+ if (sock != test->uft_sock_id) {
+ (void) printf("\tRound-trip socket mismatch\n"
+ "\t\texpected %u\n\t\tfound %u\n",
+ test->uft_sock_id, sock);
+ ret = rtt = B_FALSE;
+ }
+
+ if (die != test->uft_die_id) {
+ (void) printf("\tRound-trip die mismatch\n"
+ "\t\texpected %u\n\t\tfound %u\n",
+ test->uft_die_id, die);
+ ret = rtt = B_FALSE;
+ }
+
+ if (comp != test->uft_comp_id) {
+ (void) printf("\tRound-trip comp mismatch\n"
+ "\t\texpected %u\n\t\tfound %u\n",
+ test->uft_comp_id, comp);
+ ret = rtt = B_FALSE;
+ }
+
+ if (rtt) {
+ (void) printf("\tTEST PASSED: Round-trip Fabric ID "
+ "decomposition\n");
+ }
+ } else {
+ uint32_t fab, sock, die, comp;
+ boolean_t valid;
+
+ valid = zen_fabric_id_valid_fabid(test->uft_decomp,
+ test->uft_fabric_id);
+ if (!valid) {
+ if (test->uft_valid) {
+ (void) printf("\tInvalid fabric ID found\n");
+ return (B_FALSE);
+ }
+
+ (void) printf("\tTEST PASSED: Successfully found "
+ "invalid fabric ID\n");
+ return (B_TRUE);
+ } else {
+ if (!test->uft_valid) {
+ (void) printf("\tFabric ID validated, "
+ "but expected to find an invalid one\n");
+ return (B_FALSE);
+ }
+ }
+ zen_fabric_id_decompose(test->uft_decomp, test->uft_fabric_id,
+ &sock, &die, &comp);
+ if (sock != test->uft_sock_id) {
+ (void) printf("\tsocket mismatch\n"
+ "\t\texpected %u\n\t\tfound %u\n",
+ test->uft_sock_id, sock);
+ ret = B_FALSE;
+ }
+
+ if (die != test->uft_die_id) {
+ (void) printf("\tdie mismatch\n"
+ "\t\texpected %u\n\t\tfound %u\n",
+ test->uft_die_id, die);
+ ret = B_FALSE;
+ }
+
+ if (comp != test->uft_comp_id) {
+ (void) printf("\tcomp mismatch\n"
+ "\t\texpected %u\n\t\tfound %u\n",
+ test->uft_comp_id, comp);
+ ret = B_FALSE;
+ }
+
+ if (ret) {
+ (void) printf("\tTEST PASSED: Fabric ID "
+ "Decomposition\n");
+ }
+
+ zen_fabric_id_compose(test->uft_decomp, sock, die, comp, &fab);
+ if (fab != test->uft_fabric_id) {
+ (void) printf("\tFabric ID mismatch on round trip\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->uft_fabric_id, fab);
+ ret = B_FALSE;
+ } else {
+ (void) printf("\tTEST PASSED: Round-trip Fabric ID "
+ "composition\n");
+ }
+ }
+
+ return (ret);
+}
+
+static boolean_t
+zen_umc_test_decode_one(const umc_decode_test_t *test)
+{
+ boolean_t pass;
+ zen_umc_decoder_t dec;
+
+ (void) printf("Running test: %s\n", test->udt_desc);
+ (void) printf("\tDecoding address: 0x%" PRIx64 "\n", test->udt_pa);
+ memset(&dec, '\0', sizeof (dec));
+
+ pass = zen_umc_decode_pa(test->udt_umc, test->udt_pa, &dec);
+ if (pass && !test->udt_pass) {
+ uint32_t sock, die, comp;
+
+ zen_fabric_id_decompose(&test->udt_umc->umc_decomp,
+ dec.dec_targ_fabid, &sock, &die, &comp);
+
+ (void) printf("\tdecode unexpectedly succeeded\n");
+ (void) printf("\texpected error '%s' (%s/0x%x)\n",
+ zen_umc_test_strerror(test->udt_fail),
+ zen_umc_test_strenum(test->udt_fail),
+ test->udt_fail);
+ (void) printf("\t\tdecoded socket: 0x%x\n", sock);
+ (void) printf("\t\tdecoded die: 0x%x\n", die);
+ (void) printf("\t\tdecoded component: 0x%x\n", comp);
+ (void) printf("\t\tnormal address: 0x%" PRIx64 "\n",
+ dec.dec_norm_addr);
+ (void) printf("\t\tdecoded dimm: 0x%x\n", dec.dec_dimm_no);
+ (void) printf("\t\tdecoded row: 0x%x\n", dec.dec_dimm_row);
+ (void) printf("\t\tdecoded column: 0x%x\n", dec.dec_dimm_col);
+ (void) printf("\t\tdecoded bank: 0x%x\n", dec.dec_dimm_bank);
+ (void) printf("\t\tdecoded bank group: 0x%x\n",
+ dec.dec_dimm_bank_group);
+ (void) printf("\t\tdecoded rm: 0x%x\n", dec.dec_dimm_rm);
+ (void) printf("\t\tdecoded cs: 0x%x\n", dec.dec_dimm_csno);
+ (void) printf("\ttest failed\n");
+ return (B_FALSE);
+ } else if (pass) {
+ uint32_t sock, die, comp;
+ boolean_t success = B_TRUE;
+
+ zen_fabric_id_decompose(&test->udt_umc->umc_decomp,
+ dec.dec_targ_fabid, &sock, &die, &comp);
+ if (test->udt_sock != UINT8_MAX &&
+ test->udt_sock != sock) {
+ (void) printf("\tsocket mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_sock, sock);
+ success = B_FALSE;
+ }
+
+ if (test->udt_die != UINT8_MAX &&
+ test->udt_die != die) {
+ (void) printf("\tdie mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_die, die);
+ success = B_FALSE;
+ }
+
+ if (test->udt_comp != UINT8_MAX &&
+ test->udt_comp != comp) {
+ (void) printf("\tcomp mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_comp, comp);
+ success = B_FALSE;
+ }
+
+ if (test->udt_norm_addr != UINT64_MAX &&
+ test->udt_norm_addr != dec.dec_norm_addr) {
+ (void) printf("\tnormalized address mismatch\n"
+ "\t\texpected 0x%" PRIx64 "\n"
+ "\t\tfound 0x%" PRIx64 "\n",
+ test->udt_norm_addr, dec.dec_norm_addr);
+ success = B_FALSE;
+ }
+
+ if (test->udt_dimm_no != UINT32_MAX &&
+ test->udt_dimm_no != dec.dec_dimm_no) {
+ (void) printf("\tDIMM number mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_dimm_no, dec.dec_dimm_no);
+ success = B_FALSE;
+ }
+
+ if (test->udt_dimm_col != UINT32_MAX &&
+ test->udt_dimm_col != dec.dec_dimm_col) {
+ (void) printf("\tcolumn mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_dimm_col, dec.dec_dimm_col);
+ success = B_FALSE;
+ }
+
+ if (test->udt_dimm_row != UINT32_MAX &&
+ test->udt_dimm_row != dec.dec_dimm_row) {
+ (void) printf("\trow mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_dimm_row, dec.dec_dimm_row);
+ success = B_FALSE;
+ }
+
+ if (test->udt_dimm_bank != UINT8_MAX &&
+ test->udt_dimm_bank != dec.dec_dimm_bank) {
+ (void) printf("\tbank mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_dimm_bank, dec.dec_dimm_bank);
+ success = B_FALSE;
+ }
+
+ if (test->udt_dimm_bank_group != UINT8_MAX &&
+ test->udt_dimm_bank_group != dec.dec_dimm_bank_group) {
+ (void) printf("\tbank group mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_dimm_bank_group, dec.dec_dimm_bank_group);
+ success = B_FALSE;
+ }
+
+ if (test->udt_dimm_subchan != UINT8_MAX &&
+ test->udt_dimm_subchan != dec.dec_dimm_subchan) {
+ (void) printf("\tsub-channel mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_dimm_subchan, dec.dec_dimm_subchan);
+ success = B_FALSE;
+ }
+
+ if (test->udt_dimm_rm != UINT8_MAX &&
+ test->udt_dimm_rm != dec.dec_dimm_rm) {
+ (void) printf("\tRM mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_dimm_rm, dec.dec_dimm_rm);
+ success = B_FALSE;
+ }
+
+ if (test->udt_dimm_cs != UINT8_MAX &&
+ test->udt_dimm_cs != dec.dec_dimm_csno) {
+ (void) printf("\tCS mismatch\n"
+ "\t\texpected 0x%x\n\t\tfound 0x%x\n",
+ test->udt_dimm_cs, dec.dec_dimm_csno);
+ success = B_FALSE;
+ }
+
+ if (success) {
+ (void) printf("\tTEST PASSED: Successfully decoded "
+ "PA\n");
+ } else {
+ (void) printf("\tTEST FAILED!\n");
+ }
+ return (success);
+ } else if (!pass && !test->udt_pass) {
+ if (dec.dec_fail != test->udt_fail) {
+ (void) printf("\terror mismatch\n"
+ "\t\texpected '%s' (%s/0x%x)\n"
+ "\t\tfound '%s' (%s/0x%x)\n",
+ zen_umc_test_strerror(test->udt_fail),
+ zen_umc_test_strenum(test->udt_fail),
+ test->udt_fail,
+ zen_umc_test_strerror(dec.dec_fail),
+ zen_umc_test_strenum(dec.dec_fail),
+ dec.dec_fail);
+ return (B_FALSE);
+ }
+
+ (void) printf("\tTEST PASSED: Correct error generated\n");
+ return (B_TRUE);
+ } else {
+ (void) printf("\tdecode failed with error '%s' (%s/0x%x)\n",
+ zen_umc_test_strerror(dec.dec_fail),
+ zen_umc_test_strenum(dec.dec_fail),
+ dec.dec_fail);
+
+ if (test->udt_norm_addr != UINT64_MAX) {
+ (void) printf("\t\texpected normal address: "
+ "0x%" PRIx64 "\n", test->udt_norm_addr);
+ }
+
+ if (test->udt_sock != UINT8_MAX) {
+ (void) printf("\t\texpected socket: 0x%x\n",
+ test->udt_sock);
+ }
+
+ if (test->udt_die != UINT8_MAX) {
+ (void) printf("\t\texpected die: 0x%x\n",
+ test->udt_die);
+ }
+
+ if (test->udt_comp != UINT8_MAX) {
+ (void) printf("\t\texpected comp: 0x%x\n",
+ test->udt_comp);
+ }
+
+ if (test->udt_dimm_no != UINT32_MAX) {
+ (void) printf("\t\texpected DIMM number: 0x%x\n",
+ test->udt_dimm_no);
+ }
+
+ if (test->udt_dimm_col != UINT32_MAX) {
+ (void) printf("\t\texpected column: 0x%x\n",
+ test->udt_dimm_col);
+ }
+
+ if (test->udt_dimm_row != UINT32_MAX) {
+ (void) printf("\t\texpected row: 0x%x\n",
+ test->udt_dimm_row);
+ }
+
+ if (test->udt_dimm_bank != UINT8_MAX) {
+ (void) printf("\t\texpected bank: 0x%x\n",
+ test->udt_dimm_bank);
+ }
+
+ if (test->udt_dimm_bank_group != UINT8_MAX) {
+ (void) printf("\t\texpected bank group: 0x%x\n",
+ test->udt_dimm_bank_group);
+ }
+
+ if (test->udt_dimm_subchan != UINT8_MAX) {
+ (void) printf("\t\texpected sub-channel: 0x%x\n",
+ test->udt_dimm_subchan);
+ }
+
+ if (test->udt_dimm_rm != UINT8_MAX) {
+ (void) printf("\t\texpected RM: 0x%x\n",
+ test->udt_dimm_rm);
+ }
+
+ if (test->udt_dimm_cs != UINT8_MAX) {
+ (void) printf("\t\texpected CS: 0x%x\n",
+ test->udt_dimm_cs);
+ }
+
+ return (B_FALSE);
+ }
+}
+
+static void
+zen_umc_test_fabric(const umc_fabric_test_t *tests, uint_t *ntests,
+ uint_t *nfail)
+{
+ for (uint_t i = 0; tests[i].uft_desc != NULL; i++) {
+ if (!zen_umc_test_fabric_one(&tests[i]))
+ *nfail += 1;
+ *ntests += 1;
+ }
+}
+
+static void
+zen_umc_test_decode(const umc_decode_test_t *tests, uint_t *ntests,
+ uint_t *nfail)
+{
+ for (uint_t i = 0; tests[i].udt_desc != NULL; i++) {
+ if (!zen_umc_test_decode_one(&tests[i]))
+ *nfail += 1;
+ *ntests += 1;
+ }
+}
+
+typedef struct zen_umc_test_set {
+ const char *set_name;
+ const umc_decode_test_t *set_test;
+} zen_umc_test_set_t;
+
+static const zen_umc_test_set_t zen_umc_test_set[] = {
+ { "basic", zen_umc_test_basics },
+ { "channel", zen_umc_test_chans },
+ { "cod", zen_umc_test_cod },
+ { "errors", zen_umc_test_errors },
+ { "hole", zen_umc_test_hole },
+ { "ilv", zen_umc_test_ilv },
+ { "multi", zen_umc_test_multi },
+ { "nps", zen_umc_test_nps },
+ { "remap", zen_umc_test_remap },
+};
+
+static void
+zen_umc_test_selected(int argc, char *argv[], uint_t *ntests, uint_t *nfail)
+{
+ for (int i = 1; i < argc; i++) {
+ boolean_t ran = B_FALSE;
+
+ if (strcmp(argv[i], "fabric_ids") == 0) {
+ zen_umc_test_fabric(zen_umc_test_fabric_ids, ntests,
+ nfail);
+ continue;
+ }
+
+ for (uint_t t = 0; t < ARRAY_SIZE(zen_umc_test_set); t++) {
+ const zen_umc_test_set_t *s = &zen_umc_test_set[t];
+
+ if (strcmp(s->set_name, argv[i]) == 0) {
+ zen_umc_test_decode(s->set_test, ntests, nfail);
+ ran = B_TRUE;
+ break;
+ }
+ }
+
+ if (!ran) {
+ errx(EXIT_FAILURE, "Unknown test suite: %s", argv[i]);
+ }
+ }
+}
+
+int
+main(int argc, char *argv[])
+{
+ uint_t ntests = 0, nfail = 0;
+
+ if (argc > 1) {
+ zen_umc_test_selected(argc, argv, &ntests, &nfail);
+ } else {
+ zen_umc_test_fabric(zen_umc_test_fabric_ids, &ntests, &nfail);
+ for (uint_t i = 0; i < ARRAY_SIZE(zen_umc_test_set); i++) {
+ zen_umc_test_decode(zen_umc_test_set[i].set_test,
+ &ntests, &nfail);
+ }
+ }
+ (void) printf("%u/%u tests passed\n", ntests - nfail, ntests);
+ return (nfail > 0);
+}
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test.h b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test.h
new file mode 100644
index 0000000000..261b4405a2
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test.h
@@ -0,0 +1,102 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+#ifndef _ZEN_UMC_TEST_H
+#define _ZEN_UMC_TEST_H
+
+/*
+ * Common definitions for testing the pieces of zen_umc(4D).
+ */
+
+#include "zen_umc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Fabric ID Composition / Decomposition tests
+ */
+typedef struct umc_fabric_test {
+ const char *uft_desc;
+ const df_fabric_decomp_t *uft_decomp;
+ /*
+ * If uft_compose is true, we will take the socket/die/comp and try to
+ * create a fabric id from it (and then round trip through it again). If
+ * it is false, we will start with the fabric id, decompose, and then
+ * round trip back.
+ */
+ boolean_t uft_compose;
+ /*
+	 * If uft_valid is not set, we expect that either the fabric ID or the
+	 * sock/die/comp (depending on uft_compose) is invalid. In that case
+	 * the test only performs the initial validity checks.
+ */
+ boolean_t uft_valid;
+ uint32_t uft_fabric_id;
+ uint32_t uft_sock_id;
+ uint32_t uft_die_id;
+ uint32_t uft_comp_id;
+} umc_fabric_test_t;
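+
+/*
+ * As a rough illustration (taken from zen_umc_test_fabric_ids.c), a
+ * compose-direction round-trip entry looks like:
+ *
+ *	{
+ *		.uft_desc = "Milan Roundtrip (2)",
+ *		.uft_decomp = &milan_decomp,
+ *		.uft_compose = B_TRUE,
+ *		.uft_valid = B_TRUE,
+ *		.uft_fabric_id = 0x33,
+ *		.uft_sock_id = 1,
+ *		.uft_die_id = 0,
+ *		.uft_comp_id = 0x13
+ *	}
+ */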
+
+/*
+ * Test cases for actual decoding!
+ */
+typedef struct umc_decode_test {
+ const char *udt_desc;
+ const zen_umc_t *udt_umc;
+ uint64_t udt_pa;
+ boolean_t udt_pass;
+ /*
+ * When udt_pass is set to B_FALSE, then the following member will be
+ * checked to ensure that we got the right thing. Otherwise it'll be
+ * skipped.
+ */
+ zen_umc_decode_failure_t udt_fail;
+ /*
+	 * When udt_pass is set to B_TRUE, the following will all be checked. If
+ * you wish to skip one, set it to its corresponding UINTXX_MAX.
+ */
+ uint64_t udt_norm_addr;
+ uint8_t udt_sock;
+ uint8_t udt_die;
+ uint8_t udt_comp;
+ uint32_t udt_dimm_no;
+ uint32_t udt_dimm_col;
+ uint32_t udt_dimm_row;
+ uint8_t udt_dimm_bank;
+ uint8_t udt_dimm_bank_group;
+ uint8_t udt_dimm_subchan;
+ uint8_t udt_dimm_rm;
+ uint8_t udt_dimm_cs;
+} umc_decode_test_t;
+
+extern const umc_fabric_test_t zen_umc_test_fabric_ids[];
+
+extern const umc_decode_test_t zen_umc_test_basics[];
+extern const umc_decode_test_t zen_umc_test_chans[];
+extern const umc_decode_test_t zen_umc_test_cod[];
+extern const umc_decode_test_t zen_umc_test_errors[];
+extern const umc_decode_test_t zen_umc_test_hole[];
+extern const umc_decode_test_t zen_umc_test_ilv[];
+extern const umc_decode_test_t zen_umc_test_multi[];
+extern const umc_decode_test_t zen_umc_test_nps[];
+extern const umc_decode_test_t zen_umc_test_remap[];
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZEN_UMC_TEST_H */
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_basic.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_basic.c
new file mode 100644
index 0000000000..d19a0152cf
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_basic.c
@@ -0,0 +1,357 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * This provides a simple case with one DIMM, one channel, one socket, no
+ * interleaving, and no DRAM hole. This sends everything to exactly one DIMM. In
+ * particular we have configurations with the following DIMM sizes:
+ *
+ * o 16 GiB RDIMM (1 rank)
+ * o 64 GiB RDIMM (2 rank)
+ *
+ * There is no hashing going on in the channel in any way here (e.g. no CS
+ * interleaving). This is basically a simple linear mapping.
+ */
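+
+/*
+ * As a rough sketch of the resulting linear mapping (read from the
+ * chip-select layout in the first configuration below; illustrative, not
+ * normative): the column comes from PA[12:3], the bank group from
+ * PA[16:15], the bank from PA[14:13], and the row from PA[33:17]. For
+ * example, PA 0x5000 only has bits 12 and 14 set, so it decodes to column
+ * 0x200, bank 0x2, bank group 0, and row 0, matching test case (2) below.
+ */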
+
+#include "zen_umc_test.h"
+
+static const zen_umc_t zen_umc_basic_1p1c1d = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 16ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ /* Per milan_decomp */
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_ccm_inst = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 1,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 16ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 16ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+static const zen_umc_t zen_umc_basic_1p1c1d_64g = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ /* Per milan_decomp */
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_ccm_inst = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 1,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x7ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x800000000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x7ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+const umc_decode_test_t zen_umc_test_basics[] = { {
+ .udt_desc = "decode basic single socket/channel/DIMM DDR4 (0)",
+ .udt_umc = &zen_umc_basic_1p1c1d,
+ .udt_pa = 0,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "decode basic single socket/channel/DIMM DDR4 (1)",
+ .udt_umc = &zen_umc_basic_1p1c1d,
+ .udt_pa = 0x123,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x24,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "decode basic single socket/channel/DIMM DDR4 (2)",
+ .udt_umc = &zen_umc_basic_1p1c1d,
+ .udt_pa = 0x5000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x5000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x200,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0x2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "decode basic single socket/channel/DIMM DDR4 (3)",
+ .udt_umc = &zen_umc_basic_1p1c1d,
+ .udt_pa = 0x345678901,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x345678901,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x120,
+ .udt_dimm_row = 0x1a2b3,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "decode basic single socket/channel/DIMM DDR4 (4)",
+ .udt_umc = &zen_umc_basic_1p1c1d,
+ .udt_pa = 0x3ffffffff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3ffffffff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3ff,
+ .udt_dimm_row = 0x1ffff,
+ .udt_dimm_bank = 0x3,
+ .udt_dimm_bank_group = 0x3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "single socket/channel/DIMM 2R DDR4 (0)",
+ .udt_umc = &zen_umc_basic_1p1c1d_64g,
+ .udt_pa = 0,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "single socket/channel/DIMM 2R DDR4 (1)",
+ .udt_umc = &zen_umc_basic_1p1c1d_64g,
+ .udt_pa = 0x800000000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x800000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "single socket/channel/DIMM 2R DDR4 (2)",
+ .udt_umc = &zen_umc_basic_1p1c1d_64g,
+ .udt_pa = 0x876543210,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x876543210,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x242,
+ .udt_dimm_row = 0x3b2a,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "single socket/channel/DIMM 2R DDR4 (3)",
+ .udt_umc = &zen_umc_basic_1p1c1d_64g,
+ .udt_pa = 0x076543210,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x076543210,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x242,
+ .udt_dimm_row = 0x3b2a,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = NULL
+} };
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_chans.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_chans.c
new file mode 100644
index 0000000000..b3baa2d31e
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_chans.c
@@ -0,0 +1,1729 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * Here we cover several different channel-related cases. In particular, we
+ * want to exercise the following situations:
+ *
+ * o Multiple DIMMs per channel (no hashing)
+ * o Multiple DIMMs per channel (chip-select interleaving)
+ * o CS Hashing
+ * o Bank Hashing
+ * o Bank Swaps
+ * o Basic sub-channel
+ *
+ * For all of these, we don't do anything special at the Data Fabric level,
+ * strictly so that we can reason about the channel logic here.
+ *
+ * Currently, we do not have tests for the following because we don't have a
+ * great sense of how the AMD SoC will set this up for the decoder:
+ *
+ * o Cases where rank-multiplication and hashing are taking place
+ * o Cases where sub-channel hashing is being used
+ */
+
+#include "zen_umc_test.h"
+
+/*
+ * This has two of our favorite 64 GiB DIMMs. Everything is laid out linearly.
+ * Because of this, we don't apply any channel offsets.
+ */
+static const zen_umc_t zen_umc_chan_no_hash = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ /* Per milan_decomp */
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_ccm_inst = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 1,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x7ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x800000000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x7ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ }, {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 1,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0x1000000000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x7ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x1800000000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x7ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } }
+ } }
+ } }
+};
+
+/*
+ * This is a variant on the prior configuration where we interleave across all
+ * 4 ranks in a channel, which AMD calls chip-select interleaving. This uses
+ * bits in the middle of the address to select the rank, which in turn shifts
+ * up the address bits that are used to determine the row, allowing us to
+ * interleave in the middle of the address.
+ */
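+
+/*
+ * As a rough illustration (read from the chip-select bases and masks below,
+ * not normative): the base mask 0x1ffff9ffff leaves PA bits 17 and 18 out,
+ * and those two bits select among the four chip-select bases (0x0, 0x20000,
+ * 0x40000, and 0x60000). So PA 0x0, 0x20000, 0x40000, and 0x60000 land on
+ * DIMM 0/CS 0, DIMM 0/CS 1, DIMM 1/CS 0, and DIMM 1/CS 1 respectively,
+ * matching the "rank ilv" cases in zen_umc_test_chans[] below.
+ */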
+static const zen_umc_t zen_umc_chan_ilv = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ /* Per milan_decomp */
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_ccm_inst = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 1,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x20000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ }, {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 1,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0x40000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x60000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } }
+ } }
+ } }
+};
+
+/*
+ * This sets up a CS hash across all 4 ranks. The actual values here are
+ * representative of a setup we've seen on the CPU.
+ */
+static const zen_umc_t zen_umc_chan_ilv_cs_hash = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ /* Per milan_decomp */
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_ccm_inst = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 1,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x20000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ }, {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 1,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0x40000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x60000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ .chan_hash = {
+ .uch_flags = UMC_CHAN_HASH_F_CS,
+ .uch_cs_hashes = { {
+ .uah_addr_xor = 0xaaaa80000,
+ .uah_en = B_TRUE
+ }, {
+ .uah_addr_xor = 0x1555500000,
+ .uah_en = B_TRUE
+ } }
+ }
+ } }
+ } }
+};
+
+/*
+ * This enables bank hashing across both of the DIMMs in this configuration. The
+ * use of the row and not the column to select the bank is based on a CPU config
+ * seen in the wild.
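+ *
+ * As a rough sketch of what we assume the hash does (an assumption about the
+ * decoder, not something defined in this file): each bank address bit is
+ * XORed with the parity of (row & ubh_row_xor) and (col & ubh_col_xor) for
+ * the corresponding hash entry. With the values below only the row
+ * participates, since every ubh_col_xor is zero.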
+ */
+static const zen_umc_t zen_umc_chan_ilv_bank_hash = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ /* Per milan_decomp */
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_ccm_inst = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 1,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x20000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ }, {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 1,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0x40000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x60000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0xd, 0xe, 0xf,
+ 0x10 },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ .chan_hash = {
+ .uch_flags = UMC_CHAN_HASH_F_BANK,
+ .uch_bank_hashes = { {
+ .ubh_row_xor = 0x11111,
+ .ubh_col_xor = 0,
+ .ubh_en = B_TRUE
+ }, {
+ .ubh_row_xor = 0x22222,
+ .ubh_col_xor = 0,
+ .ubh_en = B_TRUE
+ }, {
+ .ubh_row_xor = 0x4444,
+ .ubh_col_xor = 0,
+ .ubh_en = B_TRUE
+ }, {
+ .ubh_row_xor = 0x8888,
+ .ubh_col_xor = 0,
+ .ubh_en = B_TRUE
+ } }
+ }
+ } }
+ } }
+};
+
+/*
+ * Some configurations allow optional bank swaps whereby the bits we use for
+ * the column and the bank are swapped around. Do one of these just to make sure
+ * we haven't built in any surprise dependencies.
+ */
+static const zen_umc_t zen_umc_chan_ilv_bank_swap = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_ccm_inst = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 1,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0x9, 0xa, 0x6,
+ 0xb },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x7,
+ 0x8, 0xc, 0xd, 0xe, 0xf, 0x10 }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x20000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0x9, 0xa, 0x6,
+ 0xb },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x7,
+ 0x8, 0xc, 0xd, 0xe, 0xf, 0x10 }
+ } }
+ }, {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 1,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0x40000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0x9, 0xa, 0x6,
+ 0xb },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x7,
+ 0x8, 0xc, 0xd, 0xe, 0xf, 0x10 }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x60000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x1ffff9ffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x13,
+ .ucs_bank_bits = { 0x9, 0xa, 0x6,
+ 0xb },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x7,
+ 0x8, 0xc, 0xd, 0xe, 0xf, 0x10 }
+ } }
+ } }
+ } }
+ } }
+};
+
+/*
+ * This is a basic DDR5 channel. We only use a single DIMM and set up a
+ * sub-channel on it.
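+ *
+ * If ucs_subchan below is read as an address-bit index (an assumption for
+ * illustration), PA bit 6 is what selects the sub-channel in this layout.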
+ */
+static const zen_umc_t zen_umc_chan_subchan_no_hash = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 16ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_ccm_inst = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 1,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 16ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 16ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11,
+ 0xd, 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } }
+ } }
+ } }
+};
+
+const umc_decode_test_t zen_umc_test_chans[] = { {
+ .udt_desc = "2 DPC 2R no ilv/hash (0)",
+ .udt_umc = &zen_umc_chan_no_hash,
+ .udt_pa = 0x0,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x0,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R no ilv/hash (1)",
+ .udt_umc = &zen_umc_chan_no_hash,
+ .udt_pa = 0x800000000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x800000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R no ilv/hash (2)",
+ .udt_umc = &zen_umc_chan_no_hash,
+ .udt_pa = 0x1000000000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1000000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R no ilv/hash (3)",
+ .udt_umc = &zen_umc_chan_no_hash,
+ .udt_pa = 0x1800000000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1800000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R no ilv/hash (4)",
+ .udt_umc = &zen_umc_chan_no_hash,
+ .udt_pa = 0x0ff1ff120,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x0ff1ff120,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x224,
+ .udt_dimm_row = 0x7f8f,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R no ilv/hash (5)",
+ .udt_umc = &zen_umc_chan_no_hash,
+ .udt_pa = 0x8ff4ff500,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x8ff4ff500,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2a0,
+ .udt_dimm_row = 0x7fa7,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R no ilv/hash (6)",
+ .udt_umc = &zen_umc_chan_no_hash,
+ .udt_pa = 0x10ff6ff700,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x10ff6ff700,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0x2e0,
+ .udt_dimm_row = 0x7fb7,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R no ilv/hash (7)",
+ .udt_umc = &zen_umc_chan_no_hash,
+ .udt_pa = 0x18ff8ff102,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x18ff8ff102,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0x220,
+ .udt_dimm_row = 0x7fc7,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R no hash, rank ilv (0)",
+ .udt_umc = &zen_umc_chan_ilv,
+ .udt_pa = 0x0,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x0,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R no hash, rank ilv (1)",
+ .udt_umc = &zen_umc_chan_ilv,
+ .udt_pa = 0x20000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R no hash, rank ilv (2)",
+ .udt_umc = &zen_umc_chan_ilv,
+ .udt_pa = 0x40000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x40000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R no hash, rank ilv (3)",
+ .udt_umc = &zen_umc_chan_ilv,
+ .udt_pa = 0x60000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x60000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R no hash, rank ilv (4)",
+ .udt_umc = &zen_umc_chan_ilv,
+ .udt_pa = 0xe1be12e00,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xe1be12e00,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1c0,
+ .udt_dimm_row = 0x1c37c,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R no hash, rank ilv (5)",
+ .udt_umc = &zen_umc_chan_ilv,
+ .udt_pa = 0x1fffffffff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1fffffffff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0x3ff,
+ .udt_dimm_row = 0x3ffff,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+},
+/*
+ * Test the CS hashing by first using bits that aren't part of the CS hash
+ * modification, e.g. the same 4-way interleaving cases that we hit earlier.
+ * Next, we take addresses that would originally go to a given CS, tweak the
+ * bits that are used in the hash, and prove that they go elsewhere; see the
+ * sketch below.
+ */
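+/*
+ * Sketch of the expected effect, assuming the CS hash XORs the parity of
+ * (PA & uah_addr_xor) into the corresponding chip-select bit: PA 0x80000
+ * overlaps the CS[0] XOR mask (0xaaaa80000) in an odd number of bits, so the
+ * low chip-select flips and the access lands on DIMM 0/CS 1 instead of CS 0,
+ * as in case (4) below. PA 0x100000 overlaps the CS[1] mask (0x1555500000),
+ * so it is steered to DIMM 1 instead of DIMM 0, as in case (6).
+ */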
+{
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (0)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x0,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x0,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (1)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x20000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (2)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x40000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x40000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (3)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x60000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x60000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (4)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x80000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x80000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 1,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (5)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x180000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x180000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 3,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (6)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x100000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x100000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 2,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (7)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x18180000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x18180000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0x303,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (8)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x181a0000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x181a0000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0x303,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (9)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x181c0000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x181c0000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0x303,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R cs hash, rank ilv (10)",
+ .udt_umc = &zen_umc_chan_ilv_cs_hash,
+ .udt_pa = 0x181e0000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x181e0000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0x303,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+},
+/*
+ * For the bank hash we first prove that we can target a given row/column in
+ * each bank and bank group without hashing (this leads to a total of 16
+ * combinations). We then later go back and start tweaking the row/column to
+ * change which bank and group we end up in.
+ */
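+/*
+ * As a rough sketch (illustrative only; the real register fields differ), the
+ * bank hash XORs a parity of masked row and column bits into each bank
+ * address bit, something like:
+ *
+ *	bank[i] ^= parity64(row & row_mask[i]) ^ parity64(col & col_mask[i]);
+ *
+ * which is why the later cases that only change row bits end up in different
+ * banks and bank groups than the plain layout would suggest. parity64() and
+ * the mask arrays are hypothetical names here.
+ */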
+{
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (0)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x0,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x0,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (1)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x8000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x8000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (2)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x10000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x10000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (3)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x18000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x18000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (4)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x2000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x2000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (5)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0xa000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xa000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (6)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x12000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x12000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (7)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x1a000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1a000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (8)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x4000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x4000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 2,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (9)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0xc000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xc000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 2,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (10)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x14000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x14000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 2,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (11)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x1c000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1c000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 2,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (12)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x6000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x6000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (13)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0xe000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xe000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (14)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x16000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x16000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (15)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x1e000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1e000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (16)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x79c000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x79c000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0xf,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (17)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x7f9c000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x7f9c000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0xff,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 2,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (18)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x7ff9c000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x7ff9c000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0xfff,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (19)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x71c000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x71c000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0xe,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank hash, rank ilv (20)",
+ .udt_umc = &zen_umc_chan_ilv_bank_hash,
+ .udt_pa = 0x71c118,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x71c118,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x23,
+ .udt_dimm_row = 0xe,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+},
+/*
+ * Bank swapping. We do a few sanity tests on this just to make sure that the
+ * right bits trigger the swap in the first DIMM/rank.
+ */
+{
+ .udt_desc = "2 DPC 2R bank swap, rank ilv (0)",
+ .udt_umc = &zen_umc_chan_ilv_bank_swap,
+ .udt_pa = 0x4247,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x4247,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x80,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "2 DPC 2R bank swap, rank ilv (1)",
+ .udt_umc = &zen_umc_chan_ilv_bank_swap,
+ .udt_pa = 0xff6214247,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xff6214247,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x280,
+ .udt_dimm_row = 0x1fec4,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "Basic DDR5 Sub-channel (0)",
+ .udt_umc = &zen_umc_chan_subchan_no_hash,
+ .udt_pa = 0x0,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x0,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x0,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "Basic DDR5 Sub-channel (1)",
+ .udt_umc = &zen_umc_chan_subchan_no_hash,
+ .udt_pa = 0x9999,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x9999,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x336,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "Basic DDR5 Sub-channel (2)",
+ .udt_umc = &zen_umc_chan_subchan_no_hash,
+ .udt_pa = 0x99d9,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x99d9,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x336,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = NULL
+} };
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_cod.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_cod.c
new file mode 100644
index 0000000000..c515672cfd
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_cod.c
@@ -0,0 +1,1354 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * Here we try to test a few variants of the Zen 3 COD-based hashing, including
+ * our favorite, the 6-channel case. These all use DFv3 and 1 DPC 16 GiB
+ * channels without any internal hashing (that is tested elsewhere).
+ */
+
+#include "zen_umc_test.h"
+
+/*
+ * This is a basic 4-channel hash that sends us out to one of four locations.
+ * It enables hashing in all three hash regions because the 6-channel variant
+ * does not seem to use them.
+ */
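+/*
+ * As a hedged sketch inferred from the test vectors below (not taken from
+ * hardware documentation), the two channel select bits appear to be PA bits 9
+ * and 12, each XORed with the corresponding enabled hash regions, roughly:
+ *
+ *	chan = (bitx64(pa, 9, 9) ^ bitx64(pa, 16, 16) ^ bitx64(pa, 21, 21) ^
+ *	    bitx64(pa, 30, 30)) |
+ *	    ((bitx64(pa, 12, 12) ^ bitx64(pa, 17, 17) ^ bitx64(pa, 22, 22) ^
+ *	    bitx64(pa, 31, 31)) << 1);
+ *
+ * with those two select bits then removed to form the normalized address
+ * (e.g. PA 0x101ff -> channel 1, normalized 0x41ff in the cases below).
+ */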
+static const zen_umc_t zen_umc_cod_4ch = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 | DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_COD2_4CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_COD2_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_COD2_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_COD2_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_COD2_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+static const zen_umc_t zen_umc_cod_6ch = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 96ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 6,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_6CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_np2_space0 = 21,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_np2_space0 = 21,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_np2_space0 = 21,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_np2_space0 = 21,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 4,
+ .chan_instid = 4,
+ .chan_logid = 4,
+ .chan_nrules = 1,
+ .chan_np2_space0 = 21,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 5,
+ .chan_instid = 5,
+ .chan_logid = 5,
+ .chan_nrules = 1,
+ .chan_np2_space0 = 21,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+const umc_decode_test_t zen_umc_test_cod[] = { {
+ .udt_desc = "COD 4ch (0)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x1ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (1)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x3ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (2)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x11ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (3)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x13ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (4)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x101ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x41ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (5)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x103ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x41ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (6)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x303ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xc1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (7)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x313ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xc1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (8)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x311ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xc1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (9)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x2311ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x8c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x4,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (10)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x6311ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x18c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (11)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x6313ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x18c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (12)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x6303ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x18c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (13)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x6301ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x18c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (14)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x406301ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1018c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x80c,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (15)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x406303ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1018c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x80c,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (16)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x406311ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1018c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x80c,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (17)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0x406313ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1018c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x80c,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (18)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0xc06313ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3018c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x180c,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (19)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0xc06311ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3018c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x180c,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (20)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0xc06301ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3018c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x180c,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 4ch (21)",
+ .udt_umc = &zen_umc_cod_4ch,
+ .udt_pa = 0xc06303ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3018c1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x180c,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (0)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x1ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (1)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x11ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (2)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x21ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (3)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x31ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (4)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x41ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (5)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x51ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 5,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (6)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x61ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3000001ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x18000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (7)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x71ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3000001ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x18000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (8)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x81ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x11ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x23f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (9)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x91ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x11ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x23f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (10)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0xa1ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x11ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x23f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (11)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0xb1ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x11ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x23f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (12)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0xc1ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x11ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 5,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x23f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (13)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0xd1ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x11ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x23f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (14)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0xe1ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3000011ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x23f,
+ .udt_dimm_row = 0x18000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (15)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0xf1ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3000011ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x23f,
+ .udt_dimm_row = 0x18000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+},
+/*
+ * The above went through and showed that we can hash things correctly and
+ * account for our mod-3 case. The cases below try to find the higher addresses
+ * that would result in the same normalized address that we have, but on
+ * different channels, to try to complete the set.
+ */
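+/*
+ * For reference, the earlier 6-channel cases walk PA bits 14:12 through the
+ * values 0-5 to hit each channel once; the leftover encodings 6 and 7 (e.g.
+ * PA 0x61ff and 0x71ff) are folded back onto channels 0 and 1 at a high
+ * normalized address (0x3000001ff), which is the mod-3 behavior referred to
+ * above. This is an observation from the vectors, not a statement of the
+ * exact hardware algorithm.
+ */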
+{
+ .udt_desc = "COD 6ch (16)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x8000061ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3000001ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x18000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (17)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x8000071ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3000001ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x18000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (18)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x10000061ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3000001ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x18000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (19)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x10000071ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3000001ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 5,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x18000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+},
+/*
+ * With that in place, we go back and show that hashing actually impacts things
+ * as we expect. Note that the bit 0 hash was already taken into account.
+ */
+{
+ .udt_desc = "COD 6ch (20)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x8001ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1001ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x8,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (21)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0xa001ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1401ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xa,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (22)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0xe001ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3001c01ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x1800e,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (23)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x180e001ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x301c01ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x180e,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "COD 6ch (24)",
+ .udt_umc = &zen_umc_cod_6ch,
+ .udt_pa = 0x1c0e041ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x381c01ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0x1c0e,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = NULL
+} };
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_errors.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_errors.c
new file mode 100644
index 0000000000..73bbfaac98
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_errors.c
@@ -0,0 +1,596 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * This tries to make sure that if we somehow had invalid state, we'd properly
+ * end up detecting an error. Note, for these we try to include the most
+ * bare-bones zen_umc_t possible to minimize the size (at least in this one
+ * file for a change). Note that hole decoding errors are already tested in
+ * zen_umc_test_hole.c.
+ */
+
+#include "zen_umc_test.h"
+
+/*
+ * This first structure is used to test:
+ * o Being outside TOM2
+ * o Being in the 1 TiB reserved region
+ * o Not being covered by a valid DF rule
+ * o Several invalid interleave combinations
+ * o Unsupported interleave rule
+ * o Bad Remap set counts
+ */
+static const zen_umc_t zen_umc_bad_df = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 2ULL * 1024ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 10,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 1ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_COD4_2CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 2ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 3ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 2,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_COD1_8CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 4ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 5ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 2,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_6CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 6ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 7ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 2,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_6CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 8ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 9ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 2,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = INT32_MAX
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 10ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 11ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_5CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 12ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 13ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 2,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_2CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_REMAP_EN |
+ DF_DRAM_F_REMAP_SOCK,
+ .ddr_base = 14ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 15ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_REMAP_EN,
+ .ddr_base = 16ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 17ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_REMAP_EN,
+ .ddr_base = 18ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 19ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 3
+ } },
+ } }
+};
+
+/*
+ * This UMC contains a weird relationship between its rule, TOM, and the actual
+ * DRAM hole base, which creates an inconsistency that should underflow. This
+ * would honestly be a bit odd to find in the wild. The fact that TOM is much
+ * greater than the hole base is key. This requires DFv4 so that the base is
+ * subtracted.
+ */
+static const zen_umc_t zen_umc_hole_underflow = {
+ .umc_tom = 3ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 2ULL * 1024ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_4,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_flags = ZEN_UMC_DF_F_HOLE_VALID,
+ .zud_dfno = 0,
+ .zud_dram_nrules = 2,
+ .zud_hole_base = 0x0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 1ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 8ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_2CH
+ } }
+ } },
+};
+
+/*
+ * This is a variant of the previous one, but it takes place when normalization
+ * occurs. The biggest gotcha there is that for DFv3 the base isn't subtracted
+ * initially for interleaving, only when normalizing.
+ */
+static const zen_umc_t zen_umc_norm_underflow = {
+ .umc_tom = 3ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 16ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_flags = ZEN_UMC_DF_F_HOLE_VALID,
+ .zud_dfno = 0,
+ .zud_dram_nrules = 2,
+ .zud_nchan = 1,
+ .zud_hole_base = 0xc0000000,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 8ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 8ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+/*
+ * This DF is designed to capture bad remap entry pointers and remap entries
+ * with bad components.
+ */
+static const zen_umc_t zen_umc_remap_errs = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 2,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 2,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_REMAP_EN |
+ DF_DRAM_F_REMAP_SOCK,
+ .ddr_base = 0,
+ .ddr_limit = 32ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0x1f,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH,
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_REMAP_EN,
+ .ddr_base = 32ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH,
+ .ddr_remap_ent = 1
+ } },
+ .zud_remap = { {
+ .csr_nremaps = ZEN_UMC_MAX_REMAP_ENTS,
+ .csr_remaps = { 0x0 }
+ }, {
+ .csr_nremaps = ZEN_UMC_MAX_REMAP_ENTS,
+ .csr_remaps = { 0x21 }
+ } }
+ } }
+};
+
+/*
+ * This UMC is used to cover the cases where:
+ * o There is no match to the fabric ID
+ * o The UMC in question doesn't have rules for our PA
+ * o Normalization underflow
+ * o Failure to match a chip-select
+ */
+static const zen_umc_t zen_umc_fab_errs = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 4,
+ .zud_nchan = 2,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 1ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0x22,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 2ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 3ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 5ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0x1,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } }
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0x400000000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+const umc_decode_test_t zen_umc_test_errors[] = { {
+ .udt_desc = "Memory beyond TOM2 doesn't decode (0)",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x20000000000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM
+}, {
+ .udt_desc = "Memory beyond TOM2 doesn't decode (1)",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x2123456789a,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM
+}, {
+ .udt_desc = "Memory in 1 TiB-12 GiB hole doesn't decode (0)",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0xfd00000000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM
+}, {
+ .udt_desc = "Memory in 1 TiB-12 GiB hole doesn't decode (1)",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0xfd00000001,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM
+}, {
+ .udt_desc = "Memory in 1 TiB-12 GiB hole doesn't decode (2)",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0xffffffffff,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM
+}, {
+ .udt_desc = "No valid DF rule (0)",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x1ffffffffff,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_NO_DF_RULE
+}, {
+ .udt_desc = "No valid DF rule (1)",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0xfcffffffff,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_NO_DF_RULE
+}, {
+ .udt_desc = "No valid DF rule (2)",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x123456,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_NO_DF_RULE
+}, {
+ .udt_desc = "Bad COD hash interleave - socket",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x0,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_COD_BAD_ILEAVE
+}, {
+ .udt_desc = "Bad COD hash interleave - die",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x200000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_COD_BAD_ILEAVE
+}, {
+ .udt_desc = "Bad COD 6ch hash interleave - socket",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x400000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_COD_BAD_ILEAVE
+}, {
+ .udt_desc = "Bad COD 6ch hash interleave - die",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x600000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_COD_BAD_ILEAVE
+}, {
+ .udt_desc = "Unknown interleave",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x800000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP,
+}, {
+ .udt_desc = "Bad NPS hash interleave - die",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0xc00000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_NPS_BAD_ILEAVE
+}, {
+ .udt_desc = "Bad NPS NP2 hash interleave - die",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0xa00000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_NPS_BAD_ILEAVE
+}, {
+ .udt_desc = "Bad Remap Set - DFv3",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0xe00000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_BAD_REMAP_SET
+}, {
+ .udt_desc = "Bad Remap Set - DFv4 (0)",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x1000000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_BAD_REMAP_SET
+}, {
+ .udt_desc = "Bad Remap Set - DFv4 (1)",
+ .udt_umc = &zen_umc_bad_df,
+ .udt_pa = 0x1200000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_BAD_REMAP_SET
+}, {
+ .udt_desc = "Interleave address underflow",
+ .udt_umc = &zen_umc_hole_underflow,
+ .udt_pa = 0x100000000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_ILEAVE_UNDERFLOW
+}, {
+ .udt_desc = "Normal address underflow",
+ .udt_umc = &zen_umc_norm_underflow,
+ .udt_pa = 0x100000000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_CALC_NORM_UNDERFLOW
+}, {
+ .udt_desc = "Non-existent remap entry",
+ .udt_umc = &zen_umc_remap_errs,
+ .udt_pa = 0x0,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_BAD_REMAP_ENTRY
+}, {
+ .udt_desc = "Remap entry has bogus ID",
+ .udt_umc = &zen_umc_remap_errs,
+ .udt_pa = 0x8f0000000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_REMAP_HAS_BAD_COMP
+}, {
+ .udt_desc = "Target fabric ID doesn't exist",
+ .udt_umc = &zen_umc_fab_errs,
+ .udt_pa = 0x12345,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_CANNOT_MAP_FABID
+}, {
+ .udt_desc = "UMC doesn't have DRAM rule",
+ .udt_umc = &zen_umc_fab_errs,
+ .udt_pa = 0x87654321,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_UMC_DOESNT_HAVE_PA
+}, {
+ .udt_desc = "No matching chip-select",
+ .udt_umc = &zen_umc_fab_errs,
+ .udt_pa = 0x101234567,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_NO_CS_BASE_MATCH
+}, {
+ .udt_desc = NULL
+} };
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_hole.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_hole.c
new file mode 100644
index 0000000000..652317817d
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_hole.c
@@ -0,0 +1,668 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * This provides a few different examples of how we take into account the DRAM
+ * hole. There are three primary cases to consider:
+ *
+ * o Taking it into account when determining whether DRAM is valid or not.
+ * o Taking it into account when we do address interleaving (DFv4).
+ * o Taking it into account when performing normalization.
+ */
+
+#include "zen_umc_test.h"
+
+/*
+ * This is a standard application of the DRAM hole starting at 2 GiB in the
+ * space. This follows the DFv3 rules.
+ */
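As a rough illustration of the arithmetic the "DRAM Hole DFv3 4ch" vectors below encode (a sketch, not the zen_umc decode path), the snippet reproduces the expected channel and normalized address for vector (4). Subtracting the hole before stripping the interleave bits is just one way to get the same numbers; it is equivalent here because the 2 GiB hole is a multiple of the interleave granularity.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	const uint64_t hole_base = 0x80000000ULL;	/* zud_hole_base */
	const uint64_t hole_size = 0x100000000ULL - hole_base;	/* 2 GiB */
	uint64_t pa = 0x100000000ULL;	/* "DRAM Hole DFv3 4ch (4)" */
	uint64_t adj, chan, norm;

	/* Account for the hole for addresses at or above 4 GiB. */
	adj = (pa >= 0x100000000ULL) ? pa - hole_size : pa;
	/* 4-channel interleave: two select bits at ddr_addr_start (bit 9). */
	chan = (adj >> 9) & 0x3;
	/* Drop the two select bits to form the normalized address. */
	norm = ((adj >> 11) << 9) | (adj & 0x1ff);

	/* Expect chan 0, norm 0x20000000 per the vector below. */
	(void) printf("pa 0x%" PRIx64 " -> chan %" PRIu64 ", norm 0x%" PRIx64
	    "\n", pa, chan, norm);
	return (0);
}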
+static const zen_umc_t zen_umc_hole_dfv3 = {
+ .umc_tom = 2ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 68ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_flags = ZEN_UMC_DF_F_HOLE_VALID,
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0x80000000,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 68ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 68ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 68ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 68ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 68ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+/*
+ * This case is a little insidious to be honest. Here we're using a DFv4 style
+ * DRAM hole. Technically the hole needs to be taken into account before
+ * interleaving here (unlike DFv3). So we shrink the hole's size to 4 KiB and
+ * set up interleaving at address bit 12. This ensures that stuff around the
+ * hole will catch this and adjust for the interleave. Yes, this is smaller
+ * than the hole is allowed to be in hardware, but here we're all just
+ * integers. Basically the hole covers the last 4 KiB of low memory. We use
+ * hex here to make these easier to deal with.
+ */
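To make the "hole before interleave" ordering concrete, here is a hedged sketch of the arithmetic the "Shenanigans" vectors below encode, using vector (1); the 4 KiB hole is removed first and only then are the two channel bits at bit 12 examined. This mirrors the expected values and is not the driver's decode function.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	const uint64_t hole_base = 0xfffff000ULL;	/* zud_hole_base */
	const uint64_t hole_size = 0x100000000ULL - hole_base;	/* 4 KiB */
	uint64_t pa = 0x100001000ULL;	/* "DRAM Hole DFv4 4ch Shenanigans (1)" */
	uint64_t adj, chan, norm;

	/* DFv4: the hole is accounted for before interleave selection. */
	adj = (pa >= 0x100000000ULL) ? pa - hole_size : pa;
	/* 4-channel interleave: two select bits at ddr_addr_start (bit 12). */
	chan = (adj >> 12) & 0x3;
	/* Remove the two select bits to form the normalized address. */
	norm = ((adj >> 14) << 12) | (adj & 0xfff);

	/* Expect chan 0, norm 0x40000000 per the vector below. */
	(void) printf("pa 0x%" PRIx64 " -> chan %" PRIu64 ", norm 0x%" PRIx64
	    "\n", pa, chan, norm);
	return (0);
}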
+static const zen_umc_t zen_umc_hole_dfv4 = {
+ .umc_tom = 0xfffff000,
+ .umc_tom2 = 0x1000001000,
+ .umc_df_rev = DF_REV_4,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_flags = ZEN_UMC_DF_F_HOLE_VALID,
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0xfffff000,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 0x1000001000,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 0x1000001000,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 0x1000001000,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 0x1000001000,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 0x1000001000,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+
+const umc_decode_test_t zen_umc_test_hole[] = { {
+ .udt_desc = "Memory in hole doesn't decode (0)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0xb0000000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM
+}, {
+ .udt_desc = "Memory in hole doesn't decode (1)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0x80000000,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM
+}, {
+ .udt_desc = "Memory in hole doesn't decode (2)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0xffffffff,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM
+}, {
+ .udt_desc = "Memory in hole doesn't decode (3)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0xcba89754,
+ .udt_pass = B_FALSE,
+ .udt_fail = ZEN_UMC_DECODE_F_OUTSIDE_DRAM
+}, {
+ .udt_desc = "DRAM Hole DFv3 4ch (0)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0x7fffffff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1fffffff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3ff,
+ .udt_dimm_row = 0xfff,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv3 4ch (1)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0x7ffffdff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1fffffff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3ff,
+ .udt_dimm_row = 0xfff,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv3 4ch (2)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0x7ffffbff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1fffffff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3ff,
+ .udt_dimm_row = 0xfff,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv3 4ch (3)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0x7ffff9ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1fffffff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3ff,
+ .udt_dimm_row = 0xfff,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv3 4ch (4)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0x100000000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x0,
+ .udt_dimm_row = 0x1000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv3 4ch (5)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0x100000200,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x0,
+ .udt_dimm_row = 0x1000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv3 4ch (6)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0x100000400,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x0,
+ .udt_dimm_row = 0x1000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv3 4ch (7)",
+ .udt_umc = &zen_umc_hole_dfv3,
+ .udt_pa = 0x100000600,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x0,
+ .udt_dimm_row = 0x1000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv4 4ch Shenanigans (0)",
+ .udt_umc = &zen_umc_hole_dfv4,
+ .udt_pa = 0x100000000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3ffff000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x200,
+ .udt_dimm_row = 0x1fff,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv4 4ch Shenanigans (1)",
+ .udt_umc = &zen_umc_hole_dfv4,
+ .udt_pa = 0x100001000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x40000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x0,
+ .udt_dimm_row = 0x2000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv4 4ch Shenanigans (2)",
+ .udt_umc = &zen_umc_hole_dfv4,
+ .udt_pa = 0x100002000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x40000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x0,
+ .udt_dimm_row = 0x2000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DRAM Hole DFv4 4ch Shenanigans (3)",
+ .udt_umc = &zen_umc_hole_dfv4,
+ .udt_pa = 0x100003000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x40000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x0,
+ .udt_dimm_row = 0x2000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = NULL
+} };
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_ilv.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_ilv.c
new file mode 100644
index 0000000000..06ee4413fe
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_ilv.c
@@ -0,0 +1,1719 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * Test the basic interleave scenarios that don't involve hashing or non-powers
+ * of 2. Every DRAM channel we map to always has a single 16 GiB DIMM to keep
+ * things straightforward. For all of these designs, UMC channels are labeled
+ * consecutively. Note, there is no remapping going on here; that is saved for
+ * elsewhere. In particular we cover:
+ *
+ * o Channel Interleaving
+ * o Channel + Die Interleaving
+ * o Channel + Socket Interleaving
+ * o Channel + Die + Socket Interleaving
+ *
+ * Throughout these we try to vary the starting address and we adjust the DF
+ * decomposition to be something close to an existing DF revision.
+ */
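For the non-hashed, power-of-two interleaves in this file, the channel is simply the log2(nchan) address bits starting at ddr_addr_start, and the normalized address is the system address with those bits removed. The following is a hedged sketch (not the decoder itself) that reproduces the "ILV: 1/1/4 4-way Channel (4)" vector from the zen_umc_ilv_1p1d4c_4ch configuration below.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	uint64_t pa = 0x42231679ULL;	/* "ILV: 1/1/4 4-way Channel (4)" */
	uint64_t chan, norm;

	/* 4-way channel interleave starting at bit 9: two select bits. */
	chan = (pa >> 9) & 0x3;
	/* Strip the select bits; everything else passes through. */
	norm = ((pa >> 11) << 9) | (pa & 0x1ff);

	/* Expect chan 3, norm 0x1088c479 per the vector below. */
	(void) printf("pa 0x%" PRIx64 " -> chan %" PRIu64 ", norm 0x%" PRIx64
	    "\n", pa, chan, norm);
	return (0);
}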
+
+#include "zen_umc_test.h"
+
+/*
+ * Our first version of this: 4-way channel interleaving.
+ */
+static const zen_umc_t zen_umc_ilv_1p1d4c_4ch = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+/*
+ * 2P configuration with 4 channels each. We interleave across 2 sockets and 4
+ * channels. This uses a single rule.
+ */
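With socket interleaving added, one more bit is consumed above the channel bits. The sketch below reproduces the "ILV: 2/1/4 2P-way, 4-way Channel (6)" vector; note that the socket bit sitting above the two channel bits is an ordering inferred from the expected values, not a statement about the hardware documentation.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	uint64_t pa = 0xbadc211ffULL;	/* "ILV: 2/1/4 2P-way, 4-way Channel (6)" */
	uint64_t chan, sock, norm;

	/* Two channel-select bits at ddr_addr_start (bit 10)... */
	chan = (pa >> 10) & 0x3;
	/* ...with the single socket-select bit directly above them. */
	sock = (pa >> 12) & 0x1;
	/* Three interleave bits are removed in total. */
	norm = ((pa >> 13) << 10) | (pa & 0x3ff);

	/* Expect sock 1, chan 0, norm 0x175b841ff per the vector below. */
	(void) printf("pa 0x%" PRIx64 " -> sock %" PRIu64 ", chan %" PRIu64
	    ", norm 0x%" PRIx64 "\n", pa, sock, chan, norm);
	return (0);
}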
+static const zen_umc_t zen_umc_ilv_2p1d4c_2s4ch = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 2,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 10,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 10,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 10,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 10,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 10,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ }, {
+ .zud_dfno = 1,
+ .zud_dram_nrules = 2,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 10,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x20,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 10,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x21,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 10,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x22,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 10,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x23,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 128ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 10,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+/*
+ * This is a 2 Die, 2 Channel interleave.
+ */
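Die interleaving behaves analogously: one channel bit at ddr_addr_start (bit 12) with the die-select bit directly above it. As a sketch only, the snippet reproduces the "ILV: 1/2/2 2-way Die, 2-way Channel (2)" vector below.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	uint64_t pa = 0x12233ffbbULL;	/* "ILV: 1/2/2 2-way Die, 2-way Channel (2)" */
	uint64_t chan, die, norm;

	/* One channel-select bit at ddr_addr_start (bit 12)... */
	chan = (pa >> 12) & 0x1;
	/* ...and one die-select bit directly above it. */
	die = (pa >> 13) & 0x1;
	/* Two interleave bits are removed in total. */
	norm = ((pa >> 14) << 12) | (pa & 0xfff);

	/* Expect die 1, chan 1, norm 0x488cffbb per the vector below. */
	(void) printf("pa 0x%" PRIx64 " -> die %" PRIu64 ", chan %" PRIu64
	    ", norm 0x%" PRIx64 "\n", pa, die, chan, norm);
	return (0);
}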
+static const zen_umc_t zen_umc_ilv_1p2d2c_2d2ch = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x00,
+ .dfd_die_mask = 0x01,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 2,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 2,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ }, {
+ .zud_dfno = 1,
+ .zud_dram_nrules = 2,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x20,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x21,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+/*
+ * This is a 2 Socket, 2 Die, 2 Channel interleave, aka Naples-light, but with
+ * a contiguous DFv4 style socket ID.
+ */
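Combining all three levels, this configuration should consume three bits starting at ddr_addr_start (bit 8). The sketch below assumes the same ordering seen in the two-level cases above (channel lowest, then die, then socket); the address used is arbitrary and not one of this file's vectors, so treat the printed values as illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	uint64_t pa = 0x700ULL;		/* arbitrary illustrative address */
	uint64_t chan, die, sock, norm;

	/* One channel-select bit at ddr_addr_start (bit 8)... */
	chan = (pa >> 8) & 0x1;
	/* ...then, by assumption, one die bit and one socket bit above it. */
	die = (pa >> 9) & 0x1;
	sock = (pa >> 10) & 0x1;
	/* Three interleave bits are removed in total. */
	norm = ((pa >> 11) << 8) | (pa & 0xff);

	/* With these assumptions: sock 1, die 1, chan 1, norm 0. */
	(void) printf("pa 0x%" PRIx64 " -> sock %" PRIu64 ", die %" PRIu64
	    ", chan %" PRIu64 ", norm 0x%" PRIx64 "\n", pa, sock, die, chan,
	    norm);
	return (0);
}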
+static const zen_umc_t zen_umc_ilv_naplesish = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 256ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x1e,
+ .dfd_die_mask = 0x01,
+ .dfd_node_mask = 0xf80,
+ .dfd_comp_mask = 0x7f,
+ .dfd_sock_shift = 1,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 7,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 4,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 2,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ }, {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 2,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x80,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x81,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ }, {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 2,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x100,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x101,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ }, {
+ .zud_dfno = 1,
+ .zud_dram_nrules = 2,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x180,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x181,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 1,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+const umc_decode_test_t zen_umc_test_ilv[] = { {
+ .udt_desc = "ILV: 1/1/4 4-way Channel (0)",
+ .udt_umc = &zen_umc_ilv_1p1d4c_4ch,
+ .udt_pa = 0x1ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ILV: 1/1/4 4-way Channel (1)",
+ .udt_umc = &zen_umc_ilv_1p1d4c_4ch,
+ .udt_pa = 0x3ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ILV: 1/1/4 4-way Channel (2)",
+ .udt_umc = &zen_umc_ilv_1p1d4c_4ch,
+ .udt_pa = 0x5ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ILV: 1/1/4 4-way Channel (3)",
+ .udt_umc = &zen_umc_ilv_1p1d4c_4ch,
+ .udt_pa = 0x7ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ILV: 1/1/4 4-way Channel (4)",
+ .udt_umc = &zen_umc_ilv_1p1d4c_4ch,
+ .udt_pa = 0x42231679,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1088c479,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x8f,
+ .udt_dimm_row = 0x844,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ILV: 2/1/4 2P-way, 4-way Channel (0)",
+ .udt_umc = &zen_umc_ilv_2p1d4c_2s4ch,
+ .udt_pa = 0x21ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x5ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0xbf,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ILV: 2/1/4 2P-way, 4-way Channel (1)",
+ .udt_umc = &zen_umc_ilv_2p1d4c_2s4ch,
+ .udt_pa = 0x23ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x7ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0xff,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ILV: 2/1/4 2P-way, 4-way Channel (2)",
+ .udt_umc = &zen_umc_ilv_2p1d4c_2s4ch,
+ .udt_pa = 0xbadc201ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x175b841ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xbadc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/1/4 2p-way, 4-way channel (3)",
+ .udt_umc = &zen_umc_ilv_2p1d4c_2s4ch,
+ .udt_pa = 0xbadc205ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x175b841ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xbadc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/1/4 2p-way, 4-way channel (4)",
+ .udt_umc = &zen_umc_ilv_2p1d4c_2s4ch,
+ .udt_pa = 0xbadc209ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x175b841ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xbadc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/1/4 2p-way, 4-way channel (5)",
+ .udt_umc = &zen_umc_ilv_2p1d4c_2s4ch,
+ .udt_pa = 0xbadc20dff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x175b841ff,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xbadc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ILV: 2/1/4 2P-way, 4-way Channel (6)",
+ .udt_umc = &zen_umc_ilv_2p1d4c_2s4ch,
+ .udt_pa = 0xbadc211ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x175b841ff,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xbadc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/1/4 2p-way, 4-way channel (7)",
+ .udt_umc = &zen_umc_ilv_2p1d4c_2s4ch,
+ .udt_pa = 0xbadc215ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x175b841ff,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xbadc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/1/4 2p-way, 4-way channel (8)",
+ .udt_umc = &zen_umc_ilv_2p1d4c_2s4ch,
+ .udt_pa = 0xbadc219ff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x175b841ff,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xbadc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/1/4 2p-way, 4-way channel (9)",
+ .udt_umc = &zen_umc_ilv_2p1d4c_2s4ch,
+ .udt_pa = 0xbadc21dff,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x175b841ff,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f,
+ .udt_dimm_row = 0xbadc,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 1/2/2 2-way die, 2-way channel (0)",
+ .udt_umc = &zen_umc_ilv_1p2d2c_2d2ch,
+ .udt_pa = 0x12233cfbb,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x488cffbb,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f7,
+ .udt_dimm_row = 0x2446,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 1/2/2 2-way die, 2-way channel (1)",
+ .udt_umc = &zen_umc_ilv_1p2d2c_2d2ch,
+ .udt_pa = 0x12233efbb,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x488cffbb,
+ .udt_sock = 0,
+ .udt_die = 1,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f7,
+ .udt_dimm_row = 0x2446,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 1/2/2 2-way die, 2-way channel (2)",
+ .udt_umc = &zen_umc_ilv_1p2d2c_2d2ch,
+ .udt_pa = 0x12233ffbb,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x488cffbb,
+ .udt_sock = 0,
+ .udt_die = 1,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f7,
+ .udt_dimm_row = 0x2446,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 1/2/2 2-way die, 2-way channel (3)",
+ .udt_umc = &zen_umc_ilv_1p2d2c_2d2ch,
+ .udt_pa = 0x12233dfbb,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x488cffbb,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f7,
+ .udt_dimm_row = 0x2446,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 1,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/2/2 2-way sock, 2-way die, 2-way channel (0)",
+ .udt_umc = &zen_umc_ilv_naplesish,
+ .udt_pa = 0xffed37f42,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ffda6f42,
+ .udt_sock = 1,
+ .udt_die = 1,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1e8,
+ .udt_dimm_row = 0xffed,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/2/2 2-way sock, 2-way die, 2-way channel (1)",
+ .udt_umc = &zen_umc_ilv_naplesish,
+ .udt_pa = 0xffed37e42,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ffda6f42,
+ .udt_sock = 1,
+ .udt_die = 1,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1e8,
+ .udt_dimm_row = 0xffed,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/2/2 2-way sock, 2-way die, 2-way channel (2)",
+ .udt_umc = &zen_umc_ilv_naplesish,
+ .udt_pa = 0xffed37d42,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ffda6f42,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1e8,
+ .udt_dimm_row = 0xffed,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/2/2 2-way sock, 2-way die, 2-way channel (3)",
+ .udt_umc = &zen_umc_ilv_naplesish,
+ .udt_pa = 0xffed37c42,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ffda6f42,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1e8,
+ .udt_dimm_row = 0xffed,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/2/2 2-way sock, 2-way die, 2-way channel (4)",
+ .udt_umc = &zen_umc_ilv_naplesish,
+ .udt_pa = 0xffed37b42,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ffda6f42,
+ .udt_sock = 0,
+ .udt_die = 1,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1e8,
+ .udt_dimm_row = 0xffed,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/2/2 2-way sock, 2-way die, 2-way channel (5)",
+ .udt_umc = &zen_umc_ilv_naplesish,
+ .udt_pa = 0xffed37a42,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ffda6f42,
+ .udt_sock = 0,
+ .udt_die = 1,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1e8,
+ .udt_dimm_row = 0xffed,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/2/2 2-way sock, 2-way die, 2-way channel (6)",
+ .udt_umc = &zen_umc_ilv_naplesish,
+ .udt_pa = 0xffed37942,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ffda6f42,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1e8,
+ .udt_dimm_row = 0xffed,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "ilv: 2/2/2 2-way sock, 2-way die, 2-way channel (7)",
+ .udt_umc = &zen_umc_ilv_naplesish,
+ .udt_pa = 0xffed37842,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1ffda6f42,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1e8,
+ .udt_dimm_row = 0xffed,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = NULL
+} };
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_multi.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_multi.c
new file mode 100644
index 0000000000..7cb1d29c46
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_multi.c
@@ -0,0 +1,396 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * Here we construct a more realistic DF situation where we have multiple
+ * rules. In particular, we use a DFv3 style configuration with a single die
+ * and socket. To make sense of the channel offset logic, we construct a
+ * system with two channels, one with 64 GiB and one with 8 GiB DIMMs. We
+ * basically interleave the 16 GiB channel with the last 16 GiB of the
+ * 128 GiB channel. This therefore requires us to use the channel offset for
+ * the first channel to get it into a reasonable spot for the second rule.
+ * This also allows us to test what happens with multiple rules, ensuring that
+ * we select the right one, including when two rules map to one channel.
+ *
+ * Here, the hole is sized to 1.75 GiB. This is based on a system we saw that
+ * was set up this way.
+ */
+
+#include "zen_umc_test.h"
+
+static const zen_umc_t zen_umc_multi = {
+ .umc_tom = 0x90000000,
+ .umc_tom2 = 0x2470000000,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_flags = ZEN_UMC_DF_F_HOLE_VALID,
+ .zud_dfno = 0,
+ .zud_dram_nrules = 2,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0x90000000,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 0x1c70000000,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0x1c70000000,
+ .ddr_limit = 0x2470000000,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 2,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HOLE,
+ .ddr_base = 0,
+ .ddr_limit = 0x1c70000000,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_1CH
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0x1c70000000,
+ .ddr_limit = 0x2470000000,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_offsets = { {
+ .cho_valid = B_TRUE,
+ .cho_offset = 0x1c00000000,
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x7ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x800000000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x7ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ }, {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 1,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0x1000000000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x7ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ }, {
+ .ucs_base = {
+ .udb_base = 0x1800000000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x7ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x12,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } }
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID,
+ .ddr_base = 0x1c70000000,
+ .ddr_limit = 0x2470000000,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X8,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3fffdffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ }, {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X8,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0x20000,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3fffdffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+const umc_decode_test_t zen_umc_test_multi[] = { {
+ .udt_desc = "Multi-rule (0)",
+ .udt_umc = &zen_umc_multi,
+ .udt_pa = 0x12345603,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x12345603,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2c0,
+ .udt_dimm_row = 0x91a,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+
+}, {
+ .udt_desc = "Multi-rule (1)",
+ .udt_umc = &zen_umc_multi,
+ .udt_pa = 0x12345703,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x12345703,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2e0,
+ .udt_dimm_row = 0x91a,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+
+}, {
+ .udt_desc = "Multi-rule (2)",
+ .udt_umc = &zen_umc_multi,
+ .udt_pa = 0x1ba9876543,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1b39876543,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0xa8,
+ .udt_dimm_row = 0x19cc3,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 2,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+
+}, {
+ .udt_desc = "Multi-rule (3)",
+ .udt_umc = &zen_umc_multi,
+ .udt_pa = 0x1ba9876643,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1b39876643,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0xc8,
+ .udt_dimm_row = 0x19cc3,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 2,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+},
+/*
+ * All of the accesses below should now hit our second rule. When normalizing
+ * we subtract the rule base and add the channel offset, which is why the
+ * normalized address looks totally different depending on which channel we
+ * end up on. A worked sketch of this arithmetic follows the test table.
+ */
+{
+ .udt_desc = "Multi-rule (4)",
+ .udt_umc = &zen_umc_multi,
+ .udt_pa = 0x1c70000000,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1c00000000,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0x20000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = "Multi-rule (5)",
+ .udt_umc = &zen_umc_multi,
+ .udt_pa = 0x1c70000100,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x0,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "Multi-rule (6)",
+ .udt_umc = &zen_umc_multi,
+ .udt_pa = 0x23456789ab,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x36ab3c4ab,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0x95,
+ .udt_dimm_row = 0xb559,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "Multi-rule (7)",
+ .udt_umc = &zen_umc_multi,
+ .udt_pa = 0x2345678aab,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1f6ab3c5ab,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 1,
+ .udt_dimm_col = 0xb5,
+ .udt_dimm_row = 0x3b559,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 3,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 1
+}, {
+ .udt_desc = NULL
+} };
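+
+/*
+ * A rough, worked sketch of the normalization arithmetic referenced above for
+ * the second (2-channel interleaved) rule. This is illustrative only and the
+ * helper name is made up; it is not the driver's code. We strip the single
+ * channel-interleave bit at ddr_addr_start (bit 8 here), subtract the rule
+ * base, and add the channel's offset when one is valid. For example,
+ * 0x1c70000000 decodes to channel 0 and normalizes to 0x1c00000000, while
+ * 0x1c70000100 decodes to channel 1 (which has no offset) and normalizes to
+ * 0, matching the "Multi-rule (4)" and "Multi-rule (5)" cases above.
+ */
+static inline uint64_t
+example_norm_2ch(uint64_t pa, uint64_t rule_base, uint_t ileave_bit,
+    uint64_t chan_offset)
+{
+	uint64_t rel = pa - rule_base;
+	uint64_t low = rel & ((1ULL << ileave_bit) - 1);
+	uint64_t high = rel >> (ileave_bit + 1);
+
+	/* Drop the one interleave bit, then apply the channel offset. */
+	return (((high << ileave_bit) | low) + chan_offset);
+}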
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_nps.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_nps.c
new file mode 100644
index 0000000000..add98ccebf
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_nps.c
@@ -0,0 +1,3349 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * Go through and test the different versions of NPS hashing. Unlike with the
+ * COD hash, we also need to take socket interleaving into account. In
+ * addition to the basic configurations, we also do 5-channel and 6-channel
+ * variants to exercise various parts of the non-power-of-2 forms.
+ */
+
+#include "zen_umc_test.h"
+
+/*
+ * Start with the heavy hitter: the 2-socket, 8-channel-per-socket
+ * configuration that does both socket interleaving and the hashing. Because
+ * this is a DFv4 variant, we opt to set up the channels for DDR5.
+ */
+static const zen_umc_t zen_umc_nps8_2p = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 256ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_4,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 2,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 8,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 | DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 4,
+ .chan_instid = 4,
+ .chan_logid = 4,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 5,
+ .chan_instid = 5,
+ .chan_logid = 5,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 6,
+ .chan_instid = 6,
+ .chan_logid = 6,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 7,
+ .chan_instid = 7,
+ .chan_logid = 7,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ } }
+ }, {
+ .zud_dfno = 1,
+ .zud_dram_nrules = 2,
+ .zud_nchan = 8,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 | DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x20,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x21,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x22,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x23,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x24,
+ .chan_instid = 4,
+ .chan_logid = 4,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x25,
+ .chan_instid = 5,
+ .chan_logid = 5,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x26,
+ .chan_instid = 6,
+ .chan_logid = 6,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x27,
+ .chan_instid = 7,
+ .chan_logid = 7,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 256ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ } }
+ } }
+};
+
+/*
+ * Here we switch back to a 1P, 2-channel configuration so we can test how
+ * things change with the extra bit that is now included since we're not
+ * hashing the socket.
+ */
+static const zen_umc_t zen_umc_nps2_1p = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 32ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_4,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 2,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 | DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 32ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_2CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 9,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_2CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ } }
+ } }
+};
+
+/*
+ * This is a five-channel version, giving us some of our favorite
+ * non-power-of-2 cases.
+ */
+static const zen_umc_t zen_umc_nps5_1p = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 80ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_4,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 5,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 | DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 80ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_5CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 80ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_5CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 80ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_5CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 80ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_5CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 80ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_5CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 4,
+ .chan_instid = 4,
+ .chan_logid = 4,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_16_18 |
+ DF_DRAM_F_HASH_21_23 |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 80ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_5CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ } }
+ } }
+};
+
+/*
+ * And now a 6-channel version so we can get the spiciness of the new
+ * normalization scheme. We've also turned off several of the hash bits here
+ * so we can verify that those middle bits don't change anything in this case.
+ */
+static const zen_umc_t zen_umc_nps6_1p = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 96ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_4,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 6,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_6CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 4,
+ .chan_instid = 4,
+ .chan_logid = 4,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 5,
+ .chan_instid = 5,
+ .chan_logid = 5,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_30_32,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_6CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ } }
+ } }
+};
+
+/*
+ * Finally, our last bit here is a 3-channel 2P system. This is used to test
+ * that the variant of the normalization with socket interleaving works
+ * correctly.
+ */
+static const zen_umc_t zen_umc_nps3_2p = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 96ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_4,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 2,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 3,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HASH_21_23,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_3CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_3CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_3CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_3CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } }
+ } }
+ }, {
+ .zud_dfno = 1,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 3,
+ .zud_cs_nremap = 0,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_HASH_21_23,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_3CH
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x20,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_3CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x21,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_3CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0x22,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_HASH_21_23,
+ .ddr_base = 0,
+ .ddr_limit = 96ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 1,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 8,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_3CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR5,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x5,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x10,
+ .ucs_nbank_groups = 0x3,
+ .ucs_row_low_bit = 0x12,
+ .ucs_bank_bits = { 0xf, 0x10, 0x11, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x2, 0x3, 0x4, 0x5,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc },
+ .ucs_subchan = 0x6
+ } }
+ } }
+ } }
+ } }
+};
+
+const umc_decode_test_t zen_umc_test_nps[] = { {
+ .udt_desc = "NPS 8ch, 2P ilv (0)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x123,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (1)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (2)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x1123,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (3)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x1323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (4)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x2123,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (5)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x2323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (6)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x3123,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (7)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x3323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (8)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x4123,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (9)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x4323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (10)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x5123,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 5,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (11)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x5323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 5,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (12)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x6123,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 6,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (13)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x6323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 6,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (14)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x7123,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 7,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (15)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x7323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 7,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (16)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x17323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 7,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x228,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (17)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x217323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x21123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 7,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x228,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x4,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (18)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x40217323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x4021123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 7,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x228,
+ .udt_dimm_row = 0x100,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x4,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (19)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x240217323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x24021123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x228,
+ .udt_dimm_row = 0x900,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x4,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (20)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x240617323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x24061123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x228,
+ .udt_dimm_row = 0x901,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x4,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (21)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x240617323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x24061123,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x228,
+ .udt_dimm_row = 0x901,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x4,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (22)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x240687323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x24068123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 6,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0x901,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x5,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 8ch, 2P ilv (23)",
+ .udt_umc = &zen_umc_nps8_2p,
+ .udt_pa = 0x2c0687323,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x2c068123,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 7,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x28,
+ .udt_dimm_row = 0xb01,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x5,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 2ch, 1P (0)",
+ .udt_umc = &zen_umc_nps2_1p,
+ .udt_pa = 0x167,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x167,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x29,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 2ch, 1P (1)",
+ .udt_umc = &zen_umc_nps2_1p,
+ .udt_pa = 0x367,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x167,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x29,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 2ch, 1P (2)",
+ .udt_umc = &zen_umc_nps2_1p,
+ .udt_pa = 0x4167,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x2167,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x29,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 2ch, 1P (3)",
+ .udt_umc = &zen_umc_nps2_1p,
+ .udt_pa = 0x14167,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xa167,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x29,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x1,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 2ch, 1P (4)",
+ .udt_umc = &zen_umc_nps2_1p,
+ .udt_pa = 0x40014167,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x2000a167,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x29,
+ .udt_dimm_row = 0x800,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x1,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 2ch, 1P (5)",
+ .udt_umc = &zen_umc_nps2_1p,
+ .udt_pa = 0x214167,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x10a167,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x29,
+ .udt_dimm_row = 0x4,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x1,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 2ch, 1P (6)",
+ .udt_umc = &zen_umc_nps2_1p,
+ .udt_pa = 0x40214167,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x2010a167,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x29,
+ .udt_dimm_row = 0x804,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x1,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (0)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0xcd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xcd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (1)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x1cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (2)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x2cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x33,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (3)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x3cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x21cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x33,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (4)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x4cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x2cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x53,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (5)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x5cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x22cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x53,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (6)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x6cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x73,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (7)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x3ecd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x1fcd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f3,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (8)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x3fcd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x3fcd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x3f3,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (9)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x40cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (10)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x41cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xcd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (11)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x80cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xcd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (12)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x81cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (13)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0xc0cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (14)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0xc1cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xcd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (15)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x100cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (16)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x101cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xcd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (17)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x140cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x40cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 2,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 5ch, 1P (18)",
+ .udt_umc = &zen_umc_nps5_1p,
+ .udt_pa = 0x141cd,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x60cd,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x13,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 3,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 1,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (0)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0xbc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xbc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (1)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x1bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x10bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x21f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (2)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x20bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xbc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (3)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x21bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x10bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x21f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (4)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x40bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x10bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x21f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (5)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x41bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xbc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (6)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x60bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x10bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 5,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x21f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (7)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x61bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xbc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (8)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x80bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xbc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (9)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x81bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x10bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x21f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (10)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0xa0bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xbc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 5,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (11)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0xa1bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x10bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x21f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+},
+/*
+ * We don't use hashing on the 64 KiB range. Walking through it should still
+ * change the component IDs because of how the scheme works, though the
+ * progression should be more contiguous.
+ */
+{
+ .udt_desc = "NPS 6ch, 1P (12)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x120bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x20bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (13)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x220bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x40bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 5,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0x2,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (14)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x320bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x80bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0x0,
+ .udt_dimm_bank_group = 0x1,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (15)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x420bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xa0bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0x1,
+ .udt_dimm_bank_group = 0x1,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (16)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x720bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x120bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 4,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0x1,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (17)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x1000020bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x2aaaa0bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0xaaa,
+ .udt_dimm_bank = 0x1,
+ .udt_dimm_bank_group = 0x5,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (18)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x1800020bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x400000bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x1000,
+ .udt_dimm_bank = 0x0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (19)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x1c00020bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x4aaab0bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x21f,
+ .udt_dimm_row = 0x12aa,
+ .udt_dimm_bank = 0x1,
+ .udt_dimm_bank_group = 0x5,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (20)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x1c00060bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x4aaaa0bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x12aa,
+ .udt_dimm_bank = 0x1,
+ .udt_dimm_bank_group = 0x5,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (21)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x1c00040bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x4aaaa0bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 5,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1f,
+ .udt_dimm_row = 0x12aa,
+ .udt_dimm_bank = 0x1,
+ .udt_dimm_bank_group = 0x5,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (22)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x1c00041bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x4aaab0bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x21f,
+ .udt_dimm_row = 0x12aa,
+ .udt_dimm_bank = 0x1,
+ .udt_dimm_bank_group = 0x5,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 6ch, 1P (23)",
+ .udt_umc = &zen_umc_nps6_1p,
+ .udt_pa = 0x1c00061bc,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x4aaab0bc,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x21f,
+ .udt_dimm_row = 0x12aa,
+ .udt_dimm_bank = 0x1,
+ .udt_dimm_bank_group = 0x5,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (0)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0xad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xad,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0x0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (1)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x1ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xad,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0x0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (2)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x40ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xad,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0x0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (3)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x41ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xad,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0x0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (4)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x80ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xad,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0x0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (5)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x81ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0xad,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x0,
+ .udt_dimm_bank = 0x0,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (6)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x1fc0ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x540ad,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x1,
+ .udt_dimm_bank = 0x2,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (7)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x1fc1ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x540ad,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x1,
+ .udt_dimm_bank = 0x2,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (8)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x2000ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x540ad,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x1,
+ .udt_dimm_bank = 0x2,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (9)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x2001ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x540ad,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x1,
+ .udt_dimm_bank = 0x2,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (10)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x2040ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x560ad,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x1,
+ .udt_dimm_bank = 0x3,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (11)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x2041ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x560ad,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x1,
+ .udt_dimm_bank = 0x3,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (12)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x2080ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x560ad,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x1,
+ .udt_dimm_bank = 0x3,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (13)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x2081ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x560ad,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x1,
+ .udt_dimm_bank = 0x3,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (14)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x20c0ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x560ad,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x1,
+ .udt_dimm_bank = 0x3,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (15)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x20c1ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x560ad,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0x1,
+ .udt_dimm_bank = 0x3,
+ .udt_dimm_bank_group = 0x2,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (16)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x10020c0ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x2ab020ad,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0xaac,
+ .udt_dimm_bank = 0x1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "NPS 3ch, 2P (17)",
+ .udt_umc = &zen_umc_nps3_2p,
+ .udt_pa = 0x10020c1ad,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x2ab020ad,
+ .udt_sock = 1,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x1b,
+ .udt_dimm_row = 0xaac,
+ .udt_dimm_bank = 0x1,
+ .udt_dimm_bank_group = 0x0,
+ .udt_dimm_subchan = 0,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = NULL
+} };
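The udt_sock, udt_die, and udt_comp expectations in these NPS cases are what fall out of decomposing each channel's destination fabric ID (for example the 0x20-0x22 channel fabric IDs on the second DF above). Below is a minimal sketch of that decomposition, assuming the fabric ID concatenates a node ID and a component ID and the node ID in turn concatenates socket and die; the type and helper names are illustrative, not the driver's, and the mask/shift values mirror the decomposition structures used by the remap configurations later in this change.

#include <sys/types.h>

typedef struct {
	uint32_t fd_sock_mask, fd_die_mask, fd_node_mask, fd_comp_mask;
	uint32_t fd_sock_shift, fd_die_shift, fd_node_shift, fd_comp_shift;
} fab_decomp_t;

/*
 * Split a fabric ID into socket, die, and component under the assumed layout
 * described above: node and component come out of the fabric ID, socket and
 * die come out of the node ID.
 */
static void
fabid_decompose(const fab_decomp_t *d, uint32_t fabid, uint32_t *sockp,
    uint32_t *diep, uint32_t *compp)
{
	uint32_t node = (fabid & d->fd_node_mask) >> d->fd_node_shift;

	*compp = (fabid & d->fd_comp_mask) >> d->fd_comp_shift;
	*sockp = (node & d->fd_sock_mask) >> d->fd_sock_shift;
	*diep = (node & d->fd_die_mask) >> d->fd_die_shift;
}

With a socket mask of 0x1, node mask of 0x20, node shift of 5, and component mask of 0x1f, fabric ID 0x21 decomposes to socket 1, die 0, component 1, which lines up with the 0x20-0x22 channel fabric IDs above if that configuration uses the same decomposition.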
diff --git a/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_remap.c b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_remap.c
new file mode 100644
index 0000000000..3bdbeae0d4
--- /dev/null
+++ b/usr/src/test/os-tests/tests/zen_umc/zen_umc_test_remap.c
@@ -0,0 +1,746 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * This performs remap tests across both the DFv3 and DFv4 variants.
+ */
+
+#include "zen_umc_test.h"
+
+static const zen_umc_t zen_umc_remap_v3 = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_3,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 1,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 2,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_REMAP_EN |
+ DF_DRAM_F_REMAP_SOCK,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .zud_remap = { {
+ .csr_nremaps = ZEN_UMC_MILAN_REMAP_ENTS,
+ .csr_remaps = { 0x3, 0x2, 0x1, 0x0, 0x4, 0x5, 0x6, 0x7,
+ 0x8, 0x9, 0xa, 0xb },
+ }, {
+ .csr_nremaps = ZEN_UMC_MILAN_REMAP_ENTS,
+ .csr_remaps = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+ 0x8, 0x9, 0xa, 0xb },
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN |
+ DF_DRAM_F_REMAP_SOCK,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN |
+ DF_DRAM_F_REMAP_SOCK,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN |
+ DF_DRAM_F_REMAP_SOCK,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 1,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN |
+ DF_DRAM_F_REMAP_SOCK,
+ .ddr_base = 0,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+/*
+ * This sets up a DFv4-capable remap engine. The important difference we want
+ * to test here is that the remap rules can be selected on a per-DRAM-rule
+ * basis. This leads us to split our rules in half and end up with two totally
+ * different remapping schemes. In comparison, DFv3 selects remaps based on
+ * the target socket.
+ */
+static const zen_umc_t zen_umc_remap_v4 = {
+ .umc_tom = 4ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_tom2 = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .umc_df_rev = DF_REV_4,
+ .umc_decomp = {
+ .dfd_sock_mask = 0x01,
+ .dfd_die_mask = 0x00,
+ .dfd_node_mask = 0x20,
+ .dfd_comp_mask = 0x1f,
+ .dfd_sock_shift = 0,
+ .dfd_die_shift = 0,
+ .dfd_node_shift = 5,
+ .dfd_comp_shift = 0
+ },
+ .umc_ndfs = 1,
+ .umc_dfs = { {
+ .zud_dfno = 0,
+ .zud_dram_nrules = 2,
+ .zud_nchan = 4,
+ .zud_cs_nremap = 2,
+ .zud_hole_base = 0,
+ .zud_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_REMAP_EN,
+ .ddr_base = 0,
+ .ddr_limit = 32ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 0
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID | DF_DRAM_F_REMAP_EN,
+ .ddr_base = 32ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL * 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 1
+ } },
+ .zud_remap = { {
+ .csr_nremaps = ZEN_UMC_MAX_REMAP_ENTS,
+ .csr_remaps = { 0x3, 0x2, 0x1, 0x0, 0x4, 0x5, 0x6, 0x7,
+ 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf },
+ }, {
+ .csr_nremaps = ZEN_UMC_MAX_REMAP_ENTS,
+ .csr_remaps = { 0x2, 0x1, 0x3, 0x0, 0x4, 0x5, 0x6, 0x7,
+ 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf },
+ } },
+ .zud_chan = { {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 0,
+ .chan_instid = 0,
+ .chan_logid = 0,
+ .chan_nrules = 2,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN,
+ .ddr_base = 0,
+ .ddr_limit = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 0
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN,
+ .ddr_base = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 1
+ } },
+ .chan_offsets = { {
+ .cho_valid = B_TRUE,
+ .cho_offset = 0x200000000,
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 1,
+ .chan_instid = 1,
+ .chan_logid = 1,
+ .chan_nrules = 2,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN,
+ .ddr_base = 0,
+ .ddr_limit = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 0
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN,
+ .ddr_base = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 1
+ } },
+ .chan_offsets = { {
+ .cho_valid = B_TRUE,
+ .cho_offset = 0x200000000,
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 2,
+ .chan_instid = 2,
+ .chan_logid = 2,
+ .chan_nrules = 2,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN,
+ .ddr_base = 0,
+ .ddr_limit = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 0
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN,
+ .ddr_base = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 1
+ } },
+ .chan_offsets = { {
+ .cho_valid = B_TRUE,
+ .cho_offset = 0x200000000,
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ }, {
+ .chan_flags = UMC_CHAN_F_ECC_EN,
+ .chan_fabid = 3,
+ .chan_instid = 3,
+ .chan_logid = 3,
+ .chan_nrules = 2,
+ .chan_rules = { {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN,
+ .ddr_base = 0,
+ .ddr_limit = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 0
+ }, {
+ .ddr_flags = DF_DRAM_F_VALID |
+ DF_DRAM_F_REMAP_EN,
+ .ddr_base = 32ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_limit = 64ULL * 1024ULL * 1024ULL *
+ 1024ULL,
+ .ddr_dest_fabid = 0,
+ .ddr_sock_ileave_bits = 0,
+ .ddr_die_ileave_bits = 0,
+ .ddr_addr_start = 12,
+ .ddr_chan_ileave = DF_CHAN_ILEAVE_4CH,
+ .ddr_remap_ent = 1
+ } },
+ .chan_offsets = { {
+ .cho_valid = B_TRUE,
+ .cho_offset = 0x200000000,
+ } },
+ .chan_dimms = { {
+ .ud_flags = UMC_DIMM_F_VALID,
+ .ud_width = UMC_DIMM_W_X4,
+ .ud_type = UMC_DIMM_T_DDR4,
+ .ud_kind = UMC_DIMM_K_RDIMM,
+ .ud_dimmno = 0,
+ .ud_cs = { {
+ .ucs_base = {
+ .udb_base = 0,
+ .udb_valid = B_TRUE
+ },
+ .ucs_base_mask = 0x3ffffffff,
+ .ucs_nbanks = 0x4,
+ .ucs_ncol = 0xa,
+ .ucs_nrow_lo = 0x11,
+ .ucs_nbank_groups = 0x2,
+ .ucs_row_hi_bit = 0x18,
+ .ucs_row_low_bit = 0x11,
+ .ucs_bank_bits = { 0xf, 0x10, 0xd,
+ 0xe },
+ .ucs_col_bits = { 0x3, 0x4, 0x5, 0x6,
+ 0x7, 0x8, 0x9, 0xa, 0xb, 0xc }
+ } }
+ } },
+ } }
+ } }
+};
+
+const umc_decode_test_t zen_umc_test_remap[] = { {
+ .udt_desc = "Milan Remap (0)",
+ .udt_umc = &zen_umc_remap_v3,
+ .udt_pa = 0x138,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x138,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x27,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "Milan Remap (1)",
+ .udt_umc = &zen_umc_remap_v3,
+ .udt_pa = 0x1138,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x138,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x27,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "Milan Remap (2)",
+ .udt_umc = &zen_umc_remap_v3,
+ .udt_pa = 0x2138,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x138,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x27,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "Milan Remap (3)",
+ .udt_umc = &zen_umc_remap_v3,
+ .udt_pa = 0x3138,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x138,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x27,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DFv4 Remap (0)",
+ .udt_umc = &zen_umc_remap_v4,
+ .udt_pa = 0x163,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x163,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2c,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DFv4 Remap (1)",
+ .udt_umc = &zen_umc_remap_v4,
+ .udt_pa = 0x1163,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x163,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2c,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DFv4 Remap (2)",
+ .udt_umc = &zen_umc_remap_v4,
+ .udt_pa = 0x2163,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x163,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2c,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DFv4 Remap (3)",
+ .udt_umc = &zen_umc_remap_v4,
+ .udt_pa = 0x3163,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x163,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2c,
+ .udt_dimm_row = 0,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DFv4 Remap (4)",
+ .udt_umc = &zen_umc_remap_v4,
+ .udt_pa = 0x900000163,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x240000163,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 2,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2c,
+ .udt_dimm_row = 0x12000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DFv4 Remap (5)",
+ .udt_umc = &zen_umc_remap_v4,
+ .udt_pa = 0x900001163,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x240000163,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 1,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2c,
+ .udt_dimm_row = 0x12000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DFv4 Remap (6)",
+ .udt_umc = &zen_umc_remap_v4,
+ .udt_pa = 0x900002163,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x240000163,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 3,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2c,
+ .udt_dimm_row = 0x12000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = "DFv4 Remap (7)",
+ .udt_umc = &zen_umc_remap_v4,
+ .udt_pa = 0x900003163,
+ .udt_pass = B_TRUE,
+ .udt_norm_addr = 0x240000163,
+ .udt_sock = 0,
+ .udt_die = 0,
+ .udt_comp = 0,
+ .udt_dimm_no = 0,
+ .udt_dimm_col = 0x2c,
+ .udt_dimm_row = 0x12000,
+ .udt_dimm_bank = 0,
+ .udt_dimm_bank_group = 0,
+ .udt_dimm_subchan = UINT8_MAX,
+ .udt_dimm_rm = 0,
+ .udt_dimm_cs = 0
+}, {
+ .udt_desc = NULL
+} };
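The expected components in the remap cases above follow from pushing the channel interleave index through the selected remap array. Below is a minimal sketch of that lookup, assuming the two interleave bits sit at ddr_addr_start (bit 12 for the 4-channel rules in this file) and that csr_remaps maps the logical index straight to the destination component; the helper name is illustrative, and the real decoder in zen_umc_decode.c does considerably more work around this step.

#include <stdint.h>
#include <stddef.h>

/*
 * Select the destination component for a physical address under a 4-channel
 * interleave starting at address bit 12, remapped through a remap table.
 */
static uint32_t
remap_component(uint64_t pa, const uint8_t *remaps, size_t nremaps)
{
	size_t idx = (pa >> 12) & 0x3;	/* two interleave bits at bit 12 */

	return (idx < nremaps ? remaps[idx] : UINT32_MAX);
}

With the Milan table { 0x3, 0x2, 0x1, 0x0, ... }, the PAs 0x138, 0x1138, 0x2138, and 0x3138 map to components 3, 2, 1, and 0, matching the first four Milan Remap expectations.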
diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
index 5d48a86475..b8cad13069 100644
--- a/usr/src/uts/common/Makefile.files
+++ b/usr/src/uts/common/Makefile.files
@@ -29,7 +29,7 @@
# Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
# Copyright 2016 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
# Copyright 2022 RackTop Systems, Inc.
-# Copyright 2021 Oxide Computer Company
+# Copyright 2022 Oxide Computer Company
#
#
@@ -110,6 +110,7 @@ GENUNIX_OBJS += \
avl.o \
bdev_dsort.o \
bio.o \
+ bitext.o \
bitmap.o \
blabel.o \
bootbanner.o \
diff --git a/usr/src/uts/common/Makefile.rules b/usr/src/uts/common/Makefile.rules
index ba70c5d688..8a7e11e34c 100644
--- a/usr/src/uts/common/Makefile.rules
+++ b/usr/src/uts/common/Makefile.rules
@@ -26,7 +26,7 @@
# Copyright 2019 Joyent, Inc.
# Copyright 2018 Nexenta Systems, Inc.
# Copyright (c) 2017 by Delphix. All rights reserved.
-# Copyright 2021 Oxide Computer Company
+# Copyright 2022 Oxide Computer Company
# Copyright 2022 RackTop Systems, Inc.
#
@@ -97,6 +97,10 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/common/bignum/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(COMMONBASE)/bitext/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
$(OBJS_DIR)/%.o: $(COMMONBASE)/mpi/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
diff --git a/usr/src/uts/common/sys/Makefile b/usr/src/uts/common/sys/Makefile
index 03f2fb8537..2dddfad820 100644
--- a/usr/src/uts/common/sys/Makefile
+++ b/usr/src/uts/common/sys/Makefile
@@ -29,6 +29,7 @@
# Copyright 2016 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
# Copyright 2019 Peter Tribble.
# Copyright 2015, Joyent, Inc. All rights reserved.
+# Copyright 2022 Oxide Computer Company
#
include $(SRC)/uts/Makefile.uts
@@ -89,6 +90,7 @@ CHKHDRS= \
auxv_SPARC.h \
avl.h \
avl_impl.h \
+ bitext.h \
bitmap.h \
bitset.h \
bl.h \
diff --git a/usr/src/uts/common/sys/bitext.h b/usr/src/uts/common/sys/bitext.h
new file mode 100644
index 0000000000..d54d7657b8
--- /dev/null
+++ b/usr/src/uts/common/sys/bitext.h
@@ -0,0 +1,48 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+#ifndef _SYS_BITEXT_H
+#define _SYS_BITEXT_H
+
+#include <sys/debug.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * A bunch of routines to make working with bits and registers easier. This is
+ * designed to be a replacement for the BITX macro and provide additional error
+ * handling. See bitx64(9F), bitdel64(9F), and bitset64(9F) for more
+ * information.
+ */
+
+extern uint8_t bitx8(uint8_t, uint_t, uint_t);
+extern uint16_t bitx16(uint16_t, uint_t, uint_t);
+extern uint32_t bitx32(uint32_t, uint_t, uint_t);
+extern uint64_t bitx64(uint64_t, uint_t, uint_t);
+
+extern uint8_t bitset8(uint8_t, uint_t, uint_t, uint8_t);
+extern uint16_t bitset16(uint16_t, uint_t, uint_t, uint16_t);
+extern uint32_t bitset32(uint32_t, uint_t, uint_t, uint32_t);
+extern uint64_t bitset64(uint64_t, uint_t, uint_t, uint64_t);
+
+extern uint64_t bitdel64(uint64_t, uint_t, uint_t);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_BITEXT_H */
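
As a rough illustration of how the routines declared above are meant to be used
(a hedged sketch based on the declarations and the bitx64(9F) family of manual
pages added by this change; example_set_bus() is a hypothetical helper, not
code from the change):

#include <sys/bitext.h>

/*
 * Hypothetical example: read the 8-bit field in bits [7:0] of a register
 * value and, if needed, replace it with a new value. bitx32() extracts the
 * inclusive bit range [high, low] shifted down to bit 0; bitset32() returns
 * the value with that range replaced.
 */
static uint32_t
example_set_bus(uint32_t regval, uint8_t bus)
{
	uint8_t old = bitx32(regval, 7, 0);

	if (old == bus)
		return (regval);
	return (bitset32(regval, 7, 0, bus));
}

bitdel64() similarly removes an inclusive bit range from a 64-bit value,
shifting the remaining upper bits down, which comes up when removing
interleave bits from an address.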
diff --git a/usr/src/uts/intel/Makefile.files b/usr/src/uts/intel/Makefile.files
index 193570711b..8974c80a63 100644
--- a/usr/src/uts/intel/Makefile.files
+++ b/usr/src/uts/intel/Makefile.files
@@ -356,6 +356,7 @@ AMDZEN_STUB_OBJS = amdzen_stub.o
SMNTEMP_OBJS = smntemp.o
USMN_OBJS = usmn.o
ZEN_UDF_OBJS = zen_udf.o
+ZEN_UMC_OBJS = zen_umc.o zen_umc_decode.o zen_fabric_utils.o zen_umc_dump.o
#
# Intel Integrated Memory Controller
diff --git a/usr/src/uts/intel/Makefile.intel b/usr/src/uts/intel/Makefile.intel
index ac4d9a0548..f93a9d600f 100644
--- a/usr/src/uts/intel/Makefile.intel
+++ b/usr/src/uts/intel/Makefile.intel
@@ -27,7 +27,7 @@
# Copyright 2018 Nexenta Systems, Inc.
# Copyright 2019 RackTop Systems
# Copyright 2019 Peter Tribble.
-# Copyright 2021 Oxide Computer Company
+# Copyright 2022 Oxide Computer Company
#
#
@@ -736,7 +736,7 @@ DRV_KMODS += smntemp
#
DRV_KMODS += amdzen
DRV_KMODS += amdzen_stub
-DRV_KMODS += usmn zen_udf
+DRV_KMODS += usmn zen_udf zen_umc
#
# Intel Integrated Memory Controller
diff --git a/usr/src/uts/intel/Makefile.rules b/usr/src/uts/intel/Makefile.rules
index 7b3845970d..f7c05bdf22 100644
--- a/usr/src/uts/intel/Makefile.rules
+++ b/usr/src/uts/intel/Makefile.rules
@@ -23,6 +23,7 @@
# Use is subject to license terms.
# Copyright 2019 Joyent, Inc.
# Copyright 2017 Nexenta Systems, Inc.
+# Copyright 2022 Oxide Computer Company
#
#
@@ -145,6 +146,10 @@ $(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/amdzen/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
+$(OBJS_DIR)/%.o: $(SRC)/common/mc/zen_umc/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
$(OBJS_DIR)/%.o: $(UTSBASE)/intel/io/amr/%.c
$(COMPILE.c) -o $@ $<
$(CTFCONVERT_O)
diff --git a/usr/src/uts/intel/io/amdzen/amdzen.c b/usr/src/uts/intel/io/amdzen/amdzen.c
index 3715a2c035..f8ec834e66 100644
--- a/usr/src/uts/intel/io/amdzen/amdzen.c
+++ b/usr/src/uts/intel/io/amdzen/amdzen.c
@@ -11,7 +11,7 @@
/*
* Copyright 2019, Joyent, Inc.
- * Copyright 2021 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
/*
@@ -119,6 +119,29 @@
* provided by one driver, we instead have created a nexus driver that will
* itself try and load children. Children are all pseudo-device drivers that
* provide different pieces of functionality that use this.
+ *
+ * -------
+ * Locking
+ * -------
+ *
+ * The amdzen_data structure contains a single lock, azn_mutex. The various
+ * client functions are intended for direct children of our nexus, but have been
+ * designed in case someone else depends on this driver despite not being a
+ * child. Once a DF has been discovered, the set of entities inside of it
+ * (adf_nents, adf_ents[]) is considered static, constant data. This means that
+ * iterating over it in and of itself does not require locking; however, the
+ * discovery of the amd_df_t does. In addition, whenever performing register
+ * accesses to the DF or SMN, those require locking. This means that one must
+ * hold the lock in the following circumstances:
+ *
+ * o Looking up DF structures
+ * o Reading or writing to DF registers
+ * o Reading or writing to SMN registers
+ *
+ * In general, it is preferred that the lock be held across an entire client
+ * operation if possible. The only time this becomes an issue is when we have
+ * callbacks into our callers (a la amdzen_c_df_iter()), as they will likely
+ * recursively call into us.
*/
#include <sys/modctl.h>
@@ -132,6 +155,8 @@
#include <sys/x86_archext.h>
#include <sys/cpuvar.h>
+#include <sys/amdzen/df.h>
+#include "amdzen_client.h"
#include "amdzen.h"
amdzen_t *amdzen_data;
@@ -158,9 +183,75 @@ typedef struct {
static const amdzen_child_data_t amdzen_children[] = {
{ "smntemp", AMDZEN_C_SMNTEMP },
{ "usmn", AMDZEN_C_USMN },
- { "zen_udf", AMDZEN_C_ZEN_UDF }
+ { "zen_udf", AMDZEN_C_ZEN_UDF },
+ { "zen_umc", AMDZEN_C_ZEN_UMC }
};
+/*
+ * Provide a caller with the notion of what CPU family their device falls into.
+ * This is useful for client drivers that want to make decisions based on model
+ * ranges.
+ */
+zen_family_t
+amdzen_c_family(void)
+{
+ uint_t vendor, family, model;
+ zen_family_t ret = ZEN_FAMILY_UNKNOWN;
+
+ vendor = cpuid_getvendor(CPU);
+ family = cpuid_getfamily(CPU);
+ model = cpuid_getmodel(CPU);
+
+ switch (family) {
+ case 0x17:
+ if (vendor != X86_VENDOR_AMD)
+ break;
+ if (model < 0x10) {
+ ret = ZEN_FAMILY_NAPLES;
+ } else if (model >= 0x10 && model < 0x30) {
+ ret = ZEN_FAMILY_DALI;
+ } else if (model >= 0x30 && model < 0x40) {
+ ret = ZEN_FAMILY_ROME;
+ } else if (model >= 0x60 && model < 0x70) {
+ ret = ZEN_FAMILY_RENOIR;
+ } else if (model >= 0x70 && model < 0x80) {
+ ret = ZEN_FAMILY_MATISSE;
+ } else if (model >= 0x90 && model < 0xa0) {
+ ret = ZEN_FAMILY_VAN_GOGH;
+ } else if (model >= 0xa0 && model < 0xb0) {
+ ret = ZEN_FAMILY_MENDOCINO;
+ }
+ break;
+ case 0x18:
+ if (vendor != X86_VENDOR_HYGON)
+ break;
+ if (model < 0x10)
+ ret = ZEN_FAMILY_DHYANA;
+ break;
+ case 0x19:
+ if (vendor != X86_VENDOR_AMD)
+ break;
+ if (model < 0x10) {
+ ret = ZEN_FAMILY_MILAN;
+ } else if (model >= 0x10 && model < 0x20) {
+ ret = ZEN_FAMILY_GENOA;
+ } else if (model >= 0x20 && model < 0x30) {
+ ret = ZEN_FAMILY_VERMEER;
+ } else if (model >= 0x40 && model < 0x50) {
+ ret = ZEN_FAMILY_REMBRANDT;
+ } else if (model >= 0x50 && model < 0x60) {
+ ret = ZEN_FAMILY_CEZANNE;
+ } else if (model >= 0x60 && model < 0x70) {
+ ret = ZEN_FAMILY_RAPHAEL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return (ret);
+}
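
A client driver layered on top of this nexus might use the family information
along these lines (a sketch only; example_check_family() and the specific
families it accepts are illustrative, not part of this change):

/*
 * Hypothetical client check: only proceed on processor families that this
 * consumer knows how to handle.
 */
static int
example_check_family(void)
{
	switch (amdzen_c_family()) {
	case ZEN_FAMILY_MILAN:
	case ZEN_FAMILY_GENOA:
		return (0);
	default:
		return (ENOTSUP);
	}
}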
+
static uint32_t
amdzen_stub_get32(amdzen_stub_t *stub, off_t reg)
{
@@ -179,40 +270,77 @@ amdzen_stub_put32(amdzen_stub_t *stub, off_t reg, uint32_t val)
pci_config_put32(stub->azns_cfgspace, reg, val);
}
+static uint64_t
+amdzen_df_read_regdef(amdzen_t *azn, amdzen_df_t *df, const df_reg_def_t def,
+ uint8_t inst, boolean_t do_64)
+{
+ df_reg_def_t ficaa;
+ df_reg_def_t ficad;
+ uint32_t val = 0;
+ df_rev_t df_rev = azn->azn_dfs[0].adf_rev;
+
+ VERIFY(MUTEX_HELD(&azn->azn_mutex));
+ ASSERT3U(def.drd_gens & df_rev, ==, df_rev);
+ val = DF_FICAA_V2_SET_TARG_INST(val, 1);
+ val = DF_FICAA_V2_SET_FUNC(val, def.drd_func);
+ val = DF_FICAA_V2_SET_INST(val, inst);
+ val = DF_FICAA_V2_SET_64B(val, do_64 ? 1 : 0);
+
+ switch (df_rev) {
+ case DF_REV_2:
+ case DF_REV_3:
+ case DF_REV_3P5:
+ ficaa = DF_FICAA_V2;
+ ficad = DF_FICAD_LO_V2;
+ /*
+ * Both here and in the DFv4 case, the register ignores the
+ * lower 2 bits. That is we can only address and encode things
+ * in units of 4 bytes.
+ */
+ val = DF_FICAA_V2_SET_REG(val, def.drd_reg >> 2);
+ break;
+ case DF_REV_4:
+ ficaa = DF_FICAA_V4;
+ ficad = DF_FICAD_LO_V4;
+ val = DF_FICAA_V4_SET_REG(val, def.drd_reg >> 2);
+ break;
+ default:
+ panic("encountered unexpected DF rev: %u", df_rev);
+ }
+
+ amdzen_stub_put32(df->adf_funcs[ficaa.drd_func], ficaa.drd_reg, val);
+ if (do_64) {
+ return (amdzen_stub_get64(df->adf_funcs[ficad.drd_func],
+ ficad.drd_reg));
+ } else {
+ return (amdzen_stub_get32(df->adf_funcs[ficad.drd_func],
+ ficad.drd_reg));
+ }
+}
+
/*
* Perform a targeted 32-bit indirect read to a specific instance and function.
*/
static uint32_t
-amdzen_df_read32(amdzen_t *azn, amdzen_df_t *df, uint8_t inst, uint8_t func,
- uint16_t reg)
+amdzen_df_read32(amdzen_t *azn, amdzen_df_t *df, uint8_t inst,
+ const df_reg_def_t def)
{
- uint32_t val;
-
- VERIFY(MUTEX_HELD(&azn->azn_mutex));
- val = AMDZEN_DF_F4_FICAA_TARG_INST | AMDZEN_DF_F4_FICAA_SET_REG(reg) |
- AMDZEN_DF_F4_FICAA_SET_FUNC(func) |
- AMDZEN_DF_F4_FICAA_SET_INST(inst);
- amdzen_stub_put32(df->adf_funcs[4], AMDZEN_DF_F4_FICAA, val);
- return (amdzen_stub_get32(df->adf_funcs[4], AMDZEN_DF_F4_FICAD_LO));
+ return (amdzen_df_read_regdef(azn, df, def, inst, B_FALSE));
}
/*
- * Perform a targeted 64-bit indirect read to a specific instance and function.
+ * For a broadcast read, just go to the underlying PCI function and perform a
+ * read. At this point in time, we don't believe we need to use the FICAA/FICAD
+ * to access it (though it does have a broadcast mode).
*/
-static uint64_t
-amdzen_df_read64(amdzen_t *azn, amdzen_df_t *df, uint8_t inst, uint8_t func,
- uint16_t reg)
+static uint32_t
+amdzen_df_read32_bcast(amdzen_t *azn, amdzen_df_t *df, const df_reg_def_t def)
{
- uint32_t val;
-
VERIFY(MUTEX_HELD(&azn->azn_mutex));
- val = AMDZEN_DF_F4_FICAA_TARG_INST | AMDZEN_DF_F4_FICAA_SET_REG(reg) |
- AMDZEN_DF_F4_FICAA_SET_FUNC(func) |
- AMDZEN_DF_F4_FICAA_SET_INST(inst) | AMDZEN_DF_F4_FICAA_SET_64B;
- amdzen_stub_put32(df->adf_funcs[4], AMDZEN_DF_F4_FICAA, val);
- return (amdzen_stub_get64(df->adf_funcs[4], AMDZEN_DF_F4_FICAD_LO));
+ return (amdzen_stub_get32(df->adf_funcs[def.drd_func], def.drd_reg));
}
+
static uint32_t
amdzen_smn_read32(amdzen_t *azn, amdzen_df_t *df, uint32_t reg)
{
@@ -316,9 +444,34 @@ amdzen_c_df_count(void)
return (ret);
}
+df_rev_t
+amdzen_c_df_rev(void)
+{
+ amdzen_df_t *df;
+ amdzen_t *azn = amdzen_data;
+ df_rev_t rev;
+
+ /*
+ * Always use the first DF instance to determine what we're using. Our
+ * current assumption, which seems to generally be true, is that the
+ * given DF revisions are the same in a given system when the DFs are
+ * directly connected.
+ */
+ mutex_enter(&azn->azn_mutex);
+ df = amdzen_df_find(azn, 0);
+ if (df == NULL) {
+ rev = DF_REV_UNKNOWN;
+ } else {
+ rev = df->adf_rev;
+ }
+ mutex_exit(&azn->azn_mutex);
+
+ return (rev);
+}
+
int
-amdzen_c_df_read32(uint_t dfno, uint8_t inst, uint8_t func,
- uint16_t reg, uint32_t *valp)
+amdzen_c_df_read32(uint_t dfno, uint8_t inst, const df_reg_def_t def,
+ uint32_t *valp)
{
amdzen_df_t *df;
amdzen_t *azn = amdzen_data;
@@ -330,15 +483,15 @@ amdzen_c_df_read32(uint_t dfno, uint8_t inst, uint8_t func,
return (ENOENT);
}
- *valp = amdzen_df_read32(azn, df, inst, func, reg);
+ *valp = amdzen_df_read_regdef(azn, df, def, inst, B_FALSE);
mutex_exit(&azn->azn_mutex);
return (0);
}
int
-amdzen_c_df_read64(uint_t dfno, uint8_t inst, uint8_t func,
- uint16_t reg, uint64_t *valp)
+amdzen_c_df_read64(uint_t dfno, uint8_t inst, const df_reg_def_t def,
+ uint64_t *valp)
{
amdzen_df_t *df;
amdzen_t *azn = amdzen_data;
@@ -350,12 +503,112 @@ amdzen_c_df_read64(uint_t dfno, uint8_t inst, uint8_t func,
return (ENOENT);
}
- *valp = amdzen_df_read64(azn, df, inst, func, reg);
+ *valp = amdzen_df_read_regdef(azn, df, def, inst, B_TRUE);
mutex_exit(&azn->azn_mutex);
return (0);
}
+int
+amdzen_c_df_iter(uint_t dfno, zen_df_type_t type, amdzen_c_iter_f func,
+ void *arg)
+{
+ amdzen_df_t *df;
+ amdzen_t *azn = amdzen_data;
+ df_type_t df_type;
+ uint8_t df_subtype;
+
+ /*
+ * Unlike other calls here, we hold our lock only to find the DF here.
+ * The main reason for this is the nature of the callback function.
+ * Folks are iterating over instances so they can call back into us. If
+ * you look at the locking statement, the thing that is most volatile
+ * right here and what we need to protect is the DF itself and
+ * subsequent register accesses to it. The actual data about which
+ * entities exist is static and so once we have found a DF we should
+ * hopefully be in good shape as they only come, but don't go.
+ */
+ mutex_enter(&azn->azn_mutex);
+ df = amdzen_df_find(azn, dfno);
+ if (df == NULL) {
+ mutex_exit(&azn->azn_mutex);
+ return (ENOENT);
+ }
+ mutex_exit(&azn->azn_mutex);
+
+ switch (type) {
+ case ZEN_DF_TYPE_CS_UMC:
+ df_type = DF_TYPE_CS;
+ /*
+ * In the original Zeppelin DFv2 die there was no subtype field
+ * used for the CS. The UMC is the only type and has a subtype
+ * of zero.
+ */
+ if (df->adf_rev != DF_REV_2) {
+ df_subtype = DF_CS_SUBTYPE_UMC;
+ } else {
+ df_subtype = 0;
+ }
+ break;
+ case ZEN_DF_TYPE_CCM_CPU:
+ df_type = DF_TYPE_CCM;
+ /*
+ * In the Genoa/DFv4 timeframe, with the introduction of CXL and
+ * related, a subtype was added here, whereas previously it was
+ * always zero.
+ */
+ if (df->adf_major >= 4) {
+ df_subtype = DF_CCM_SUBTYPE_CPU;
+ } else {
+ df_subtype = 0;
+ }
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ for (uint_t i = 0; i < df->adf_nents; i++) {
+ amdzen_df_ent_t *ent = &df->adf_ents[i];
+
+ /*
+ * Some DF components are not considered enabled and therefore
+ * will end up having bogus values in their ID fields. If we do
+ * not have an enable flag set, we must skip this node.
+ */
+ if ((ent->adfe_flags & AMDZEN_DFE_F_ENABLED) == 0)
+ continue;
+
+ if (ent->adfe_type == df_type &&
+ ent->adfe_subtype == df_subtype) {
+ int ret = func(dfno, ent->adfe_fabric_id,
+ ent->adfe_inst_id, arg);
+ if (ret != 0) {
+ return (ret);
+ }
+ }
+ }
+
+ return (0);
+}
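
As a sketch of how a child driver might use this iterator (the callback and
wrapper below are hypothetical, not code from this change), counting the UMC
instances in DF 0 could look like:

/*
 * Hypothetical amdzen_c_iter_f callback: count every UMC CS instance that the
 * fabric reports. Returning 0 keeps the iteration going.
 */
static int
example_count_umc(uint_t dfno, uint32_t fabric_id, uint32_t inst_id, void *arg)
{
	uint_t *countp = arg;

	(*countp)++;
	return (0);
}

static int
example_nchannels(uint_t *nchanp)
{
	*nchanp = 0;
	return (amdzen_c_df_iter(0, ZEN_DF_TYPE_CS_UMC, example_count_umc,
	    nchanp));
}

A non-zero return from the callback stops the iteration and is passed back to
the caller, as is ENOENT or EINVAL if the DF lookup fails or the type is
unknown.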
+
+int
+amdzen_c_df_fabric_decomp(df_fabric_decomp_t *decomp)
+{
+ const amdzen_df_t *df;
+ amdzen_t *azn = amdzen_data;
+
+ mutex_enter(&azn->azn_mutex);
+ df = amdzen_df_find(azn, 0);
+ if (df == NULL) {
+ mutex_exit(&azn->azn_mutex);
+ return (ENOENT);
+ }
+
+ *decomp = df->adf_decomp;
+ mutex_exit(&azn->azn_mutex);
+ return (0);
+}
+
static boolean_t
amdzen_create_child(amdzen_t *azn, const amdzen_child_data_t *acd)
{
@@ -477,6 +730,218 @@ amdzen_is_rome_style(uint_t id)
}
/*
+ * To be able to do most other things we want to do, we must first determine
+ * what revision of the DF (data fabric) that we're using.
+ *
+ * Snapshot the DF version. This was added explicitly in DFv4.0, around the Zen
+ * 4 timeframe and allows us to tell apart different versions of the DF register
+ * set, most usefully when various subtypes were added.
+ *
+ * Older versions can theoretically be told apart based on usage of reserved
+ * registers. We walk these in the following order, starting with the newest rev
+ * and walking backwards to tell things apart:
+ *
+ * o v3.5 -> Check function 1, register 0x150. This was reserved prior
+ * to this point. This is actually DF_FIDMASK0_V3P5. We are supposed
+ * to check bits [7:0].
+ *
+ * o v3.0 -> Check function 1, register 0x208. The low byte (7:0) was
+ * changed to indicate a component mask. This is non-zero
+ * in the 3.0 generation. This is actually DF_FIDMASK_V2.
+ *
+ * o v2.0 -> This is simply the case where neither of the above matched.
+ * Presumably v1 wasn't part of the Zen generation.
+ *
+ * Because we don't yet know what version we are, we do not use the normal
+ * versioned register accesses, which would check the DF version and use the
+ * normal indirect register accesses (both of which require us to know the
+ * version). We instead do direct broadcast reads.
+ */
+static void
+amdzen_determine_df_vers(amdzen_t *azn, amdzen_df_t *df)
+{
+ uint32_t val;
+ df_reg_def_t rd = DF_FBICNT;
+
+ val = amdzen_stub_get32(df->adf_funcs[rd.drd_func], rd.drd_reg);
+ df->adf_major = DF_FBICNT_V4_GET_MAJOR(val);
+ df->adf_minor = DF_FBICNT_V4_GET_MINOR(val);
+ if (df->adf_major == 0 && df->adf_minor == 0) {
+ rd = DF_FIDMASK0_V3P5;
+ val = amdzen_stub_get32(df->adf_funcs[rd.drd_func], rd.drd_reg);
+ if (bitx32(val, 7, 0) != 0) {
+ df->adf_major = 3;
+ df->adf_minor = 5;
+ df->adf_rev = DF_REV_3P5;
+ } else {
+ rd = DF_FIDMASK_V2;
+ val = amdzen_stub_get32(df->adf_funcs[rd.drd_func],
+ rd.drd_reg);
+ if (bitx32(val, 7, 0) != 0) {
+ df->adf_major = 3;
+ df->adf_minor = 0;
+ df->adf_rev = DF_REV_3;
+ } else {
+ df->adf_major = 2;
+ df->adf_minor = 0;
+ df->adf_rev = DF_REV_2;
+ }
+ }
+ } else if (df->adf_major == 4 && df->adf_minor == 0) {
+ df->adf_rev = DF_REV_4;
+ } else {
+ df->adf_rev = DF_REV_UNKNOWN;
+ }
+}
+
+/*
+ * All of the different versions of the DF have different ways of getting at and
+ * answering the question of how do I break a fabric ID into a corresponding
+ * socket, die, and component. Importantly the goal here is to obtain, cache,
+ * and normalize:
+ *
+ * o The DF System Configuration
+ * o The various Mask registers
+ * o The Node ID
+ */
+static void
+amdzen_determine_fabric_decomp(amdzen_t *azn, amdzen_df_t *df)
+{
+ uint32_t mask;
+ df_fabric_decomp_t *decomp = &df->adf_decomp;
+
+ switch (df->adf_rev) {
+ case DF_REV_2:
+ df->adf_syscfg = amdzen_df_read32_bcast(azn, df, DF_SYSCFG_V2);
+ switch (DF_SYSCFG_V2_GET_MY_TYPE(df->adf_syscfg)) {
+ case DF_DIE_TYPE_CPU:
+ mask = amdzen_df_read32_bcast(azn, df,
+ DF_DIEMASK_CPU_V2);
+ break;
+ case DF_DIE_TYPE_APU:
+ mask = amdzen_df_read32_bcast(azn, df,
+ DF_DIEMASK_APU_V2);
+ break;
+ default:
+ panic("DF thinks we're not on a CPU!");
+ }
+ df->adf_mask0 = mask;
+
+ /*
+ * DFv2 is a bit different in how the fabric mask register is
+ * phrased. Logically a fabric ID is broken into something that
+ * uniquely identifies a "node" (a particular die on a socket)
+ * and something that identifies a "component", e.g. a memory
+ * controller.
+ *
+ * Starting with DFv3, these registers logically called out how
+ * to separate the fabric ID first into a node and a component.
+ * Then the node was then broken down into a socket and die. In
+ * DFv2, there is no separate mask and shift of a node. Instead
+ * the socket and die are absolute offsets into the fabric ID
+ * rather than relative offsets into the node ID. As such, when
+ * we encounter DFv2, we fake up a node mask and shift and make
+ * it look like DFv3+.
+ */
+ decomp->dfd_node_mask = DF_DIEMASK_V2_GET_SOCK_MASK(mask) |
+ DF_DIEMASK_V2_GET_DIE_MASK(mask);
+ decomp->dfd_node_shift = DF_DIEMASK_V2_GET_DIE_SHIFT(mask);
+ decomp->dfd_comp_mask = DF_DIEMASK_V2_GET_COMP_MASK(mask);
+ decomp->dfd_comp_shift = 0;
+
+ decomp->dfd_sock_mask = DF_DIEMASK_V2_GET_SOCK_MASK(mask) >>
+ decomp->dfd_node_shift;
+ decomp->dfd_die_mask = DF_DIEMASK_V2_GET_DIE_MASK(mask) >>
+ decomp->dfd_node_shift;
+ decomp->dfd_sock_shift = DF_DIEMASK_V2_GET_SOCK_SHIFT(mask) -
+ decomp->dfd_node_shift;
+ decomp->dfd_die_shift = DF_DIEMASK_V2_GET_DIE_SHIFT(mask) -
+ decomp->dfd_node_shift;
+ ASSERT3U(decomp->dfd_die_shift, ==, 0);
+ break;
+ case DF_REV_3:
+ df->adf_syscfg = amdzen_df_read32_bcast(azn, df, DF_SYSCFG_V3);
+ df->adf_mask0 = amdzen_df_read32_bcast(azn, df,
+ DF_FIDMASK0_V3);
+ df->adf_mask1 = amdzen_df_read32_bcast(azn, df,
+ DF_FIDMASK1_V3);
+
+ decomp->dfd_sock_mask =
+ DF_FIDMASK1_V3_GET_SOCK_MASK(df->adf_mask1);
+ decomp->dfd_sock_shift =
+ DF_FIDMASK1_V3_GET_SOCK_SHIFT(df->adf_mask1);
+ decomp->dfd_die_mask =
+ DF_FIDMASK1_V3_GET_DIE_MASK(df->adf_mask1);
+ decomp->dfd_die_shift = 0;
+ decomp->dfd_node_mask =
+ DF_FIDMASK0_V3_GET_NODE_MASK(df->adf_mask0);
+ decomp->dfd_node_shift =
+ DF_FIDMASK1_V3_GET_NODE_SHIFT(df->adf_mask1);
+ decomp->dfd_comp_mask =
+ DF_FIDMASK0_V3_GET_COMP_MASK(df->adf_mask0);
+ decomp->dfd_comp_shift = 0;
+ break;
+ case DF_REV_3P5:
+ df->adf_syscfg = amdzen_df_read32_bcast(azn, df,
+ DF_SYSCFG_V3P5);
+ df->adf_mask0 = amdzen_df_read32_bcast(azn, df,
+ DF_FIDMASK0_V3P5);
+ df->adf_mask1 = amdzen_df_read32_bcast(azn, df,
+ DF_FIDMASK1_V3P5);
+ df->adf_mask2 = amdzen_df_read32_bcast(azn, df,
+ DF_FIDMASK2_V3P5);
+
+ decomp->dfd_sock_mask =
+ DF_FIDMASK2_V3P5_GET_SOCK_MASK(df->adf_mask2);
+ decomp->dfd_sock_shift =
+ DF_FIDMASK1_V3P5_GET_SOCK_SHIFT(df->adf_mask1);
+ decomp->dfd_die_mask =
+ DF_FIDMASK2_V3P5_GET_DIE_MASK(df->adf_mask2);
+ decomp->dfd_die_shift = 0;
+ decomp->dfd_node_mask =
+ DF_FIDMASK0_V3P5_GET_NODE_MASK(df->adf_mask0);
+ decomp->dfd_node_shift =
+ DF_FIDMASK1_V3P5_GET_NODE_SHIFT(df->adf_mask1);
+ decomp->dfd_comp_mask =
+ DF_FIDMASK0_V3P5_GET_COMP_MASK(df->adf_mask0);
+ decomp->dfd_comp_shift = 0;
+ break;
+ case DF_REV_4:
+ df->adf_syscfg = amdzen_df_read32_bcast(azn, df, DF_SYSCFG_V4);
+ df->adf_mask0 = amdzen_df_read32_bcast(azn, df,
+ DF_FIDMASK0_V4);
+ df->adf_mask1 = amdzen_df_read32_bcast(azn, df,
+ DF_FIDMASK1_V4);
+ df->adf_mask2 = amdzen_df_read32_bcast(azn, df,
+ DF_FIDMASK2_V4);
+
+ /*
+ * The DFv4 registers are at a different location in the DF;
+ * however, the actual layout of fields is the same as DFv3.5.
+ * This is why you see V3P5 below.
+ */
+ decomp->dfd_sock_mask =
+ DF_FIDMASK2_V3P5_GET_SOCK_MASK(df->adf_mask2);
+ decomp->dfd_sock_shift =
+ DF_FIDMASK1_V3P5_GET_SOCK_SHIFT(df->adf_mask1);
+ decomp->dfd_die_mask =
+ DF_FIDMASK2_V3P5_GET_DIE_MASK(df->adf_mask2);
+ decomp->dfd_die_shift = 0;
+ decomp->dfd_node_mask =
+ DF_FIDMASK0_V3P5_GET_NODE_MASK(df->adf_mask0);
+ decomp->dfd_node_shift =
+ DF_FIDMASK1_V3P5_GET_NODE_SHIFT(df->adf_mask1);
+ decomp->dfd_comp_mask =
+ DF_FIDMASK0_V3P5_GET_COMP_MASK(df->adf_mask0);
+ decomp->dfd_comp_shift = 0;
+ break;
+ default:
+ panic("encountered suspicious, previously rejected DF "
+ "rev: 0x%x", df->adf_rev);
+ }
+}
+
+/*
* Initialize our knowledge about a given series of nodes on the data fabric.
*/
static void
@@ -485,10 +950,25 @@ amdzen_setup_df(amdzen_t *azn, amdzen_df_t *df)
uint_t i;
uint32_t val;
- val = amdzen_stub_get32(df->adf_funcs[0], AMDZEN_DF_F0_CFG_ADDR_CTL);
- df->adf_nb_busno = AMDZEN_DF_F0_CFG_ADDR_CTL_BUS_NUM(val);
- val = amdzen_stub_get32(df->adf_funcs[0], AMDZEN_DF_F0_FBICNT);
- df->adf_nents = AMDZEN_DF_F0_FBICNT_COUNT(val);
+ amdzen_determine_df_vers(azn, df);
+
+ switch (df->adf_rev) {
+ case DF_REV_2:
+ case DF_REV_3:
+ case DF_REV_3P5:
+ val = amdzen_df_read32_bcast(azn, df, DF_CFG_ADDR_CTL_V2);
+ break;
+ case DF_REV_4:
+ val = amdzen_df_read32_bcast(azn, df, DF_CFG_ADDR_CTL_V4);
+ break;
+ default:
+ dev_err(azn->azn_dip, CE_WARN, "encountered unsupported DF "
+ "revision: 0x%x", df->adf_rev);
+ return;
+ }
+ df->adf_nb_busno = DF_CFG_ADDR_CTL_GET_BUS_NUM(val);
+ val = amdzen_df_read32_bcast(azn, df, DF_FBICNT);
+ df->adf_nents = DF_FBICNT_GET_COUNT(val);
if (df->adf_nents == 0)
return;
df->adf_ents = kmem_zalloc(sizeof (amdzen_df_ent_t) * df->adf_nents,
@@ -503,7 +983,8 @@ amdzen_setup_df(amdzen_t *azn, amdzen_df_t *df)
* while everything else we can find uses a contiguous instance
* ID pattern. This means that for Rome, we need to adjust the
* indexes that we iterate over, though the total number of
- * entries is right.
+ * entries is right. This was carried over into Milan, but not
+ * Genoa.
*/
if (amdzen_is_rome_style(df->adf_funcs[0]->azns_did)) {
if (inst > ARRAY_SIZE(amdzen_df_rome_ids)) {
@@ -517,52 +998,50 @@ amdzen_setup_df(amdzen_t *azn, amdzen_df_t *df)
}
dfe->adfe_drvid = inst;
- dfe->adfe_info0 = amdzen_df_read32(azn, df, inst, 0,
- AMDZEN_DF_F0_FBIINFO0);
- dfe->adfe_info1 = amdzen_df_read32(azn, df, inst, 0,
- AMDZEN_DF_F0_FBIINFO1);
- dfe->adfe_info2 = amdzen_df_read32(azn, df, inst, 0,
- AMDZEN_DF_F0_FBIINFO2);
- dfe->adfe_info3 = amdzen_df_read32(azn, df, inst, 0,
- AMDZEN_DF_F0_FBIINFO3);
- dfe->adfe_syscfg = amdzen_df_read32(azn, df, inst, 1,
- AMDZEN_DF_F1_SYSCFG);
- dfe->adfe_mask0 = amdzen_df_read32(azn, df, inst, 1,
- AMDZEN_DF_F1_FIDMASK0);
- dfe->adfe_mask1 = amdzen_df_read32(azn, df, inst, 1,
- AMDZEN_DF_F1_FIDMASK1);
-
- dfe->adfe_type = AMDZEN_DF_F0_FBIINFO0_TYPE(dfe->adfe_info0);
- dfe->adfe_sdp_width =
- AMDZEN_DF_F0_FBIINFO0_SDP_WIDTH(dfe->adfe_info0);
- if (AMDZEN_DF_F0_FBIINFO0_ENABLED(dfe->adfe_info0)) {
+ dfe->adfe_info0 = amdzen_df_read32(azn, df, inst, DF_FBIINFO0);
+ dfe->adfe_info1 = amdzen_df_read32(azn, df, inst, DF_FBIINFO1);
+ dfe->adfe_info2 = amdzen_df_read32(azn, df, inst, DF_FBIINFO2);
+ dfe->adfe_info3 = amdzen_df_read32(azn, df, inst, DF_FBIINFO3);
+
+ dfe->adfe_type = DF_FBIINFO0_GET_TYPE(dfe->adfe_info0);
+ dfe->adfe_subtype = DF_FBIINFO0_GET_SUBTYPE(dfe->adfe_info0);
+
+ /*
+ * The enabled flag was not present in Zen 1. Simulate it by
+ * checking for a non-zero register instead.
+ */
+ if (DF_FBIINFO0_V3_GET_ENABLED(dfe->adfe_info0) ||
+ (df->adf_rev == DF_REV_2 && dfe->adfe_info0 != 0)) {
dfe->adfe_flags |= AMDZEN_DFE_F_ENABLED;
}
- dfe->adfe_fti_width =
- AMDZEN_DF_F0_FBIINFO0_FTI_WIDTH(dfe->adfe_info0);
- dfe->adfe_sdp_count =
- AMDZEN_DF_F0_FBIINFO0_SDP_PCOUNT(dfe->adfe_info0);
- dfe->adfe_fti_count =
- AMDZEN_DF_F0_FBIINFO0_FTI_PCOUNT(dfe->adfe_info0);
- if (AMDZEN_DF_F0_FBIINFO0_HAS_MCA(dfe->adfe_info0)) {
+ if (DF_FBIINFO0_GET_HAS_MCA(dfe->adfe_info0)) {
dfe->adfe_flags |= AMDZEN_DFE_F_MCA;
}
- dfe->adfe_subtype =
- AMDZEN_DF_F0_FBIINFO0_SUBTYPE(dfe->adfe_info0);
-
- dfe->adfe_inst_id =
- AMDZEN_DF_F0_FBIINFO3_INSTID(dfe->adfe_info3);
- dfe->adfe_fabric_id =
- AMDZEN_DF_F0_FBIINFO3_FABID(dfe->adfe_info3);
+ dfe->adfe_inst_id = DF_FBIINFO3_GET_INSTID(dfe->adfe_info3);
+ switch (df->adf_rev) {
+ case DF_REV_2:
+ dfe->adfe_fabric_id =
+ DF_FBIINFO3_V2_GET_BLOCKID(dfe->adfe_info3);
+ break;
+ case DF_REV_3:
+ dfe->adfe_fabric_id =
+ DF_FBIINFO3_V3_GET_BLOCKID(dfe->adfe_info3);
+ break;
+ case DF_REV_3P5:
+ dfe->adfe_fabric_id =
+ DF_FBIINFO3_V3P5_GET_BLOCKID(dfe->adfe_info3);
+ break;
+ case DF_REV_4:
+ dfe->adfe_fabric_id =
+ DF_FBIINFO3_V4_GET_BLOCKID(dfe->adfe_info3);
+ break;
+ default:
+ panic("encountered suspicious, previously rejected DF "
+ "rev: 0x%x", df->adf_rev);
+ }
}
- df->adf_syscfg = amdzen_stub_get32(df->adf_funcs[1],
- AMDZEN_DF_F1_SYSCFG);
- df->adf_nodeid = AMDZEN_DF_F1_SYSCFG_NODEID(df->adf_syscfg);
- df->adf_mask0 = amdzen_stub_get32(df->adf_funcs[1],
- AMDZEN_DF_F1_FIDMASK0);
- df->adf_mask1 = amdzen_stub_get32(df->adf_funcs[1],
- AMDZEN_DF_F1_FIDMASK1);
+ amdzen_determine_fabric_decomp(azn, df);
}
static void
diff --git a/usr/src/uts/intel/io/amdzen/amdzen.h b/usr/src/uts/intel/io/amdzen/amdzen.h
index 6ba5266bd3..30777a2905 100644
--- a/usr/src/uts/intel/io/amdzen/amdzen.h
+++ b/usr/src/uts/intel/io/amdzen/amdzen.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright 2020 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
#ifndef _AMDZEN_H
@@ -22,6 +22,9 @@
#include <sys/pci.h>
#include <sys/taskq.h>
#include <sys/bitmap.h>
+#include <sys/amdzen/df.h>
+
+#include "amdzen_client.h"
/*
* This header describes properties of the data fabric and our internal state
@@ -51,144 +54,6 @@ extern "C" {
*/
#define AMDZEN_MAX_DF_FUNCS 0x8
-
-/*
- * Registers in the data fabric space that we care about for the purposes of the
- * nexus driver understanding itself.
- */
-
-/*
- * This set of registers provides us access to the count of instances in the
- * data fabric and then a number of different pieces of information about them
- * like their type. Note, these registers require indirect access because the
- * information cannot be broadcast.
- */
-#define AMDZEN_DF_F0_FBICNT 0x40
-#define AMDZEN_DF_F0_FBICNT_COUNT(x) BITX(x, 7, 0)
-#define AMDZEN_DF_F0_FBIINFO0 0x44
-#define AMDZEN_DF_F0_FBIINFO0_TYPE(x) BITX(x, 3, 0)
-typedef enum {
- AMDZEN_DF_TYPE_CCM = 0,
- AMDZEN_DF_TYPE_GCM,
- AMDZEN_DF_TYPE_NCM,
- AMDZEN_DF_TYPE_IOMS,
- AMDZEN_DF_TYPE_CS,
- AMDZEN_DF_TYPE_TCDX,
- AMDZEN_DF_TYPE_PIE,
- AMDZEN_DF_TYPE_SPF,
- AMDZEN_DF_TYPE_LLC,
- AMDZEN_DF_TYPE_CAKE
-} amdzen_df_type_t;
-#define AMDZEN_DF_F0_FBIINFO0_SDP_WIDTH(x) BITX(x, 5, 4)
-typedef enum {
- AMDZEN_DF_SDP_W_64 = 0,
- AMDZEN_DF_SDP_W_128,
- AMDZEN_DF_SDP_W_256,
- AMDZEN_DF_SDP_W_512
-} amdzen_df_sdp_width_t;
-#define AMDZEN_DF_F0_FBIINFO0_ENABLED(x) BITX(x, 6, 6)
-#define AMDZEN_DF_F0_FBIINFO0_FTI_WIDTH(x) BITX(x, 9, 8)
-typedef enum {
- AMDZEN_DF_FTI_W_64 = 0,
- AMDZEN_DF_FTI_W_128,
- AMDZEN_DF_FTI_W_256,
- AMDZEN_DF_FTI_W_512
-} amdzen_df_fti_width_t;
-#define AMDZEN_DF_F0_FBIINFO0_SDP_PCOUNT(x) BITX(x, 13, 12)
-#define AMDZEN_DF_F0_FBIINFO0_FTI_PCOUNT(x) BITX(x, 18, 16)
-#define AMDZEN_DF_F0_FBIINFO0_HAS_MCA(x) BITX(x, 23, 23)
-#define AMDZEN_DF_F0_FBIINFO0_SUBTYPE(x) BITX(x, 26, 24)
-#define AMDZEN_DF_SUBTYPE_NONE 0
-typedef enum {
- AMDZEN_DF_CAKE_SUBTYPE_GMI = 1,
- AMDZEN_DF_CAKE_SUBTYPE_xGMI = 2
-} amdzen_df_cake_subtype_t;
-
-typedef enum {
- AMDZEN_DF_IOM_SUBTYPE_IOHUB = 1,
-} amdzen_df_iom_subtype_t;
-
-typedef enum {
- AMDZEN_DF_CS_SUBTYPE_UMC = 1,
- AMDZEN_DF_CS_SUBTYPE_CCIX = 2
-} amdzen_df_cs_subtype_t;
-
-#define AMDZEN_DF_F0_FBIINFO1 0x48
-#define AMDZEN_DF_F0_FBIINFO1_FTI0_NINSTID(x) BITX(x, 7, 0)
-#define AMDZEN_DF_F0_FBIINFO1_FTI1_NINSTID(x) BITX(x, 15, 8)
-#define AMDZEN_DF_F0_FBIINFO1_FTI2_NINSTID(x) BITX(x, 23, 16)
-#define AMDZEN_DF_F0_FBIINFO1_FTI3_NINSTID(x) BITX(x, 31, 24)
-#define AMDZEN_DF_F0_FBIINFO2 0x4c
-#define AMDZEN_DF_F0_FBIINFO2_FTI4_NINSTID(x) BITX(x, 7, 0)
-#define AMDZEN_DF_F0_FBIINFO2_FTI5_NINSTID(x) BITX(x, 15, 8)
-#define AMDZEN_DF_F0_FBIINFO3 0x50
-#define AMDZEN_DF_F0_FBIINFO3_INSTID(x) BITX(x, 7, 0)
-#define AMDZEN_DF_F0_FBIINFO3_FABID(x) BITX(x, 13, 8)
-
-/*
- * This register contains the information about the configuration of PCIe buses.
- * We care about finding which one has our BUS A, which is required to map it to
- * the northbridge.
- */
-#define AMDZEN_DF_F0_CFG_ADDR_CTL 0x84
-#define AMDZEN_DF_F0_CFG_ADDR_CTL_BUS_NUM(x) BITX(x, 7, 0)
-
-/*
- * Registers that describe how the system is actually put together.
- */
-#define AMDZEN_DF_F1_SYSCFG 0x200
-#define AMDZEN_DF_F1_SYSCFG_DIE_PRESENT(X) BITX(x, 7, 0)
-#define AMDZEN_DF_F1_SYSCFG_DIE_TYPE(x) BITX(x, 18, 11)
-#define AMDZEN_DF_F1_SYSCFG_MYDIE_TYPE(x) BITX(x, 24, 23)
-typedef enum {
- AMDZEN_DF_DIE_TYPE_CPU = 0,
- AMDZEN_DF_DIE_TYPE_APU,
- AMDZEN_DF_DIE_TYPE_dGPU
-} amdzen_df_die_type_t;
-#define AMDZEN_DF_F1_SYSCFG_OTHERDIE_TYPE(x) BITX(x, 26, 25)
-#define AMDZEN_DF_F1_SYSCFG_OTHERSOCK(x) BITX(x, 27, 27)
-#define AMDZEN_DF_F1_SYSCFG_NODEID(x) BITX(x, 30, 28)
-
-#define AMDZEN_DF_F1_FIDMASK0 0x208
-#define AMDZEN_DF_F1_FIDMASK0_COMP_MASK(x) BITX(x, 9, 0)
-#define AMDZEN_DF_F1_FIDMASK0_NODE_MASK(x) BITX(x, 25, 16)
-#define AMDZEN_DF_F1_FIDMASK1 0x20C
-#define AMDZEN_DF_F1_FIDMASK1_NODE_SHIFT(x) BITX(x, 3, 0)
-#define AMDZEN_DF_F1_FIDMASK1_SKT_SHIFT(x) BITX(x, 9, 8)
-#define AMDZEN_DF_F1_FIDMASK1_DIE_MASK(x) BITX(x, 18, 16)
-#define AMDZEN_DF_F1_FIDMASK1_SKT_MASK(x) BITX(x, 26, 24)
-
-/*
- * These two registers define information about the PSP and SMU on local and
- * remote dies (from the context of the DF instance). The bits are the same.
- */
-#define AMDZEN_DF_F1_PSPSMU_LOCAL 0x268
-#define AMDZEN_DF_F1_PSPSMU_REMOTE 0x268
-#define AMDZEN_DF_F1_PSPSMU_SMU_VALID(x) BITX(x, 0, 0)
-#define AMDZEN_DF_F1_PSPSMU_SMU_UNITID(x) BITX(x, 6, 1)
-#define AMDZEN_DF_F1_PSPSMU_SMU_COMPID(x) BITX(x, 15, 8)
-#define AMDZEN_DF_F1_PSPSMU_PSP_VALID(x) BITX(x, 16, 16)
-#define AMDZEN_DF_F1_PSPSMU_PSP_UNITID(x) BITX(x, 22, 17)
-#define AMDZEN_DF_F1_PSPSMU_PSP_COMPID(x) BITX(x, 31, 24)
-
-#define AMDZEN_DF_F1_CAKE_ENCR 0x2cc
-
-/*
- * These registers are used to define Indirect Access, commonly known as FICAA
- * and FICAD for the system. While there are multiple copies of the indirect
- * access registers in device 4, we're only allowed access to one set of those
- * (which are the ones present here). Specifically the OS is given access to set
- * 3.
- */
-#define AMDZEN_DF_F4_FICAA 0x5c
-#define AMDZEN_DF_F4_FICAA_TARG_INST (1 << 0)
-#define AMDZEN_DF_F4_FICAA_SET_REG(x) ((x) & 0x3fc)
-#define AMDZEN_DF_F4_FICAA_SET_FUNC(x) (((x) & 0x7) << 11)
-#define AMDZEN_DF_F4_FICAA_SET_64B (1 << 14)
-#define AMDZEN_DF_F4_FICAA_SET_INST(x) (((x) & 0xff) << 16)
-#define AMDZEN_DF_F4_FICAD_LO 0x98
-#define AMDZEN_DF_F4_FICAD_HI 0x9c
-
/*
* Northbridge registers that are relevant for the nexus, mostly for SMN.
*/
@@ -229,26 +94,19 @@ typedef enum {
typedef struct {
uint8_t adfe_drvid;
amdzen_df_ent_flags_t adfe_flags;
- amdzen_df_type_t adfe_type;
+ df_type_t adfe_type;
uint8_t adfe_subtype;
uint8_t adfe_fabric_id;
uint8_t adfe_inst_id;
- amdzen_df_sdp_width_t adfe_sdp_width;
- amdzen_df_fti_width_t adfe_fti_width;
- uint8_t adfe_sdp_count;
- uint8_t adfe_fti_count;
uint32_t adfe_info0;
uint32_t adfe_info1;
uint32_t adfe_info2;
uint32_t adfe_info3;
- uint32_t adfe_syscfg;
- uint32_t adfe_mask0;
- uint32_t adfe_mask1;
} amdzen_df_ent_t;
typedef enum {
AMDZEN_DF_F_VALID = 1 << 0,
- AMDZEN_DF_F_FOUND_NB = 1 << 1
+ AMDZEN_DF_F_FOUND_NB = 1 << 1,
} amdzen_df_flags_t;
typedef struct {
@@ -256,12 +114,17 @@ typedef struct {
uint_t adf_nb_busno;
amdzen_stub_t *adf_funcs[AMDZEN_MAX_DF_FUNCS];
amdzen_stub_t *adf_nb;
+ uint8_t adf_major;
+ uint8_t adf_minor;
uint_t adf_nents;
+ df_rev_t adf_rev;
amdzen_df_ent_t *adf_ents;
uint32_t adf_nodeid;
uint32_t adf_syscfg;
uint32_t adf_mask0;
uint32_t adf_mask1;
+ uint32_t adf_mask2;
+ df_fabric_decomp_t adf_decomp;
} amdzen_df_t;
typedef enum {
@@ -295,7 +158,8 @@ typedef struct amdzen {
typedef enum {
AMDZEN_C_SMNTEMP = 1,
AMDZEN_C_USMN,
- AMDZEN_C_ZEN_UDF
+ AMDZEN_C_ZEN_UDF,
+ AMDZEN_C_ZEN_UMC
} amdzen_child_t;
/*
diff --git a/usr/src/uts/intel/io/amdzen/amdzen_client.h b/usr/src/uts/intel/io/amdzen/amdzen_client.h
index d617224a1a..fe8cd87751 100644
--- a/usr/src/uts/intel/io/amdzen/amdzen_client.h
+++ b/usr/src/uts/intel/io/amdzen/amdzen_client.h
@@ -10,7 +10,7 @@
*/
/*
- * Copyright 2021 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
#ifndef _AMDZEN_CLIENT_H
@@ -21,16 +21,84 @@
*/
#include <sys/types.h>
+#include <sys/amdzen/df.h>
#ifdef __cplusplus
extern "C" {
#endif
+/*
+ * This enumeration is used to identify a given family of Zen era processors.
+ * This is derived from a family/model/stepping range. In some cases such as
+ * Dali, several different chips are covered that appear to be mostly the same
+ * as far as we can tell for our purposes (e.g. Raven Ridge, Picasso, etc.).
+ */
+typedef enum {
+ ZEN_FAMILY_UNKNOWN,
+ ZEN_FAMILY_NAPLES,
+ ZEN_FAMILY_DHYANA,
+ ZEN_FAMILY_DALI,
+ ZEN_FAMILY_ROME,
+ ZEN_FAMILY_RENOIR,
+ ZEN_FAMILY_MATISSE,
+ ZEN_FAMILY_VAN_GOGH,
+ ZEN_FAMILY_MENDOCINO,
+ ZEN_FAMILY_MILAN,
+ ZEN_FAMILY_GENOA,
+ ZEN_FAMILY_VERMEER,
+ ZEN_FAMILY_REMBRANDT,
+ ZEN_FAMILY_CEZANNE,
+ ZEN_FAMILY_RAPHAEL
+} zen_family_t;
+
+/*
+ * This struct encodes enough information to later be used to compose and
+ * decompose a fabric ID and component ID. A fabric ID is broken into its node
+ * and component IDs and then a node ID is further decomposed into a socket and
+ * die ID.
+ */
+typedef struct {
+ uint32_t dfd_sock_mask;
+ uint32_t dfd_die_mask;
+ uint32_t dfd_node_mask;
+ uint32_t dfd_comp_mask;
+ uint8_t dfd_sock_shift;
+ uint8_t dfd_die_shift;
+ uint8_t dfd_node_shift;
+ uint8_t dfd_comp_shift;
+} df_fabric_decomp_t;
+
+extern zen_family_t amdzen_c_family(void);
+extern uint_t amdzen_c_df_count(void);
+extern df_rev_t amdzen_c_df_rev(void);
+extern int amdzen_c_df_fabric_decomp(df_fabric_decomp_t *);
+
+/*
+ * SMN and DF access routines.
+ */
extern int amdzen_c_smn_read32(uint_t, uint32_t, uint32_t *);
extern int amdzen_c_smn_write32(uint_t, uint32_t, uint32_t);
-extern uint_t amdzen_c_df_count(void);
-extern int amdzen_c_df_read32(uint_t, uint8_t, uint8_t, uint16_t, uint32_t *);
-extern int amdzen_c_df_read64(uint_t, uint8_t, uint8_t, uint16_t, uint64_t *);
+extern int amdzen_c_df_read32(uint_t, uint8_t, const df_reg_def_t, uint32_t *);
+extern int amdzen_c_df_read64(uint_t, uint8_t, const df_reg_def_t, uint64_t *);
+
+/*
+ * The following are logical types that we can iterate over. Note, that these
+ * are a combination of a DF type and subtype. This is used to smooth over the
+ * differences between different DF revisions and how they indicate these types.
+ */
+typedef enum {
+ /*
+ * Iterate over only DDR memory controllers.
+ */
+ ZEN_DF_TYPE_CS_UMC,
+ /*
+ * Iterate only over CPU based CCMs.
+ */
+ ZEN_DF_TYPE_CCM_CPU
+} zen_df_type_t;
+
+typedef int (*amdzen_c_iter_f)(uint_t, uint32_t, uint32_t, void *);
+extern int amdzen_c_df_iter(uint_t, zen_df_type_t, amdzen_c_iter_f, void *);
#ifdef __cplusplus
}
diff --git a/usr/src/uts/intel/io/amdzen/zen_udf.c b/usr/src/uts/intel/io/amdzen/zen_udf.c
index 3b0f38b289..28f03a9bb2 100644
--- a/usr/src/uts/intel/io/amdzen/zen_udf.c
+++ b/usr/src/uts/intel/io/amdzen/zen_udf.c
@@ -10,7 +10,7 @@
*/
/*
- * Copyright 2020 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
/*
@@ -75,6 +75,7 @@ zen_udf_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
uint_t dfno;
zen_udf_t *zen_udf = &zen_udf_data;
zen_udf_io_t zui;
+ df_reg_def_t def;
if (cmd != ZEN_UDF_READ32 && cmd != ZEN_UDF_READ64) {
return (ENOTTY);
@@ -94,12 +95,19 @@ zen_udf_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
return (EFAULT);
}
+ /*
+ * Cons up a register definition based on the user request. We set the
+ * gen to our current one.
+ */
+ def.drd_gens = amdzen_c_df_rev();
+ def.drd_func = zui.zui_func;
+ def.drd_reg = zui.zui_reg;
+
if (cmd == ZEN_UDF_READ32) {
int ret;
uint32_t data;
- ret = amdzen_c_df_read32(dfno, zui.zui_inst, zui.zui_func,
- zui.zui_reg, &data);
+ ret = amdzen_c_df_read32(dfno, zui.zui_inst, def, &data);
if (ret != 0) {
return (ret);
}
@@ -108,8 +116,8 @@ zen_udf_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
} else {
int ret;
- ret = amdzen_c_df_read64(dfno, zui.zui_inst, zui.zui_func,
- zui.zui_reg, &zui.zui_data);
+ ret = amdzen_c_df_read64(dfno, zui.zui_inst, def,
+ &zui.zui_data);
if (ret != 0) {
return (ret);
}
diff --git a/usr/src/uts/intel/io/amdzen/zen_umc.c b/usr/src/uts/intel/io/amdzen/zen_umc.c
new file mode 100644
index 0000000000..485b645299
--- /dev/null
+++ b/usr/src/uts/intel/io/amdzen/zen_umc.c
@@ -0,0 +1,3177 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+/*
+ * AMD Zen Unified Memory Controller Driver
+ *
+ * This file forms the core logic around transforming a physical address that
+ * we're used to using into a specific location on a DIMM. This has support for
+ * a wide range of AMD CPUs and APUs ranging from Zen 1 - Zen 4.
+ *
+ * The goal of this driver is to implement the infrastructure and support
+ * necessary to understand how DRAM requests are being routed in the system and
+ * to be able to map those to particular channels and then DIMMs. This is used
+ * as part of RAS (reliability, availability, and serviceability) to enable
+ * aspects around understanding ECC errors, hardware topology, and more. Like
+ * with any software project, there is more to do here. Please see the Future
+ * Work section at the end of this big theory statement for more information.
+ *
+ * -------------------
+ * Driver Organization
+ * -------------------
+ *
+ * This driver is organized into two major pieces:
+ *
+ * 1. Logic to interface with hardware, discover the data fabric, memory
+ * controller configuration, and transform that into a normalized fashion
+ * that can be used across all different Zen family CPUs. This is
+ * implemented generally in this file, and is designed to assume it is in
+ * the kernel (as it requires access to the SMN, DF PCI registers, and the
+ * amdzen nexus driver client services).
+ *
+ * 2. Logic that can take the above normalized memory information and perform
+ * decoding (e.g. physical address to DIMM information). This generally
+ * lives in common/mc/zen_umc/zen_umc_decode.c. This file is in common/,
+ * meaning it is designed to be shared by userland and the kernel. Even
+ * more so, it is designed to operate on a const version of our primary
+ * data structure (zen_umc_t), not allowing it to be modified. This allows
+ * us to more easily unit test the decoding logic and utilize it in other
+ * circumstances such as with the mcdecode utility.
+ *
+ * There is corresponding traditional dev_ops(9S) and cb_ops(9S) logic in the
+ * driver (currently this file) which take care of interfacing with the broader
+ * operating system environment.
+ *
+ * There is only ever one instance of this driver, i.e. it is a singleton in
+ * design pattern parlance. There is a single struct, the zen_umc_t found in the
+ * global (albeit static) variable zen_umc. This structure itself contains a
+ * hierarchical set of structures that describe the system. To make management
+ * of memory simpler, all of the nested structures that we discover from
+ * hardware are allocated in the same structure. The only exception to this rule
+ * is when we cache serialized nvlists for dumping.
+ *
+ * The organization of the structures inside the zen_umc_t, generally mimics the
+ * hardware organization and is structured as follows:
+ *
+ * +-----------+
+ * | zen_umc_t |
+ * +-----------+
+ * |
+ * +-------------------------------+
+ * v v
+ * +--------------+ +--------------+ One instance of the
+ * | zen_umc_df_t | ... | zen_umc_df_t | zen_umc_df_t per
+ * +--------------+ +--------------+ discovered DF.
+ * |||
+ * |||
+ * ||| +----------------+ +----------------+ Global DRAM
+ * ||+--->| df_dram_rule_t | ... | df_dram_rule_t | rules for the
+ * || +----------------+ +----------------+ platform.
+ * ||
+ * || +--------------------+ +--------------------+ UMC remap
+ * |+--->| zen_umc_cs_remap_t | ... | zen_umc_cs_remap_t | rule arrays.
+ * | +--------------------+ +--------------------+
+ * |
+ * v
+ * +----------------+ +----------------+ One structure per
+ * | zen_umc_chan_t | ... | zen_umc_chan_t | discovered DDR4/5
+ * +----------------+ +----------------+ memory channel.
+ * ||||
+ * ||||
+ * |||| +----------------+ +----------------+ Channel specific
+ * |||+--->| df_dram_rule_t | ... | df_dram_rule_t | copy of DRAM rules.
+ * ||| +----------------+ +----------------+ Less than global.
+ * |||
+ * ||| +---------------+ +---------------+ Per-Channel DRAM
+ * ||+---->| chan_offset_t | ... | chan_offset_t | offset that is used
+ * || +---------------+ +---------------+ for normalization.
+ * ||
+ * || +-----------------+ Channel-specific
+ * |+----->| umc_chan_hash_t | hashing rules.
+ * | +-----------------+
+ * |
+ * | +------------+ +------------+ One structure for
+ * +------>| umc_dimm_t | ... | umc_dimm_t | each DIMM in the
+ * +------------+ +------------+ channel. Always two.
+ * |
+ * | +----------+ +----------+ Per chip-select
+ * +---> | umc_cs_t | ... | umc_cs_t | data. Always two.
+ * +----------+ +----------+
+ *
+ * In the data structures themselves you'll often find several pieces of data
+ * that have the term 'raw' in their name. The point of these is to basically
+ * capture the original value that we read from the register before processing
+ * it. These are generally used either for debugging or to help answer future
+ * curiosity with resorting to the udf and usmn tooling, which hopefully aren't
+ * actually installed on systems.
+ *
+ * With the exception of some of the members in the zen_umc_t that are around
+ * management of state for userland ioctls, everything in the structure is
+ * basically write-once and from that point on should be treated as read-only.
+ *
+ * ---------------
+ * Memory Decoding
+ * ---------------
+ *
+ * To understand the process of memory decoding, it's worth going through and
+ * understanding a bunch of the terminology that is used in this process. As an
+ * additional reference when understanding this, you may want to turn to either
+ * an older generation AMD BIOS and Kernel Developer's Guide or the more current
+ * Processor Programming Reference. In addition, the imc driver, which is the
+ * Intel equivalent, also provides an additional bit of reference.
+ *
+ * SYSTEM ADDRESS
+ *
+ * This is a physical address and is the way that the operating system
+ * normally thinks of memory. System addresses can refer to many different
+ * things. For example, you have traditional DRAM, memory-mapped PCIe
+ * devices, peripherals that the processor exposes such as the xAPIC, data
+ * from the FCH (Fusion Controller Hub), etc.
+ *
+ * TOM, TOM2, and the DRAM HOLE
+ *
+ * Physical memory has a complicated layout on x86 in part because of
+ * support for traditional 16-bit and 32-bit systems. As a result, contrary
+ * to popular belief, DRAM is not at a consistent address range in the
+ * processor. AMD processors have a few different ranges. There is a 32-bit
+ * region that starts at effectively physical address zero and goes to the
+ * TOM MSR (top of memory -- Core::X86::Msr::TOP_MEM). This indicates a
+ * limit below 4 GiB, generally around 2 GiB.
+ *
+ * From there, the next region of DRAM starts at 4 GiB and goes to TOM2
+ * (top of memory 2 -- Core::X86::Msr::TOM2). The region between TOM and
+ * 4 GiB is called the DRAM hole. Physical addresses in this region are
+ * used for memory mapped I/O. This breaks up contiguous physical
+ * addresses being used for DRAM, creating a "hole".
+ *
+ * DATA FABRIC
+ *
+ * The data fabric (DF) is the primary interface that different parts of
+ * the system use to communicate with one another. This includes the I/O
+ * engines (where PCIe traffic goes), CPU caches and their cores, memory
+ * channels, cross-socket communication, and a whole lot more. The first
+ * part of decoding addresses and figuring out which DRAM channel an
+ * address should be directed to all come from the data fabric.
+ *
+ * The data fabric is comprised of instances. So there is one instance for
+ * each group of cores, each memory channel, etc. Each instance has its own
+ * independent set of register information. As the data fabric is a series
+ * of devices exposed over PCI, if you do a normal PCI configuration space
+ * read or write that'll end up broadcasting the I/O. Instead, to access a
+ * particular instance's register information there is an indirect access
+ * mechanism. The primary way that this driver accesses data fabric
+ * registers is via these indirect reads.
+ *
+ * There is one instance of the Data Fabric per socket starting with Zen 2.
+ * In Zen 1, there was one instance of the data fabric per CCD -- core
+ * complex die (see cpuid.c's big theory statement for more information).
+ *
+ * DF INSTANCE ID
+ *
+ * A DF instance ID is an identifier for a single entity or component in a
+ * data fabric. The set of instance IDs is unique only within a single data
+ * fabric. So for example, each memory channel, I/O endpoint (e.g. PCIe
+ * logic), group of cores, has its own instance ID. Anything within the
+ * same data fabric (e.g. the same die) can be reached via its instance ID.
+ * The instance ID is used to indicate which instance to contact when
+ * performing indirect accesses.
+ *
+ * Not everything that has an instance ID will be globally routable (e.g.
+ * between multiple sockets). For things that are, such as the memory
+ * channels and coherent core initiators, there is a second ID called a
+ * fabric ID.
+ *
+ * DF FABRIC ID
+ *
+ * A DF fabric ID is an identifier that combines information to indicate
+ * both which instance of the data fabric a component is on and a component
+ * itself. So with this number you can distinguish between a memory channel
+ * on one of two sockets. A Fabric ID is made up of two parts. The upper
+ * part indicates which DF we are talking to and is referred to as a Node
+ * ID. The Node ID is itself broken into two parts: one that identifies a
+ * socket, and one that identifies a die. The lower part of a fabric ID is
+ * called a component ID and indicates which component in a particular data
+ * fabric that we are talking to. While only a subset of the total
+ * components in the data fabric are routable, for everything that is, its
+ * component ID matches its instance ID.
+ *
+ * Put differently, the component portion of a fabric ID and a component's
+ * instance ID are always the same for routable entities. For things which
+ * cannot be routed, they only have an instance ID and no fabric ID.
+ * Because this code is always interacting with data fabric components that
+ * are routable, sometimes instance ID and the component ID portion of the
+ * data fabric ID may be used interchangeably.
+ *
+ * Finally, it's worth calling out that the number of bits that are used to
+ * indicate the socket, die, and component in a fabric ID changes from
+ * hardware generation to hardware generation.
+ *
+ * Inside the code here, the socket and die decomposition information is
+ * always relative to the node ID. AMD phrases the decomposition
+ * information in terms of a series of masks and shifts. This is
+ * information that can be retrieved from the data fabric itself, allowing
+ * us to avoid hardcoding too much information other than which registers
+ * actually have which fields. With both masks and shifts, it's important
+ * to establish which comes first. We follow AMD's convention and always
+ * apply masks before shifts. With that, let's look at an example of a
+ * made up bit set:
+ *
+ * Assumptions (to make this example simple):
+ * o The fabric ID is 16 bits
+ * o The component ID is 8 bits
+ * o The node ID is 8 bits
+ * o The socket and die ID are both 4 bits
+ *
+ * Here, let's say that we have the ID 0x2106. This decomposes into a
+ * socket 0x2, die 0x1, and component 0x6. Here is how that works in more
+ * detail:
+ *
+ * 0x21 0x06
+ * |------| |------|
+ * Node ID Component ID
+ * Mask: 0xff00 0x00ff
+ * Shift: 8 0
+ *
+ * Next we would decompose the Node ID as:
+ * 0x2 0x1
+ * |------| |------|
+ * Sock ID Die ID
+ * Mask: 0xf0 0x0f
+ * Shift: 4 0
+ *
+ * Composing a fabric ID from its parts would work in a similar way by
+ * applying masks and shifts.
+ *
+ * NORMAL ADDRESS
+ *
+ * A normal address is one of the primary address types that AMD uses in
+ * memory decoding. It takes into account the DRAM hole, interleave
+ * settings, and is basically the address that is dispatched to the broader
+ * data fabric towards a particular DRAM channel.
+ *
+ * Often, phrases like 'normalizing the address' or normalization refer to
+ * the process of transforming a system address into the channel address.
+ *
+ * INTERLEAVING
+ *
+ * The idea of interleaving is to take a contiguous range and weave it
+ * between multiple different actual entities. Generally certain bits in
+ * the range are used to select one of several smaller regions. For
+ * example, if you have 8 regions each that are 4 GiB in size, that creates
+ * a single 32 GiB region. You can use three bits in that 32 GiB space to
+ * select one of the 8 regions. For a more visual example, see the
+ * definition of this in uts/intel/io/imc/imc.c.
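+ *
+ *     A rough sketch of that selection using the bitext routines (the bit
+ *     positions chosen here are made up for the example; which bits get
+ *     used is part of the interleave configuration):
+ *
+ *         select = bitx64(pa, 10, 8);    which of the 8 regions to use
+ *         norm = bitdel64(pa, 10, 8);    address with the select bits removed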
+ *
+ * CHANNEL
+ *
+ * A channel is used to refer to a single memory channel. This is sometimes
+ * called a DRAM channel as well. A channel operates in a specific mode
+ * based on the JEDEC DRAM standards (e.g. DDR4, LPDDR5, etc.). A
+ * (LP)DDR4/5 channel may support up to two DIMMs inside the channel. The
+ * number of slots is platform dependent and from there the number of DIMMs
+ * installed can vary. Generally speaking, a DRAM channel defines a set
+ * number of signals, most of which go to all DIMMs in the channel; what
+ * varies is which "chip-select" is activated, which causes a given DIMM to
+ * pay attention or not.
+ *
+ * DIMM
+ *
+ * A DIMM refers to a physical hardware component that is installed into a
+ * computer to provide access to dynamic memory. Originally this stood for
+ * dual-inline memory module, though the DIMM itself has evolved beyond
+ * that. A DIMM is organized into various pages, which are addressed by
+ * a combination of rows, columns, banks, bank groups, and ranks. How this
+ * fits together changes from generation to generation and is standardized
+ * in something like DDR4, LPDDR4, DDR5, LPDDR5, etc. These standards
+ * define the general individual modules that are assembled into a DIMM.
+ * There are slightly different standards for combined memory modules
+ * (which is what we use the term DIMM for). Examples of those include
+ * things like registered DIMMs (RDIMMs).
+ *
+ * A DDR4 DIMM contains a single channel that is 64-bits wide with 8 check
+ * bits. A DDR5 DIMM has a notable change in this scheme from earlier DDR
+ * standards. It breaks a single DDR5 DIMM into two sub-channels. Each
+ * sub-channel is independently addressed and contains 32-bits of data and
+ * 8-bits of check data.
+ *
+ * ROW AND COLUMN
+ *
+ * The most basic building block of a DIMM is a die. A DIMM consists of
+ * multiple dies that are organized together (we'll discuss the
+ * organization next). A given die is organized into a series of rows and
+ * columns. First, one selects a row. At which point one is able to select
+ * a specific column. It is more expensive to change rows than columns,
+ * leading a given row to contain approximately 1 KiB of data spread across
+ * its columns. The exact size depends on the device. Each row/column is a
+ * series of capacitors and transistors. The transistor is used to select
+ * data from the capacitor and the capacitor actually contains the logical
+ * 0/1 value.
+ *
+ * BANKS AND BANK GROUPS
+ *
+ * An individual DRAM die is organized in something called a bank. A DIMM
+ * has a number of banks that sit in series. These are then grouped into
+ * larger bank groups. Generally speaking, each bank group has the same
+ * number of banks. Let's take a look at an example of a system with 4
+ * bank groups, each with 4 banks.
+ *
+ * +-----------------------+ +-----------------------+
+ * | Bank Group 0 | | Bank Group 1 |
+ * | +--------+ +--------+ | | +--------+ +--------+ |
+ * | | Bank 0 | | Bank 1 | | | | Bank 0 | | Bank 1 | |
+ * | +--------+ +--------+ | | +--------+ +--------+ |
+ * | +--------+ +--------+ | | +--------+ +--------+ |
+ * | | Bank 2 | | Bank 3 | | | | Bank 2 | | Bank 3 | |
+ * | +--------+ +--------+ | | +--------+ +--------+ |
+ * +-----------------------+ +-----------------------+
+ *
+ * +-----------------------+ +-----------------------+
+ * | Bank Group 2 | | Bank Group 3 |
+ * | +--------+ +--------+ | | +--------+ +--------+ |
+ * | | Bank 0 | | Bank 1 | | | | Bank 0 | | Bank 1 | |
+ * | +--------+ +--------+ | | +--------+ +--------+ |
+ * | +--------+ +--------+ | | +--------+ +--------+ |
+ * | | Bank 2 | | Bank 3 | | | | Bank 2 | | Bank 3 | |
+ * | +--------+ +--------+ | | +--------+ +--------+ |
+ * +-----------------------+ +-----------------------+
+ *
+ * On a DIMM, only a single bank and bank group can be active at a time for
+ * reading or writing an 8 byte chunk of data. However, these are still
+ * pretty important and useful because of the time involved to switch
+ * between them. It is much cheaper to switch between bank groups than
+ * between banks and that time can be cheaper than activating a new row.
+ * This allows memory controllers to pipeline this substantially.
+ *
+ * RANK AND CHIP-SELECT
+ *
+ * The next level of organization is a rank. A rank is effectively an
+ * independent copy of all the banks and bank groups on a DIMM. That is,
+ * there are additional copies of the DIMM's organization, but not the data
+ * itself. Originally a single or dual rank DIMM was built such that one copy
+ * of everything was on each physical side of the DIMM. As the number of
+ * ranks has increased
+ * this has changed as well. Generally speaking, the contents of the rank
+ * are equivalent. That is, you have the same number of bank groups, banks,
+ * and each bank has the same number of rows and columns.
+ *
+ * Ranks are selected by what's called a chip-select, often abbreviated as
+ * CS_L in the various DRAM standards. AMD also often abbreviates this as a
+ * CS (which is not to be confused with the DF class of device called a
+ * CS). These signals are used to select a rank to activate on a DIMM.
+ * There are some number of these for each DIMM which is how the memory
+ * controller chooses which of the DIMMs it's actually going to activate in
+ * the system.
+ *
+ * One interesting gotcha here is how AMD organizes things. Each DIMM
+ * logically is broken into two chip-selects in hardware. Between DIMMs
+ * with more than 2 ranks and 3D stacked RDIMMs, there are ways to
+ * potentially activate more bits. Ultimately these are mapped to a series
+ * of rank multiplication logic internally. These ultimately then control
+ * some of these extra pins, though the exact method isn't 100% clear at
+ * this time.
+ *
+ * -----------------------
+ * Rough Hardware Process
+ * -----------------------
+ *
+ * To better understand how everything is implemented and structured, it's worth
+ * briefly describing what happens when hardware wants to read a given physical
+ * address. This is roughly summarized in the following chart. In the left hand
+ * side is the type of address, which is transformed and generally shrinks along
+ * the way. Next to it is the actor that is taking action and the type of
+ * address that it starts with.
+ *
+ * +---------+ +------+
+ * | Virtual | | CPU |
+ * | Address | | Core |
+ * +---------+ +------+
+ * | | The CPU core receives a memory request and then
+ * | * . . . . determines whether this request is DRAM or MMIO
+ * | | (memory-mapped I/O) and then sends it to the data
+ * v v fabric.
+ * +----------+ +--------+
+ * | Physical | | Data |
+ * | Address | | Fabric |
+ * +----------+ +--------+
+ * | | The data fabric instance in the CCX/D uses the
+ * | * . . . . programmed DRAM rules to determine what DRAM
+ * | | channel to direct a request to and what the
+ * | | channel-relative address is. It then sends the
+ * | | request through the fabric. Note, the number of
+ * | | DRAM rules varies based on the processor SoC.
+ * | | Server parts like Milan have many more rules than
+ * | | an APU like Cezanne. The DRAM rules tell us both
+ * v v how to find and normalize the physical address.
+ * +---------+ +---------+
+ * | Channel | | DRAM |
+ * | Address | | Channel |
+ * +---------+ +---------+
+ * | | The UMC (unified memory controller) receives the
+ * | * . . . . DRAM request and determines which DIMM to send
+ * | | the request to along with the rank, banks, row,
+ * | | column, etc. It initiates a DRAM transaction and
+ * | | then sends the results back through the data
+ * v v fabric to the CPU core.
+ * +---------+ +--------+
+ * | DIMM | | Target |
+ * | Address | | DIMM |
+ * +---------+ +--------+
+ *
+ * The above is all generally done in hardware. There are multiple steps
+ * internal to this that we end up mimicking in software. This includes things
+ * like applying hashing logic, address transformations, and related.
+ * Thankfully the hardware is fairly generic and programmed with enough
+ * information that we can pull out to figure this out. The rest of this theory
+ * statement covers the major parts of this: interleaving, the act of
+ * determining which memory channel to actually go to, and normalization, the
+ * act of removing some portion of the physical address bits to determine the
+ * address relative to a channel.
+ *
+ * ------------------------
+ * Data Fabric Interleaving
+ * ------------------------
+ *
+ * One of the major parts of address decoding is to understand how the
+ * interleaving features work in the data fabric. This is used to allow an
+ * address range to be spread out between multiple memory channels and then,
+ * later on, when normalizing the address. As mentioned above, a system address
+ * matches a rule which has information on interleaving. Interleaving comes in
+ * many different flavors. It can be used to just switch between channels,
+ * sockets, and dies. It can also end up involving some straightforward and some
+ * fairly complex hashing operations.
+ *
+ * Each DRAM rule has instructions on how to perform this interleaving. The way
+ * this works is that the rule first says to start at a given address bit,
+ * generally ranging from bit 8 to 12. This influences the granularity of the
+ * interleaving going on. From there, the rules determine how many bits to use
+ * from the address to determine the die, socket, and channel. In the simplest
+ * form, these perform a log2 of the actual number of things you're interleaving
+ * across (we'll come back to non-powers of two). So let's work a few common
+ * examples:
+ *
+ * o 8-channel interleave, 1-die interleave, 2-socket interleave
+ * Start at bit 9
+ *
+ * In this case we have 3 bits that determine the channel to use, 0 bits
+ * for the die, 1 bit for the socket. Here we would then use the following
+ * bits to determine what the channel, die, and socket IDs are:
+ *
+ * [12] - Socket ID
+ * [11:9] - Channel ID
+ *
+ * You'll note that there was no die-interleave, which means the die ID is
+ *      always zero. This is what you generally expect to see on Zen 2 and 3
+ *      based systems, as they only have one die, or on a Zen 1 APU.
+ *
+ * o 2-channel interleave, 4-die interleave, 2-socket interleave
+ * Start at bit 10
+ *
+ * In this case we have 1 bit for the channel and socket interleave. We
+ * have 2 bits for the die. This is something you might see on a Zen 1
+ * system. This results in the following bits:
+ *
+ * [13] - Socket ID
+ * [12:11] - Die ID
+ * [10] - Channel ID
+ *
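+ * To make the first (8-channel) example above concrete, here is a small
+ * sketch. This is ours and purely illustrative (it is not code in this
+ * driver); it assumes the usual <sys/types.h>/<stdint.h> integer types:
+ *
+ *      static void
+ *      simple_8ch_2sock(uint64_t a, uint32_t *sockp, uint32_t *chanp)
+ *      {
+ *              // 3 channel bits and 1 socket bit, starting at bit 9.
+ *              *chanp = (a >> 9) & 0x7;
+ *              *sockp = (a >> 12) & 0x1;
+ *      }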
+ *
+ * COD and NPS HASHING
+ *
+ * However, this isn't the only way to extract the above values. The
+ * other primary method is using a hash. While the exact hash methods vary
+ * between Zen 2/3 and Zen 4 based systems, they follow a general scheme. In the
+ * system there are three interleaving configurations that are either global or
+ * enabled on a per-rule basis. These indicate whether one should perform the
+ * XOR computation using addresses at:
+ *
+ * o 64 KiB (starting at bit 16)
+ * o 2 MiB (starting at bit 21)
+ * o 1 GiB (starting at bit 30)
+ *
+ * In this world, you take the starting address bit defined by the rule and XOR
+ * it with each enabled interleave address. If you have more than one bit to
+ * select (e.g. because you are hashing across more than 2 channels), then you
+ * continue taking subsequent bits from each enabled region. So the second bit
+ * would use 17, 21, and 31 if all three ranges were enabled while the third bit
+ * would use 18, 22, and 32. While these are straightforward, there is a catch.
+ *
+ * While the DRAM rule indicates what the starting address bit is, you don't
+ * actually use subsequent bits in the same way. Instead, subsequent bits are
+ * deterministic and use bits 12 and 13 from the address. This is not the same
+ * consecutive thing that one might expect. Let's look at a Rome/Milan based
+ * example:
+ *
+ * o 8-channel "COD" hashing, starting at address 9. All three ranges enabled.
+ * 1-die and 1-socket interleaving.
+ *
+ * In this model we are using 3 bits for the channel, 0 bits for the socket
+ * and die.
+ *
+ * Channel ID[0] = addr[9] ^ addr[16] ^ addr[21] ^ addr[30]
+ * Channel ID[1] = addr[12] ^ addr[17] ^ addr[22] ^ addr[31]
+ * Channel ID[2] = addr[13] ^ addr[18] ^ addr[23] ^ addr[32]
+ *
+ * So through this scheme we'd have a socket/die of 0, and then the channel
+ * ID is computed based on that. The number of bits that we use here
+ * depends on how many channels the hash is going across.
+ *
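+ * As a rough sketch (again ours, not this driver's code), the 8-channel
+ * "COD" extraction above could be written with a small helper that pulls a
+ * single address bit:
+ *
+ *      static uint64_t bit(uint64_t a, uint_t n) { return ((a >> n) & 1); }
+ *
+ *      static uint32_t
+ *      cod_8ch_chan(uint64_t a)
+ *      {
+ *              uint32_t c0, c1, c2;
+ *
+ *              c0 = bit(a, 9) ^ bit(a, 16) ^ bit(a, 21) ^ bit(a, 30);
+ *              c1 = bit(a, 12) ^ bit(a, 17) ^ bit(a, 22) ^ bit(a, 31);
+ *              c2 = bit(a, 13) ^ bit(a, 18) ^ bit(a, 23) ^ bit(a, 32);
+ *              return (c0 | (c1 << 1) | (c2 << 2));
+ *      }
+ *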
+ * The Genoa and related variants, termed "NPS", have a few wrinkles. First,
+ * rather than 3 bits being used for the channel, up to 4 bits are. Second,
+ * while the Rome/Milan "COD" hash above does not support socket or die
+ * interleaving, the "NPS" hash actually supports socket interleaving. However,
+ * unlike the straightforward non-hashing scheme, the first bit is used to
+ * determine the socket when enabled as opposed to the last one. In addition, if
+ * we're not performing socket interleaving, then we end up throwing address bit
+ * 14 into the mix here. Let's look at examples:
+ *
+ * o 4-channel "NPS" hashing, starting at address 8. All three ranges enabled.
+ * 1-die and 1-socket interleaving.
+ *
+ * In this model we are using 2 bits for the channel, 0 bits for the socket
+ * and die. Because socket interleaving is not being used, bit 14 ends up
+ * being added into the first bit of the channel selection. Presumably this
+ * is to improve the address distribution in some form.
+ *
+ * Channel ID[0] = addr[8] ^ addr[16] ^ addr[21] ^ addr[30] ^ addr[14]
+ * Channel ID[1] = addr[12] ^ addr[17] ^ addr[22] ^ addr[31]
+ *
+ * o 8-channel "NPS" hashing, starting at address 9. All three ranges enabled.
+ * 1-die and 2-socket interleaving.
+ *
+ * In this model we are using 3 bits for the channel and 1 for the socket.
+ * The die is always set to 0. Unlike the above, address bit 14 is not used
+ * because it ends up being required for the 4th address bit.
+ *
+ * Socket ID[0] = addr[9] ^ addr[16] ^ addr[21] ^ addr[30]
+ * Channel ID[0] = addr[12] ^ addr[17] ^ addr[22] ^ addr[31]
+ * Channel ID[1] = addr[13] ^ addr[18] ^ addr[23] ^ addr[32]
+ * Channel ID[2] = addr[14] ^ addr[19] ^ addr[24] ^ addr[33]
+ *
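+ * The bit 14 wrinkle in the first (4-channel) example can be sketched as
+ * follows, reusing the illustrative bit() helper from the earlier sketch:
+ *
+ *      static uint32_t
+ *      nps_4ch_chan(uint64_t a)
+ *      {
+ *              uint32_t c0, c1;
+ *
+ *              // Bit 14 only folds into the first channel bit because no
+ *              // socket interleaving is taking place.
+ *              c0 = bit(a, 8) ^ bit(a, 16) ^ bit(a, 21) ^ bit(a, 30) ^
+ *                  bit(a, 14);
+ *              c1 = bit(a, 12) ^ bit(a, 17) ^ bit(a, 22) ^ bit(a, 31);
+ *              return (c0 | (c1 << 1));
+ *      }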
+ *
+ * ZEN 3 6-CHANNEL
+ *
+ * These were the simple cases. Things get more complex when we move to
+ * non-power of 2 based hashes between channels. There are two different sets of
+ * these schemes. The first of these is 6-channel hashing that was added in Zen
+ * 3. The second of these is a more complex and general form that was added in
+ * Zen 4. Let's start with the Zen 3 case. The Zen 3 6-channel hash requires
+ * starting at address bit 11 or 12 and varies its logic somewhat from there.
+ * In the 6-channel world, the socket and die interleaving must be disabled.
+ * Let's walk through an example:
+ *
+ * o 6-channel Zen 3, starting at address 11. 2M and 1G range enabled.
+ * 1-die and 1-socket interleaving.
+ *
+ * Regardless of the starting address, we will always use three bits to
+ * determine a channel address. However, it's worth calling out that the
+ * 64K range is not considered for this at all. Another oddity is that when
+ * calculating the hash bits the order of the extracted 2M and 1G addresses
+ * are different.
+ *
+ * This flow starts by calculating the three hash bits. This is defined
+ * below. In the following, all bits marked with an '@' are ones that will
+ * change when starting at address bit 12. In those cases the value will
+ * increase by 1. Here's how we calculate the hash bits:
+ *
+ * hash[0] = addr[11@] ^ addr[14@] ^ addr[23] ^ addr[32]
+ * hash[1] = addr[12@] ^ addr[21] ^ addr[30]
+ * hash[2] = addr[13@] ^ addr[22] ^ addr[31]
+ *
+ * With this calculated, we always assign the first bit of the channel
+ * based on the hash. The other bits are more complicated as we have to
+ * deal with that gnarly power of two problem. We determine whether or not
+ * to use the hash bits directly in the channel based on their value. If
+ * they are not equal to 3, then we use it, otherwise if they are, then we
+ *      they are not equal to 3, then we use them; otherwise, we need to go
+ *      back to the physical address and take its modulus.
+ *
+ * Channel Id[0] = hash[0]
+ * if (hash[2:1] == 3)
+ * Channel ID[2:1] = (addr >> [11@+3]) % 3
+ * else
+ * Channel ID[2:1] = hash[2:1]
+ *
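+ * A hedged sketch of the 6-channel selection above (starting at bit 11, 2M
+ * and 1G ranges enabled), once more reusing the illustrative bit() helper:
+ *
+ *      static uint32_t
+ *      zen3_6ch_chan(uint64_t a)
+ *      {
+ *              uint32_t h0, h1, h2, chan;
+ *
+ *              h0 = bit(a, 11) ^ bit(a, 14) ^ bit(a, 23) ^ bit(a, 32);
+ *              h1 = bit(a, 12) ^ bit(a, 21) ^ bit(a, 30);
+ *              h2 = bit(a, 13) ^ bit(a, 22) ^ bit(a, 31);
+ *
+ *              chan = h0;
+ *              if (h2 == 1 && h1 == 1) {
+ *                      // The 0b11 case falls back to a mod 3 of the
+ *                      // remaining upper address bits.
+ *                      chan |= ((a >> 14) % 3) << 1;
+ *              } else {
+ *                      chan |= (h2 << 2) | (h1 << 1);
+ *              }
+ *              return (chan);
+ *      }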
+ *
+ * ZEN 4 NON-POWER OF 2
+ *
+ * I hope you like modulus calculations, because things get even more complex
+ * here now in Zen 4 which has many more modulus variations. These function in a
+ * similar way to the older 6-channel hash in Milan. They require one to start
+ * at address bit 8, they require that there is no die interleaving, and they
+ * support socket interleaving. The different channel arrangements end up in one
+ * of two sets of modulus values: a mod 3 and a mod 5, based on the number
+ * of channels used. Unlike the Milan form, all three address ranges (64 KiB, 2
+ * MiB, 1 GiB) are allowed to be used.
+ *
+ * o 6-channel Zen 4, starting at address 8. 64K, 2M, and 1G range enabled.
+ * 1-die and 2-socket interleaving.
+ *
+ * We start by calculating the following set of hash bits regardless of
+ * the number of channels that exist. The set of hash bits that is actually
+ * used in various computations ends up varying based upon the number of
+ *      channels used. In the 3- and 5-channel configs, only hash[0] is used.
+ *      In the 6- and 10-channel configs, both hash[0] and hash[2] are used
+ *      (yes, not hash[1]). The 12-channel config uses all three.
+ *
+ * hash[0] = addr[8] ^ addr[16] ^ addr[21] ^ addr[30] ^ addr[14]
+ * hash[1] = addr[12] ^ addr[17] ^ addr[22] ^ addr[31]
+ * hash[2] = addr[13] ^ addr[18] ^ addr[23] ^ addr[32]
+ *
+ *      Unlike other schemes where the hash bits map directly to channel
+ *      bits, here they are instead used to seed the overall value. Depending
+ *      on whether hash[0] is a 0 or 1, the system goes through two different
+ *      calculations entirely, though both end up involving the remainder of
+ *      the system address going through the modulus. In the following, a
+ *      '3@' indicates the modulus value would
+ * be swapped to 5 in a different scenario.
+ *
+ * Channel ID = addr[63:14] % 3@
+ * if (hash[0] == 1)
+ * Channel ID = (Channel ID + 1) % 3@
+ *
+ *      Once this base for the channel ID has been calculated, additional
+ * portions are added in. As this is the 6-channel form, we say:
+ *
+ * Channel ID = Channel ID + (hash[2] * 3@)
+ *
+ * Finally the socket is deterministic and always comes from hash[0].
+ * Basically:
+ *
+ * Socket ID = hash[0]
+ *
+ * o 12-channel Zen 4, starting at address 8. 64K, 2M, and 1G range enabled.
+ * 1-die and 1-socket interleaving.
+ *
+ * This is a variant of the above. The hash is calculated the same way.
+ * The base Channel ID is the same and if socket interleaving were enabled
+ * it would also be hash[0]. What instead differs is how we use hash[1]
+ * and hash[2]. The following logic is used instead of the final
+ * calculation above.
+ *
+ * Channel ID = Channel ID + (hash[2:1] * 3@)
+ *
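+ * Roughly, and following the formulas above (this is an illustration of the
+ * math, not the driver's implementation), the 6-channel selection looks
+ * like:
+ *
+ *      static void
+ *      zen4_6ch_ids(uint64_t a, uint32_t *sockp, uint32_t *chanp)
+ *      {
+ *              uint32_t h0, h2, chan;
+ *
+ *              h0 = bit(a, 8) ^ bit(a, 16) ^ bit(a, 21) ^ bit(a, 30) ^
+ *                  bit(a, 14);
+ *              h2 = bit(a, 13) ^ bit(a, 18) ^ bit(a, 23) ^ bit(a, 32);
+ *
+ *              chan = (a >> 14) % 3;
+ *              if (h0 == 1)
+ *                      chan = (chan + 1) % 3;
+ *              chan += h2 * 3;
+ *
+ *              *sockp = h0;
+ *              *chanp = chan;
+ *      }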
+ *
+ * POST BIT EXTRACTION
+ *
+ * Now, all of this was done to concoct up a series of indexes used. However,
+ * you'll note that a given DRAM rule actually already has a fabric target. So
+ * what do we do here? We add them together.
+ *
+ * The data fabric has registers that describe which bits in a fabric ID
+ * correspond to a socket, die, and channel. Taking the channel, die, and socket
+ * IDs above, one can construct a fabric ID. From there, we add the two data
+ * fabric IDs together and can then get to the fabric ID of the actual logical
+ * target. This is why all of the socket and die interleaving examples with no
+ * interleaving are OK to result in a zero. The idea here is that the base
+ * fabric ID in the DRAM rule will take care of indicating those other things as
+ * required.
+ *
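+ * A rough sketch of that composition step follows. The shift parameters are
+ * hypothetical stand-ins for what the DF's fabric ID decomposition registers
+ * would actually tell us about where the socket and die components live:
+ *
+ *      static uint32_t
+ *      compose_fabric_id(uint32_t sock, uint32_t die, uint32_t chan,
+ *          uint_t sock_shift, uint_t die_shift, uint32_t rule_dest_id)
+ *      {
+ *              // The interleave-derived ID is added to the DRAM rule's
+ *              // destination fabric ID to find the logical target.
+ *              return (rule_dest_id +
+ *                  ((sock << sock_shift) | (die << die_shift) | chan));
+ *      }
+ *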
+ * You'll note the use of the term "logical target" up above. That's because
+ * some platforms have the ability to remap logical targets to physical targets
+ * (identified by the use of the ZEN_UMC_FAM_F_TARG_REMAP flag in the family
+ * data). The way that remapping works changes based on the hardware generation.
+ * This was first added in Milan (Zen 3) CPUs. In that model, you would use the
+ * socket and component information from the target ID to identify which
+ * remapping rules to use. On Genoa (Zen 4) CPUs, you would instead use
+ * information in the rule itself to determine which of the remap rule sets to
+ * use and then use the component ID to select which rewrite rule to use.
+ *
+ * Finally, there's one small wrinkle with this whole scheme that we haven't
+ * discussed: what actually is the address that we plug into this calculation.
+ * While you might think it actually is just the system address itself, that
+ * isn't actually always the case. Sometimes rather than using the address
+ * itself, it gets normalized based on the DRAM rule, which involves subtracting
+ * out the base address and potentially subtracting out the size of the DRAM
+ * hole (if the address is above the hole and hoisting is active for that
+ * range). When this is performed appears to be tied to the DF generation.
+ * After Zen 3, it is always the default (e.g. Zen 4 and things from DF gen
+ * 3.5). At and
+ * before Zen 3, it only occurs if we are doing a non-power of 2 based hashing.
+ *
+ * --------------------------------------------
+ * Data Fabric Interleave Address Normalization
+ * --------------------------------------------
+ *
+ * While you may have thought that we were actually done with the normalization
+ * fun in the last section, there's still a bit more here that we need to
+ * consider. In particular, there's a secondary transformation beyond
+ * interleaving that occurs as part of constructing the channel normalized
+ * address. Effectively, we need to account for all the bits that were used in
+ * the interleaving and generally speaking remove them from our normalized
+ * address.
+ *
+ * While this may sound weird on paper, the way to think about it is that
+ * interleaving at some granularity means that each device is grabbing the same
+ * set of addresses, the interleave just is used to direct it to its own
+ * location. When working with a channel normalized address, we're effectively
+ * creating a new region of addresses that have meaning within the DIMMs
+ * themselves. The channel doesn't care about what got it there, mainly just
+ * what it is now. So with that in mind, we need to discuss how we remove all
+ * the interleaving information in our different modes.
+ *
+ * Just to make sure it's clear, we are _removing_ all bits that were used for
+ * interleaving. This causes all bits above the removed ones to be shifted
+ * right.
+ *
+ * First, we have the case of standard power of 2 interleaving that applies to
+ * the 1, 2, 4, 8, 16, and 32 channel configurations. Here, we need to account
+ * for the total number of bits that are used for the channel, die, and socket
+ * interleaving and we simply remove all those bits starting from the starting
+ * address.
+ *
+ * o 8-channel interleave, 1-die interleave, 2-socket interleave
+ * Start at bit 9
+ *
+ * If we look at this example, we are using 3 bits for the channel, 1 for
+ * the socket, for a total of 4 bits. Because this is starting at bit 9,
+ * this means that interleaving covers the bit range [12:9]. In this case
+ * our new address would be (orig[63:13] >> 4) | orig[8:0].
+ *
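+ * As a small sketch of that removal (ours, purely illustrative) for the
+ * example above:
+ *
+ *      static uint64_t
+ *      norm_pow2_8ch_2sock(uint64_t a)
+ *      {
+ *              // Drop the 4 interleave bits [12:9]: keep [8:0] and shift
+ *              // everything from bit 13 upwards down by 4.
+ *              return (((a >> 13) << 9) | (a & 0x1ff));
+ *      }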
+ *
+ * COD and NPS HASHING
+ *
+ * That was the simple case, next we have the COD/NPS hashing case that we need
+ * to consider. If we look at these, the way that they work is that they split
+ * which bits they use for determining the channel address and then hash others
+ * in. Here, we need to extract the starting address bit, then continue at bit
+ * 12 based on the number of bits in use and whether or not socket interleaving
+ * is at play for the NPS variant. Let's look at an example here:
+ *
+ * o 8-channel "COD" hashing, starting at address 9. All three ranges enabled.
+ * 1-die and 1-socket interleaving.
+ *
+ * Here we have three total bits being used. Because we start at bit 9, this
+ * means we need to drop bits [13:12], [9]. So our new address would be:
+ *
+ * orig[63:14] >> 3 | orig[11:10] >> 1 | orig[8:0]
+ * | | +-> stays the same
+ * | +-> relocated to bit 9 -- shifted by 1 because we
+ * | removed bit 9.
+ * +--> Relocated to bit 11 -- shifted by 3 because we removed bits, 9, 12,
+ * and 13.
+ *
+ * o 8-channel "NPS" hashing, starting at address 8. All three ranges enabled.
+ * 1-die and 2-socket interleaving.
+ *
+ * Here we need to remove bits [14:12], [8]. We're removing an extra bit
+ * because we have 2-socket interleaving. This results in a new address of:
+ *
+ * orig[63:15] >> 4 | orig[11:9] >> 1 | orig[7:0]
+ * | | +-> stays the same
+ * | +-> relocated to bit 8 -- shifted by 1 because we
+ * | removed bit 8.
+ * +--> Relocated to bit 11 -- shifted by 4 because we removed bits, 8, 12,
+ * 13, and 14.
+ *
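+ * The first (COD) example above could be sketched like this, again purely
+ * for illustration:
+ *
+ *      static uint64_t
+ *      norm_cod_8ch(uint64_t a)
+ *      {
+ *              uint64_t low = a & 0x1ff;          // orig[8:0]
+ *              uint64_t mid = (a >> 10) & 0x3;    // orig[11:10]
+ *              uint64_t high = a >> 14;           // orig[63:14]
+ *
+ *              // Bits 9, 12, and 13 have been removed.
+ *              return ((high << 11) | (mid << 9) | low);
+ *      }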
+ *
+ * ZEN 3 6-CHANNEL
+ *
+ * Now, to the real fun stuff, our non-powers of two. First, let's start with
+ * our friend, the Zen 3 6-channel hash. So, the first thing that we need to do
+ * here is start by recomputing our hash again based on the current normalized
+ * address. Regardless of the hash value, this first removes all three bits from
+ * the starting address, so that's removing either [14:12] or [13:11].
+ *
+ * The rest of the normalization process here is quite complex and somewhat mind
+ * bending. Let's start working through an example here and build this up.
+ * First, let's assume that each channel has a single 16 GiB RDIMM. This would
+ * mean that the six channels together hold 96 GiB of RDIMMs. However, by
+ * removing 3 bits worth, that technically corresponds to an 8-channel
+ * configuration that would normally suggest a 128 GiB configuration. The
+ * processor requires us to record this fact in the DF::Np2ChannelConfig
+ * register. The value that it wants is a bit weird. We believe it's
+ * calculated by the following:
+ *
+ * 1. Round the channel size up to the next power of 2.
+ * 2. Divide this total size by 64 KiB.
+ * 3. Determine the log base 2 that satisfies this value.
+ *
+ * In our particular example above, we have 96 GiB, so for (1) we end up with
+ * 128 GiB (2^37). We now divide that by 64 KiB (2^16), so this becomes
+ * 2^(37 - 16) or 2^21. Because we want the log base 2 of 2^21 from (2), this
+ * simply becomes 21. The DF::Np2ChannelConfig register has two members, a
+ * 'space 0' and a 'space 1'. Near as we can tell, in this mode only 'space 0'
+ * is used.
+ *
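+ * A sketch of that calculation, with a made-up helper name, might look
+ * like:
+ *
+ *      static uint_t
+ *      np2_space0(uint64_t chan_size)
+ *      {
+ *              uint64_t p2 = 1;
+ *              uint_t log2 = 0;
+ *
+ *              // Round up to the next power of two...
+ *              while (p2 < chan_size) {
+ *                      p2 <<= 1;
+ *                      log2++;
+ *              }
+ *              // ...and express it as a power of two of 64 KiB (2^16)
+ *              // units, e.g. 128 GiB (2^37) becomes 21.
+ *              return (log2 - 16);
+ *      }
+ *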
+ * Before we get into the actual normalization scheme, we have to ask ourselves
+ * how do we actually interleave data 6 ways. The scheme here is involved.
+ * First, it's important to remember that, like with other normalization
+ * schemes, we do adjust the address for the base address in the DRAM rule and
+ * then also
+ * take into account the DRAM hole if present.
+ *
+ * If we delete 3 bits, let's take a sample address and see where it would end
+ * up in the above scheme. We're going to take our 3 address bits and say that
+ * they start at bit 12, so this means that the bits removed are [14:12]. So the
+ * following are the 8 addresses that we have here and where they end up
+ * starting with 1ff:
+ *
+ * o 0x01ff -> 0x1ff, Channel 0 (hash 0b000)
+ * o 0x11ff -> 0x1ff, Channel 1 (hash 0b001)
+ * o 0x21ff -> 0x1ff, Channel 2 (hash 0b010)
+ * o 0x31ff -> 0x1ff, Channel 3 (hash 0b011)
+ * o 0x41ff -> 0x1ff, Channel 4 (hash 0b100)
+ * o 0x51ff -> 0x1ff, Channel 5 (hash 0b101)
+ * o 0x61ff -> 0x3000001ff, Channel 0 (hash 0b110)
+ * o 0x71ff -> 0x3000001ff, Channel 1 (hash 0b111)
+ *
+ * Yes, we did just jump to near the top of what is a 16 GiB DIMM's range for
+ * those last two. The way we determine when to do this jump is based on our
+ * hash. Effectively we ask what is hash[2:1]. If it is 0b11, then we need to
+ * do something different and enter this special case, basically jumping to the
+ * top of the range. If we think about a 6-channel configuration for a moment,
+ * the thing that doesn't exist are the traditional 8-channel hash DIMMs 0b110
+ * and 0b111.
+ *
+ * If you go back to the interleaving logic, this kind of meshes: it handled
+ * the cases of the hash being 0, 1, and 2 normally, and then did special
+ * things when the hash was in this upper quadrant. The hash then determined
+ * where the address went by shifting over the upper address, doing a mod 3,
+ * and using that to determine the upper two bits. With that weird address at
+ * the top of the range, let's go through and see what else actually goes to
+ * those weird addresses:
+ *
+ * o 0x08000061ff -> 0x3000001ff, Channel 2 (hash 0b110)
+ * o 0x08000071ff -> 0x3000001ff, Channel 3 (hash 0b111)
+ * o 0x10000061ff -> 0x3000001ff, Channel 4 (hash 0b110)
+ * o 0x10000071ff -> 0x3000001ff, Channel 5 (hash 0b111)
+ *
+ * Based on the above you can see that we've split the 16 GiB DIMM into a 12 GiB
+ * region (e.g. [ 0x0, 0x300000000 ), and a 4 GiB region [ 0x300000000,
+ * 0x400000000 ). What seems to happen is that the CPU algorithmically is going
+ * to put things in this upper range. To perform that action it goes back to the
+ * register information that we stored in DF::Np2ChannelConfig. The way this
+ * seems to be thought of is it wants to set the upper two bits of a 64 KiB
+ * chunk (e.g. bits [15:14]) to 0b11 and then shift that over based on the DIMM
+ * size.
+ *
+ * Our 16 GiB DIMM has 34 bits, so effectively we want to set bits [33:32] in
+ * this case. The channel is 37 bits wide, which the CPU again knows as 2^21 *
+ * 2^16. So it constructs the 64 KiB value of [15:14] = 0b11 and fills the rest
+ * with zeros. It then multiplies it by 2^(21 - 3), or 2^18. The - 3 comes from
+ * the fact that we removed 3 address bits. This when added to the above gets
+ * us bits [33:32] = 0b11.
+ *
+ * While this appears to be the logic, I don't have a proof that this scheme
+ * actually evenly covers the entire range, but a few examples appear to work
+ * out.
+ *
+ * With this, the standard example flow that we give results in something like:
+ *
+ * o 6-channel Zen 3, starting at address 11. 2M and 1G range enabled. Here,
+ * we assume that the value of the NP2 space0 is 21 bits. This example
+ * assumes we have 96 GiB total memory, which means rounding up to 128 GiB.
+ *
+ * Step 1 here is to adjust our address to remove the three bits indicated.
+ * So we simply always set our new address to:
+ *
+ * orig[63:14] >> 3 | orig[10:0]
+ * | +-> stays the same
+ * +--> Relocated to bit 11 because a 6-channel config always uses 3 bits to
+ * perform interleaving.
+ *
+ * At this step, one would need to consult the hash of the normalized
+ * address before removing bits (but after adjusting for the base / DRAM
+ * hole). If hash[2:1] == 3, then we would say that the address is actually:
+ *
+ * 0b11 << 32 | orig[63:14] >> 3 | orig[10:0]
+ *
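+ * Putting the two pieces above together as a sketch (ours, illustrative
+ * only), where the hash bits were computed on the pre-removal address as
+ * described:
+ *
+ *      static uint64_t
+ *      zen3_6ch_norm(uint64_t a, uint32_t h1, uint32_t h2)
+ *      {
+ *              // Always drop bits [13:11].
+ *              uint64_t norm = ((a >> 14) << 11) | (a & 0x7ff);
+ *
+ *              // hash[2:1] == 0b11 addresses land in the reclaimed top of
+ *              // the DIMM; bit 32 here comes from the 16 GiB example.
+ *              if (h2 == 1 && h1 == 1)
+ *                      norm |= (uint64_t)0x3 << 32;
+ *              return (norm);
+ *      }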
+ *
+ * ZEN 4 NON-POWER OF 2
+ *
+ * Next, we have the DFv4 versions of the 3, 5, 6, 10, and 12 channel hashing.
+ * An important part of this is whether or not there is any socket hashing going
+ * on. Recall that if socket hashing was going on, then it is part of the
+ * interleave logic; however, if it is not, then its hash actually becomes
+ * part of the normalized address, but not in the same spot!
+ *
+ * In this mode, we always remove the bits that are actually used by the hash.
+ * Recall that some modes use hash[0], others hash[0] and hash[2], and then only
+ * the 12-channel config uses hash[2:0]. This means we need to be careful in how
+ * we actually remove address bits. All other bits in this lower range we end up
+ * keeping and using. The top bits, e.g. addr[63:14] are kept and divided by the
+ * actual channel-modulus. If we're not performing socket interleaving and
+ * therefore need to keep the value of hash[0], then it is appended as the least
+ * significant bit of that calculation.
+ *
+ * Let's look at an example of this to try to make sense of it all.
+ *
+ * o 6-channel Zen 4, starting at address 8. 64K, 2M, and 1G range enabled.
+ * 1-die and 2-socket interleaving.
+ *
+ * Here we'd start by calculating hash[2:0] as described in the earlier
+ * interleaving situation. Because we're using a socket interleave, we will
+ * not opt to include hash[0] in the higher-level address calculation.
+ * Because this is a 6-channel calculation, our modulus is 3. Here, we will
+ * strip out bits 8 and 13 (recall in the interleaving 6-channel example we
+ * ignored hash[1], thus no bit 12 here). Our new address will be:
+ *
+ * (orig[63:14] / 3) >> 2 | orig[12:9] >> 1 | orig[7:0]
+ * | | +-> stays the same
+ * | +-> relocated to bit 8 -- shifted by 1 because
+ * | we removed bit 8.
+ * +--> Relocated to bit 12 -- shifted by 2 because we removed bits 8 and
+ * 13.
+ *
+ * o 12-channel Zen 4, starting at address 8. 64K, 2M, and 1G range enabled.
+ * 1-die and 1-socket interleaving.
+ *
+ * This is a slightly different case from the above in two ways. First, we
+ * will end up removing bits 8, 12, and 13, but then we'll also reuse
+ * hash[0]. Our new address will be:
+ *
+ * ((orig[63:14] / 3) << 1 | hash[0]) >> 3 | orig[11:9] >> 1 | orig[7:0]
+ * | | +-> stays the
+ * | | same
+ * | +-> relocated to bit 8 -- shifted by
+ * | 1 because we removed bit 8.
+ * +--> Relocated to bit 11 -- shifted by 3 because we removed bits 8, 12,
+ * and 13.
+ *
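+ *
+ * As a sketch (ours, not the driver's), the 6-channel case above would look
+ * roughly like:
+ *
+ *      static uint64_t
+ *      zen4_6ch_norm(uint64_t a)
+ *      {
+ *              uint64_t low = a & 0xff;           // orig[7:0]
+ *              uint64_t mid = (a >> 9) & 0xf;     // orig[12:9]
+ *              uint64_t high = (a >> 14) / 3;     // orig[63:14] divided by 3
+ *
+ *              // Bits 8 and 13 have been removed.
+ *              return ((high << 12) | (mid << 8) | low);
+ *      }
+ *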
+ * That's most of the normalization process for the time being. We will have to
+ * revisit this when we have to transform a normalized address into a system
+ * address and undo all this.
+ *
+ * -------------------------------------
+ * Selecting a DIMM and UMC Organization
+ * -------------------------------------
+ *
+ * One of the more nuanced things in decoding and encoding is the question of
+ * where do we send a channel normalized address. That is, now that we've gotten
+ * to a given channel, we need to transform the address into something
+ * meaningful for a DIMM, and select a DIMM as well. The UMC SMN space contains
+ * a number of Base Address and Mask registers which they describe as activating
+ * a chip-select. A given UMC has up to four primary chip-selects (we'll come
+ * back to DDR5 sub-channels later). The first two always go to the first DIMM
+ * in the channel and the latter two always go to the second DIMM in the
+ * channel. Put another way, you can always determine which DIMM you are
+ * referring to by taking the chip-select number and shifting it right by 1.
+ *
+ * The UMC Channel registers are organized a bit differently in different
+ * hardware generations. In a DDR5 based UMC, almost all of our settings are on
+ * a per-chip-select basis, whereas in a DDR4 based system only the bases and
+ * masks are. While gathering data we normalize this such that each logical
+ * chip-select (umc_cs_t) that we have in the system has the same data so that
+ * way DDR4 and DDR5 based systems are the same to the decoding logic. There is
+ * also channel-wide data such as hash configurations and related.
+ *
+ * Each channel has a set of base and mask registers (and secondary ones as
+ * well). To determine if we activate a given one, we first check if the
+ * enabled bit is set. The enabled bit is set on a per-base basis, so both the
+ * primary and secondary registers have separate enables. As there are four of
+ * each base, mask, secondary base, and secondary mask, we say that if a
+ * normalized address matches either a given index's primary or secondary
+ * base/mask pair, then it activates that given UMC index. The basic formula
+ * for an enabled
+ * selection is:
+ *
+ * NormAddr & ~Mask[i] == Base[i] & ~Mask[i]
+ *
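+ * As a trivial sketch of that check (illustrative only; the real code also
+ * has to consult the per-base enable bits):
+ *
+ *      static boolean_t
+ *      cs_matches(uint64_t norm, uint64_t base, uint64_t mask)
+ *      {
+ *              return ((norm & ~mask) == (base & ~mask) ? B_TRUE : B_FALSE);
+ *      }
+ *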
+ * Once this is selected, this index in the UMC is what is always used to
+ * derive the rest of the information that is specific to a given chip-select
+ * or DIMM. An important thing to remember is that from this point onwards,
+ * while there is a bunch of hashing and interleaving logic, it doesn't change
+ * which UMC channel we read the data from, though the particular DIMM, rank,
+ * and address
+ * we access will change as we go through hashing and interleaving.
+ *
+ * ------------------------
+ * Row and Column Selection
+ * ------------------------
+ *
+ * The number of bits that are used for the row and column address of a DIMM
+ * varies based on the type of module itself. These depend on the density of a
+ * DIMM module, e.g. how large an individual DRAM block is, a value such as 16
+ * Gbit, and how wide each device is, which is generally phrased as X4,
+ * X8, and X16. The memory controller encodes the number of bits (derived from
+ * the DIMM's SPD data) and then determines which bits are used for addresses.
+ *
+ * Based on this information we can initially construct a row and a column
+ * address by leveraging the information about the number of bits and then
+ * extracting the correct bits out of the normalized channel address.
+ *
+ * If you've made it this far, you know nothing is quite this simple, despite it
+ * seeming so. Importantly, not all DIMMs actually have storage that is a power
+ * of 2. As such, there's another bit that we have to consult to transform the
+ * actual value that we have for a row; remarkably, the column somehow has no
+ * transformations applied to it.
+ *
+ * The hardware gives us information on inverting the two 'most significant
+ * bits' of the row address which we store in 'ucs_inv_msbs'. First, we have the
+ * question of what are our most significant bits here. This is basically
+ * determined by the number of low and high row bits. In this case higher
+ * actually is what we want. Note, the high row bits only exist in DDR4. Next,
+ * we need to know whether we used the primary or secondary base/mask pair for
+ * this as there is a primary and secondary inversion bits. The higher bit of
+ * the inversion register (e.g ucs_inv_msbs[1]) corresponds to the highest row
+ * bit. A zero in the bit position indicates that we should not perform an
+ * inversion where as a one says that we should invert this.
+ *
+ * To actually make this happen we can take advantage of the fact that the
+ * meaning of a 0/1 above means that this can be implemented with a binary
+ * exclusive-OR (XOR). Logically speaking if we have a don't invert setting
+ * present, a 0, then x ^ 0 is always x. However, if we have a 1 present, then
+ * we know that (for a single bit) x ^ 1 = ~x. We take advantage of this fact in
+ * the row logic.
+ *
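+ * A sketch of that inversion (ours, and glossing over the DDR4 split between
+ * low and high row bits):
+ *
+ *      static uint32_t
+ *      row_invert_msbs(uint32_t row, uint32_t inv_msbs, uint_t nrow_bits)
+ *      {
+ *              // inv_msbs[1] flips the highest row bit, inv_msbs[0] the
+ *              // next one down; XOR with a 0 bit is a no-op.
+ *              return (row ^ (inv_msbs << (nrow_bits - 2)));
+ *      }
+ *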
+ * ---------------------
+ * Banks and Bank Groups
+ * ---------------------
+ *
+ * While addressing within a given module is done by the use of a row and column
+ * address, to increase storage density a module generally has a number of
+ * banks, which may be organized into one or more bank groups. While a given
+ * DDR4/5 access happens in some prefetched chunk of say 64 bytes (what do you
+ * know, that's a cacheline), that all occurs within a single bank. The addition
+ * of bank groups makes it easier to access data in parallel -- it is often
+ * faster to read from another bank group than to read another region inside a
+ * bank group.
+ *
+ * Based on the DIMMs internal configuration, there will be a specified number
+ * of bits used for the overall bank address (including bank group bits)
+ * followed by a number of bits actually used for bank groups. There are
+ * separately an array of bits used to concoct the actual address. It appears,
+ * mostly through experimental evidence, that the bank group bits occur first
+ * and then are followed by the bank selection itself. This makes some sense if
+ * you assume that switching bank groups is faster than switching banks.
+ *
+ * So if we see the UMC noting 4 bank bits and 2 bank group bits, that means
+ * that the umc_cs_t's ucs_bank_bits[1:0] correspond to bank_group[1:0] and
+ * ucs_bank_bits[3:2] correspond to bank_address[1:0]. However, if there were no
+ * bank group bits indicated, then all of the bank bits would correspond to the
+ * bank address.
+ *
+ * Now, this would all be straightforward if not for hashing, our favorite.
+ * There are five bank hashing registers per channel (UMC_BANK_HASH_DDR4,
+ * UMC_BANK_HASH_DDR5), one for each of the five possible bank bits. To
+ * do this we need to use the calculated row and column that we previously
+ * determined. This calculation happens in a few steps:
+ *
+ * 1) First check if the enable bit is set in the rule. If not, just use the
+ * normal bank address bit and we're done.
+ * 2) Take a bitwise-AND of the calculated row and hash register's row value.
+ * Next do the same thing for the column.
+ * 3) For each bit in the row, progressively XOR it, e.g. row[0] ^ row[1] ^
+ * row[2] ^ ... to calculate a net bit value for the row. This then
+ * repeats itself for the column. What basically has happened is that we're
+ * using the hash register to select which bits to impact our decision.
+ * Think of this as a traditional bitwise functional reduce.
+ *   4) XOR the combined row bit with the column bit and the actual bank
+ * address bit from the normalized address. So if this were bank bit 0,
+ * which indicated we should use bit 15 for bank[0], then we would
+ * ultimately say our new bit is norm_addr[15] ^ row_xor ^ col_xor
+ *
+ * An important caveat is that we would only consult all this if we actually
+ * were told that the bank bit was being used. For example if we had 3 bank
+ * bits, then we'd only check the first 3 hash registers. The latter two would
+ * be ignored.
+ *
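+ * A sketch of steps (2) through (4) for a single bank bit, assuming the
+ * enable check in (1) has already passed (ours, purely for illustration):
+ *
+ *      static uint32_t
+ *      bank_hash_bit(uint32_t norm_bit, uint32_t row, uint32_t col,
+ *          uint32_t hash_row, uint32_t hash_col)
+ *      {
+ *              uint32_t r = row & hash_row;
+ *              uint32_t c = col & hash_col;
+ *              uint32_t x = norm_bit;
+ *
+ *              // Functionally reduce the selected row and column bits with
+ *              // XOR, folding them into the original bank address bit.
+ *              while (r != 0) {
+ *                      x ^= (r & 1);
+ *                      r >>= 1;
+ *              }
+ *              while (c != 0) {
+ *                      x ^= (c & 1);
+ *                      c >>= 1;
+ *              }
+ *              return (x);
+ *      }
+ *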
+ * Once this process is done, then we can go back and split the activated bank
+ * into the actual bank used and the bank group used based on the first bits
+ * going to the bank group.
+ *
+ * ----------------
+ * DDR5 Sub-channel
+ * ----------------
+ *
+ * As described in the definitions section, DDR5 has the notion of a
+ * sub-channel. Here, a single bit is used to determine which of the
+ * sub-channels to actually operate and utilize. Importantly the same
+ * chip-select seems to apply to both halves of a given sub-channel.
+ *
+ * There is also a hash that is used here. The hash here utilizes the calculated
+ * bank, column, and row and follows the same pattern used in the bank
+ * calculation where we do a bunch of running exclusive-ORs and then do that
+ * with the original value we found to get the new value. Because there's only
+ * one bit for the sub-channel, we only have a single hash to consider.
+ *
+ * -------------------------------------------
+ * Ranks, Chip-Select, and Rank Multiplication
+ * -------------------------------------------
+ *
+ * The notion of ranks and the chip-select are interwoven. From a strict DDR4
+ * RDIMM perspective, there are two lines that are dedicated for chip-selects
+ * and then another two that are shared with three 'chip-id' bits that are used
+ * in 3DS RDIMMs. In all cases the controller starts with two logical chip
+ * selects and then uses something called rank multiplication to figure out how
+ * to multiplex that and map to the broader set of things. Basically, in
+ * reality, DDR4 RDIMMs allow for 4 bits to determine a rank and then 3DS RDIMMs
+ * use 2 bits for a rank and 3 bits to select a stacked chip. In DDR5 this is
+ * different and you just have 2 bits for a rank.
+ *
+ * It's not entirely clear from what we know from AMD, but it seems that we use
+ * the RM bits as a way to basically go beyond the basic 2 bits of chip-select
+ * which is determined based on which channel we logically activate. Initially
+ * we treat this as two distinct things, here as that's what we get from the
+ * hardware. There are two hashes here: a chip-select and a rank-multiplication
+ * hash. Unlike the others, which rely on the bank, row, and column addresses,
+ * this hash relies on the normalized address. So we calculate that mask and do
+ * our same xor dance.
+ *
+ * There is one hash for each rank multiplication bit and chip-select bit. The
+ * number of rank multiplication bits is given to us. The number of chip-select
+ * bits is fixed, it's simply two because there are four base/mask registers and
+ * logical chip-selects in a given UMC channel. The chip-select on some DDR5
+ * platforms has a secondary exclusive-OR hash that can be applied. As this only
+ * exists in some families, for any where it does exist, we seed it to be zero
+ * so that it becomes a no-op.
+ *
+ * -----------
+ * Future Work
+ * -----------
+ *
+ * As the road goes ever on and on, down from the door where it began, there are
+ * still some stops on the journey for this driver. In particular, here are the
+ * major open areas that could be implemented to extend what this can do:
+ *
+ * o The ability to transform a normalized channel address back to a system
+ * address. This is required for MCA/MCA-X error handling as those generally
+ * work in terms of channel addresses.
+ * o Integrating with the MCA/MCA-X error handling paths so that way we can
+ * take correct action in the face of ECC errors and allowing recovery from
+ * uncorrectable errors.
+ * o Providing memory controller information to FMA so that way it can opt to
+ *    do predictive failure or give us more information about what is faulty
+ *    with ECC errors.
+ *  o Figuring out if we will get MCEs for privileged address decoding and if so
+ * mapping those back to system addresses and related.
+ * o 3DS RDIMMs likely will need a little bit of work to ensure we're handling
+ * the resulting combination of the RM bits and CS and reporting it
+ * intelligently.
+ */
+
+#include <sys/types.h>
+#include <sys/file.h>
+#include <sys/errno.h>
+#include <sys/open.h>
+#include <sys/cred.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/stat.h>
+#include <sys/conf.h>
+#include <sys/devops.h>
+#include <sys/cmn_err.h>
+#include <sys/x86_archext.h>
+#include <sys/sysmacros.h>
+#include <sys/mc.h>
+
+#include <zen_umc.h>
+#include <sys/amdzen/df.h>
+#include <sys/amdzen/umc.h>
+
+static zen_umc_t *zen_umc;
+
+/*
+ * Per-CPU family information that describes the set of capabilities that they
+ * implement. When adding support for new CPU generations, you must go through
+ * what documentation you have and validate these. The best bet is to find a
+ * similar processor and see what has changed. Unfortunately, there really isn't
+ * a substitute for just basically checking every register. The family name
+ * comes from the amdzen_c_family(). One additional note for new CPUs, if our
+ * parent amdzen nexus driver does not attach (because the DF has changed PCI
+ * IDs or more), then just adding something here will not be sufficient to make
+ * it work.
+ */
+static const zen_umc_fam_data_t zen_umc_fam_data[] = {
+ {
+ .zufd_family = ZEN_FAMILY_NAPLES,
+ .zufd_dram_nrules = 16,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR4,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_DHYANA,
+ .zufd_dram_nrules = 16,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR4,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_DALI,
+ .zufd_dram_nrules = 2,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR4_APU,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_ROME,
+ .zufd_flags = ZEN_UMC_FAM_F_NP2 | ZEN_UMC_FAM_F_NORM_HASH |
+ ZEN_UMC_FAM_F_UMC_HASH,
+ .zufd_dram_nrules = 16,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR4,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_RM |
+ UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_RENOIR,
+ .zufd_flags = ZEN_UMC_FAM_F_NORM_HASH,
+ .zufd_dram_nrules = 2,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR4_APU,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_PC |
+ UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_MATISSE,
+ .zufd_flags = ZEN_UMC_FAM_F_NORM_HASH | ZEN_UMC_FAM_F_UMC_HASH,
+ .zufd_dram_nrules = 16,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR4,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_RM |
+ UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_VAN_GOGH,
+ .zufd_flags = ZEN_UMC_FAM_F_NORM_HASH,
+ .zufd_dram_nrules = 2,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR5_APU,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_MENDOCINO,
+ .zufd_flags = ZEN_UMC_FAM_F_NORM_HASH,
+ .zufd_dram_nrules = 2,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR5_APU,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_MILAN,
+ .zufd_flags = ZEN_UMC_FAM_F_TARG_REMAP | ZEN_UMC_FAM_F_NP2 |
+ ZEN_UMC_FAM_F_NORM_HASH | ZEN_UMC_FAM_F_UMC_HASH,
+ .zufd_dram_nrules = 16,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR4,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_RM |
+ UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_GENOA,
+ .zufd_flags = ZEN_UMC_FAM_F_TARG_REMAP |
+ ZEN_UMC_FAM_F_UMC_HASH | ZEN_UMC_FAM_F_UMC_EADDR |
+ ZEN_UMC_FAM_F_CS_XOR,
+ .zufd_dram_nrules = 20,
+ .zufd_cs_nrules = 4,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR5,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_RM |
+ UMC_CHAN_HASH_F_PC | UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_VERMEER,
+ .zufd_flags = ZEN_UMC_FAM_F_NORM_HASH | ZEN_UMC_FAM_F_UMC_HASH,
+ .zufd_dram_nrules = 16,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR4,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_RM |
+ UMC_CHAN_HASH_F_CS,
+ }, {
+ .zufd_family = ZEN_FAMILY_REMBRANDT,
+ .zufd_flags = ZEN_UMC_FAM_F_NORM_HASH,
+ .zufd_dram_nrules = 2,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR5_APU,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_CEZANNE,
+ .zufd_flags = ZEN_UMC_FAM_F_NORM_HASH,
+ .zufd_dram_nrules = 2,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR4_APU,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_PC |
+ UMC_CHAN_HASH_F_CS
+ }, {
+ .zufd_family = ZEN_FAMILY_RAPHAEL,
+ .zufd_flags = ZEN_UMC_FAM_F_TARG_REMAP | ZEN_UMC_FAM_F_CS_XOR,
+ .zufd_dram_nrules = 2,
+ .zufd_cs_nrules = 2,
+ .zufd_umc_style = ZEN_UMC_UMC_S_DDR5,
+ .zufd_chan_hash = UMC_CHAN_HASH_F_BANK | UMC_CHAN_HASH_F_PC |
+ UMC_CHAN_HASH_F_CS
+ }
+};
+
+static boolean_t
+zen_umc_identify(zen_umc_t *umc)
+{
+ for (uint_t i = 0; i < ARRAY_SIZE(zen_umc_fam_data); i++) {
+ if (zen_umc_fam_data[i].zufd_family == umc->umc_family) {
+ umc->umc_fdata = &zen_umc_fam_data[i];
+ return (B_TRUE);
+ }
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * This operates on DFv2, DFv3, and DFv3.5 DRAM rules, which generally speaking
+ * are in similar register locations and meanings, but the size of bits in
+ * memory is not consistent.
+ */
+static int
+zen_umc_read_dram_rule_df_23(zen_umc_t *umc, const uint_t dfno,
+ const uint_t inst, const uint_t ruleno, df_dram_rule_t *rule)
+{
+ int ret;
+ uint32_t base, limit;
+ uint64_t dbase, dlimit;
+ uint16_t addr_ileave, chan_ileave, sock_ileave, die_ileave, dest;
+ boolean_t hash = B_FALSE;
+ zen_umc_df_t *df = &umc->umc_dfs[dfno];
+
+ if ((ret = amdzen_c_df_read32(dfno, inst, DF_DRAM_BASE_V2(ruleno),
+ &base)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read DRAM base "
+ "register %u on 0x%x/0x%x: %d", ruleno, dfno, inst, ret);
+ return (ret);
+ }
+
+ if ((ret = amdzen_c_df_read32(dfno, inst, DF_DRAM_LIMIT_V2(ruleno),
+ &limit)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read DRAM limit "
+ "register %u on 0x%x/0x%x: %d", ruleno, dfno, inst, ret);
+ return (ret);
+ }
+
+
+ rule->ddr_raw_base = base;
+ rule->ddr_raw_limit = limit;
+ rule->ddr_raw_ileave = rule->ddr_raw_ctrl = 0;
+
+ if (!DF_DRAM_BASE_V2_GET_VALID(base)) {
+ return (0);
+ }
+
+ /*
+ * Extract all values from the registers and then normalize. While there
+ * are often different bit patterns for the values, the interpretation
+ * is the same across all the Zen 1-3 parts. That is while which bits
+ * may be used for say channel interleave vary, the values of them are
+ * consistent.
+ */
+ rule->ddr_flags |= DF_DRAM_F_VALID;
+ if (DF_DRAM_BASE_V2_GET_HOLE_EN(base)) {
+ rule->ddr_flags |= DF_DRAM_F_HOLE;
+ }
+
+ dbase = DF_DRAM_BASE_V2_GET_BASE(base);
+ dlimit = DF_DRAM_LIMIT_V2_GET_LIMIT(limit);
+ switch (umc->umc_df_rev) {
+ case DF_REV_2:
+ addr_ileave = DF_DRAM_BASE_V2_GET_ILV_ADDR(base);
+ chan_ileave = DF_DRAM_BASE_V2_GET_ILV_CHAN(base);
+ die_ileave = DF_DRAM_LIMIT_V2_GET_ILV_DIE(limit);
+ sock_ileave = DF_DRAM_LIMIT_V2_GET_ILV_SOCK(limit);
+ dest = DF_DRAM_LIMIT_V2_GET_DEST_ID(limit);
+ break;
+ case DF_REV_3:
+ addr_ileave = DF_DRAM_BASE_V3_GET_ILV_ADDR(base);
+ sock_ileave = DF_DRAM_BASE_V3_GET_ILV_SOCK(base);
+ die_ileave = DF_DRAM_BASE_V3_GET_ILV_DIE(base);
+ chan_ileave = DF_DRAM_BASE_V3_GET_ILV_CHAN(base);
+ dest = DF_DRAM_LIMIT_V3_GET_DEST_ID(limit);
+ break;
+ case DF_REV_3P5:
+ addr_ileave = DF_DRAM_BASE_V3P5_GET_ILV_ADDR(base);
+ sock_ileave = DF_DRAM_BASE_V3P5_GET_ILV_SOCK(base);
+ die_ileave = DF_DRAM_BASE_V3P5_GET_ILV_DIE(base);
+ chan_ileave = DF_DRAM_BASE_V3P5_GET_ILV_CHAN(base);
+ dest = DF_DRAM_LIMIT_V3P5_GET_DEST_ID(limit);
+ break;
+ default:
+ dev_err(umc->umc_dip, CE_WARN, "!encountered unsupported "
+ "DF revision processing DRAM rules: 0x%x", umc->umc_df_rev);
+ return (-1);
+ }
+
+ rule->ddr_base = dbase << DF_DRAM_BASE_V2_BASE_SHIFT;
+ rule->ddr_sock_ileave_bits = sock_ileave;
+ rule->ddr_die_ileave_bits = die_ileave;
+ switch (addr_ileave) {
+ case DF_DRAM_ILV_ADDR_8:
+ case DF_DRAM_ILV_ADDR_9:
+ case DF_DRAM_ILV_ADDR_10:
+ case DF_DRAM_ILV_ADDR_11:
+ case DF_DRAM_ILV_ADDR_12:
+ break;
+ default:
+ dev_err(umc->umc_dip, CE_WARN, "!encountered invalid address "
+ "interleave on rule %u, df/inst 0x%x/0x%x: 0x%x", ruleno,
+ dfno, inst, addr_ileave);
+ return (EINVAL);
+ }
+ rule->ddr_addr_start = DF_DRAM_ILV_ADDR_BASE + addr_ileave;
+
+ switch (chan_ileave) {
+ case DF_DRAM_BASE_V2_ILV_CHAN_1:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_1CH;
+ break;
+ case DF_DRAM_BASE_V2_ILV_CHAN_2:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_2CH;
+ break;
+ case DF_DRAM_BASE_V2_ILV_CHAN_4:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_4CH;
+ break;
+ case DF_DRAM_BASE_V2_ILV_CHAN_8:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_8CH;
+ break;
+ case DF_DRAM_BASE_V2_ILV_CHAN_6:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_6CH;
+ break;
+ case DF_DRAM_BASE_V2_ILV_CHAN_COD4_2:
+ hash = B_TRUE;
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_COD4_2CH;
+ break;
+ case DF_DRAM_BASE_V2_ILV_CHAN_COD2_4:
+ hash = B_TRUE;
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_COD2_4CH;
+ break;
+ case DF_DRAM_BASE_V2_ILV_CHAN_COD1_8:
+ hash = B_TRUE;
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_COD1_8CH;
+ break;
+ default:
+ dev_err(umc->umc_dip, CE_WARN, "!encountered invalid channel "
+ "interleave on rule %u, df/inst 0x%x/0x%x: 0x%x", ruleno,
+ dfno, inst, chan_ileave);
+ return (EINVAL);
+ }
+
+ /*
+ * If hashing is enabled, note which hashing rules apply to this
+ * address. This is done to smooth over the differences between DFv3 and
+ * DFv4, where the flags are in the rules themselves in the latter, but
+ * global today.
+ */
+ if (hash) {
+ if ((df->zud_flags & ZEN_UMC_DF_F_HASH_16_18) != 0) {
+ rule->ddr_flags |= DF_DRAM_F_HASH_16_18;
+ }
+
+ if ((df->zud_flags & ZEN_UMC_DF_F_HASH_21_23) != 0) {
+ rule->ddr_flags |= DF_DRAM_F_HASH_21_23;
+ }
+
+ if ((df->zud_flags & ZEN_UMC_DF_F_HASH_30_32) != 0) {
+ rule->ddr_flags |= DF_DRAM_F_HASH_30_32;
+ }
+ }
+
+ /*
+ * While DFv4 makes remapping explicit, it is basically always enabled
+ * and used on supported platforms prior to that point. So flag such
+ * supported platforms as ones that need to do this. On those systems
+ * there is only one set of remap rules for an entire DF that are
+ * determined based on the target socket. To indicate that we use the
+ * DF_DRAM_F_REMAP_SOCK flag below and skip setting a remap target.
+ */
+ if ((umc->umc_fdata->zufd_flags & ZEN_UMC_FAM_F_TARG_REMAP) != 0) {
+ rule->ddr_flags |= DF_DRAM_F_REMAP_EN | DF_DRAM_F_REMAP_SOCK;
+ }
+
+ rule->ddr_limit = (dlimit << DF_DRAM_LIMIT_V2_LIMIT_SHIFT) +
+ DF_DRAM_LIMIT_V2_LIMIT_EXCL;
+ rule->ddr_dest_fabid = dest;
+
+ return (0);
+}
+
+static int
+zen_umc_read_dram_rule_df_4(zen_umc_t *umc, const uint_t dfno,
+ const uint_t inst, const uint_t ruleno, df_dram_rule_t *rule)
+{
+ int ret;
+ uint16_t addr_ileave;
+ uint32_t base, limit, ilv, ctl;
+
+ if ((ret = amdzen_c_df_read32(dfno, inst, DF_DRAM_BASE_V4(ruleno),
+ &base)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read DRAM base "
+ "register %u on 0x%x/0x%x: %d", ruleno, dfno, inst, ret);
+ return (ret);
+ }
+
+ if ((ret = amdzen_c_df_read32(dfno, inst, DF_DRAM_LIMIT_V4(ruleno),
+ &limit)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read DRAM limit "
+ "register %u on 0x%x/0x%x: %d", ruleno, dfno, inst, ret);
+ return (ret);
+ }
+
+ if ((ret = amdzen_c_df_read32(dfno, inst, DF_DRAM_ILV_V4(ruleno),
+ &ilv)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read DRAM "
+ "interleave register %u on 0x%x/0x%x: %d", ruleno, dfno,
+ inst, ret);
+ return (ret);
+ }
+
+ if ((ret = amdzen_c_df_read32(dfno, inst, DF_DRAM_CTL_V4(ruleno),
+ &ctl)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read DRAM control "
+ "register %u on 0x%x/0x%x: %d", ruleno, dfno, inst, ret);
+ return (ret);
+ }
+
+ rule->ddr_raw_base = base;
+ rule->ddr_raw_limit = limit;
+ rule->ddr_raw_ileave = ilv;
+ rule->ddr_raw_ctrl = ctl;
+
+ if (!DF_DRAM_CTL_V4_GET_VALID(ctl)) {
+ return (0);
+ }
+
+ rule->ddr_flags |= DF_DRAM_F_VALID;
+ rule->ddr_base = DF_DRAM_BASE_V4_GET_ADDR(base);
+ rule->ddr_base = rule->ddr_base << DF_DRAM_BASE_V4_BASE_SHIFT;
+ rule->ddr_limit = DF_DRAM_LIMIT_V4_GET_ADDR(limit);
+ rule->ddr_limit = (rule->ddr_limit << DF_DRAM_LIMIT_V4_LIMIT_SHIFT) +
+ DF_DRAM_LIMIT_V4_LIMIT_EXCL;
+ rule->ddr_dest_fabid = DF_DRAM_CTL_V4_GET_DEST_ID(ctl);
+
+ if (DF_DRAM_CTL_V4_GET_HASH_1G(ctl) != 0) {
+ rule->ddr_flags |= DF_DRAM_F_HASH_30_32;
+ }
+
+ if (DF_DRAM_CTL_V4_GET_HASH_2M(ctl) != 0) {
+ rule->ddr_flags |= DF_DRAM_F_HASH_21_23;
+ }
+
+ if (DF_DRAM_CTL_V4_GET_HASH_64K(ctl) != 0) {
+ rule->ddr_flags |= DF_DRAM_F_HASH_16_18;
+ }
+
+ if (DF_DRAM_CTL_V4_GET_REMAP_EN(ctl) != 0) {
+ rule->ddr_flags |= DF_DRAM_F_REMAP_EN;
+ rule->ddr_remap_ent = DF_DRAM_CTL_V4_GET_REMAP_SEL(ctl);
+ }
+
+ if (DF_DRAM_CTL_V4_GET_HOLE_EN(ctl) != 0) {
+ rule->ddr_flags |= DF_DRAM_F_HOLE;
+ }
+
+ rule->ddr_sock_ileave_bits = DF_DRAM_ILV_V4_GET_SOCK(ilv);
+ rule->ddr_die_ileave_bits = DF_DRAM_ILV_V4_GET_DIE(ilv);
+ switch (DF_DRAM_ILV_V4_GET_CHAN(ilv)) {
+ case DF_DRAM_ILV_V4_CHAN_1:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_1CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_2:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_2CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_4:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_4CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_8:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_8CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_16:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_16CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_32:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_32CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_NPS4_2CH:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_2CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_NPS2_4CH:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_COD2_4CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_NPS1_8CH:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_8CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_NPS4_3CH:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_NPS4_3CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_NPS2_6CH:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_6CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_NPS1_12CH:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_12CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_NPS2_5CH:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_NPS2_5CH;
+ break;
+ case DF_DRAM_ILV_V4_CHAN_NPS1_10CH:
+ rule->ddr_chan_ileave = DF_CHAN_ILEAVE_NPS1_10CH;
+ break;
+ default:
+ dev_err(umc->umc_dip, CE_WARN, "!encountered invalid channel "
+ "interleave on rule %u, df/inst 0x%x/0x%x: 0x%x", ruleno,
+ dfno, inst, DF_DRAM_ILV_V4_GET_CHAN(ilv));
+
+ break;
+ }
+
+ addr_ileave = DF_DRAM_ILV_V4_GET_ADDR(ilv);
+ switch (addr_ileave) {
+ case DF_DRAM_ILV_ADDR_8:
+ case DF_DRAM_ILV_ADDR_9:
+ case DF_DRAM_ILV_ADDR_10:
+ case DF_DRAM_ILV_ADDR_11:
+ case DF_DRAM_ILV_ADDR_12:
+ break;
+ default:
+ dev_err(umc->umc_dip, CE_WARN, "!encountered invalid address "
+ "interleave on rule %u, df/inst 0x%x/0x%x: 0x%x", ruleno,
+ dfno, inst, addr_ileave);
+ return (EINVAL);
+ }
+ rule->ddr_addr_start = DF_DRAM_ILV_ADDR_BASE + addr_ileave;
+
+ return (0);
+}
+
+static int
+zen_umc_read_dram_rule(zen_umc_t *umc, const uint_t dfno, const uint_t instid,
+ const uint_t ruleno, df_dram_rule_t *rule)
+{
+ int ret;
+
+ switch (umc->umc_df_rev) {
+ case DF_REV_2:
+ case DF_REV_3:
+ case DF_REV_3P5:
+ ret = zen_umc_read_dram_rule_df_23(umc, dfno, instid, ruleno,
+ rule);
+ break;
+ case DF_REV_4:
+ ret = zen_umc_read_dram_rule_df_4(umc, dfno, instid, ruleno,
+ rule);
+ break;
+ default:
+ dev_err(umc->umc_dip, CE_WARN, "!encountered unsupported "
+ "DF revision processing DRAM rules: 0x%x", umc->umc_df_rev);
+ return (-1);
+ }
+
+ if (ret != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read DRAM "
+ "rule %u on df/inst 0x%x/0x%x: %d", ruleno,
+ dfno, instid, ret);
+ return (-1);
+ }
+
+ return (0);
+}
+
+static int
+zen_umc_read_remap(zen_umc_t *umc, zen_umc_df_t *df, const uint_t instid)
+{
+ uint_t nremaps, nents;
+ uint_t dfno = df->zud_dfno;
+ const df_reg_def_t milan_remap0[ZEN_UMC_MILAN_CS_NREMAPS] = {
+ DF_SKT0_CS_REMAP0_V3, DF_SKT1_CS_REMAP0_V3 };
+ const df_reg_def_t milan_remap1[ZEN_UMC_MILAN_CS_NREMAPS] = {
+ DF_SKT0_CS_REMAP1_V3, DF_SKT1_CS_REMAP1_V3 };
+ const df_reg_def_t dfv4_remapA[ZEN_UMC_MAX_CS_REMAPS] = {
+ DF_CS_REMAP0A_V4, DF_CS_REMAP1A_V4, DF_CS_REMAP2A_V4,
+ DF_CS_REMAP3A_V4 };
+ const df_reg_def_t dfv4_remapB[ZEN_UMC_MAX_CS_REMAPS] = {
+ DF_CS_REMAP0B_V4, DF_CS_REMAP1B_V4, DF_CS_REMAP2B_V4,
+ DF_CS_REMAP3B_V4 };
+ const df_reg_def_t *remapA, *remapB;
+
+
+ switch (umc->umc_df_rev) {
+ case DF_REV_3:
+ nremaps = ZEN_UMC_MILAN_CS_NREMAPS;
+ nents = ZEN_UMC_MILAN_REMAP_ENTS;
+ remapA = milan_remap0;
+ remapB = milan_remap1;
+ break;
+ case DF_REV_4:
+ nremaps = ZEN_UMC_MAX_CS_REMAPS;
+ nents = ZEN_UMC_MAX_REMAP_ENTS;
+ remapA = dfv4_remapA;
+ remapB = dfv4_remapB;
+ break;
+ default:
+ dev_err(umc->umc_dip, CE_WARN, "!encountered unsupported DF "
+ "revision processing remap rules: 0x%x", umc->umc_df_rev);
+ return (-1);
+ }
+
+ df->zud_cs_nremap = nremaps;
+ for (uint_t i = 0; i < nremaps; i++) {
+ int ret;
+ uint32_t rmA, rmB;
+ zen_umc_cs_remap_t *remap = &df->zud_remap[i];
+
+ if ((ret = amdzen_c_df_read32(dfno, instid, remapA[i],
+ &rmA)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read "
+ "df/inst 0x%x/0x%x remap socket %u-0/A: %d", dfno,
+ instid, i, ret);
+ return (-1);
+ }
+
+ if ((ret = amdzen_c_df_read32(dfno, instid, remapB[i],
+ &rmB)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read "
+ "df/inst 0x%x/0x%x remap socket %u-1/B: %d", dfno,
+ instid, i, ret);
+ return (-1);
+ }
+
+ remap->csr_nremaps = nents;
+ for (uint_t ent = 0; ent < ZEN_UMC_REMAP_PER_REG; ent++) {
+ uint_t alt = ent + ZEN_UMC_REMAP_PER_REG;
+ boolean_t do_alt = alt < nents;
+ remap->csr_remaps[ent] = DF_CS_REMAP_GET_CSX(rmA,
+ ent);
+ if (do_alt) {
+ remap->csr_remaps[alt] =
+ DF_CS_REMAP_GET_CSX(rmB, ent);
+ }
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Now that we have a CCM, we have several different tasks ahead of us:
+ *
+ * o Determine whether or not the DRAM hole is valid.
+ * o Snapshot all of the system address rules and translate them into our
+ * generic format.
+ * o Determine if there are any rules to retarget things (currently
+ * Milan/Genoa).
+ * o Determine if there are any other hashing rules enabled.
+ *
+ * We only gather this from a single CCM, as these settings are currently
+ * required to be the same across all of them.
+ */
+static int
+zen_umc_fill_ccm_cb(const uint_t dfno, const uint32_t fabid,
+ const uint32_t instid, void *arg)
+{
+ zen_umc_t *umc = arg;
+ zen_umc_df_t *df = &umc->umc_dfs[dfno];
+ df_reg_def_t hole;
+ int ret;
+ uint32_t val;
+
+ df->zud_dfno = dfno;
+ df->zud_ccm_inst = instid;
+
+ /*
+ * First get the DRAM hole. This has the same layout, albeit different
+ * registers across our different platforms.
+ */
+ switch (umc->umc_df_rev) {
+ case DF_REV_2:
+ case DF_REV_3:
+ case DF_REV_3P5:
+ hole = DF_DRAM_HOLE_V2;
+ break;
+ case DF_REV_4:
+ hole = DF_DRAM_HOLE_V4;
+ break;
+ default:
+ dev_err(umc->umc_dip, CE_WARN, "!encountered unsupported "
+ "DF version: 0x%x", umc->umc_df_rev);
+ return (-1);
+ }
+
+ if ((ret = amdzen_c_df_read32(dfno, instid, hole, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read DRAM Hole: %d",
+ ret);
+ return (-1);
+ }
+
+ df->zud_hole_raw = val;
+ if (DF_DRAM_HOLE_GET_VALID(val)) {
+ uint64_t t;
+
+ df->zud_flags |= ZEN_UMC_DF_F_HOLE_VALID;
+ t = DF_DRAM_HOLE_GET_BASE(val);
+ df->zud_hole_base = t << DF_DRAM_HOLE_BASE_SHIFT;
+ }
+
+ /*
+	 * Prior to Zen 4, the hash information was global and applied to all
+	 * COD rules in the system. Check if we're on such a system and
+	 * snapshot this so that we can use it during rule application. Note,
+	 * this was added in DFv3.
+ */
+ if (umc->umc_df_rev == DF_REV_3 || umc->umc_df_rev == DF_REV_3P5) {
+ uint32_t globctl;
+
+ if ((ret = amdzen_c_df_read32(dfno, instid, DF_GLOB_CTL_V3,
+ &globctl)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read global "
+ "control: %d", ret);
+ return (-1);
+ }
+
+ df->zud_glob_ctl_raw = globctl;
+ if (DF_GLOB_CTL_V3_GET_HASH_1G(globctl) != 0) {
+ df->zud_flags |= ZEN_UMC_DF_F_HASH_30_32;
+ }
+
+ if (DF_GLOB_CTL_V3_GET_HASH_2M(globctl) != 0) {
+ df->zud_flags |= ZEN_UMC_DF_F_HASH_21_23;
+ }
+
+ if (DF_GLOB_CTL_V3_GET_HASH_64K(globctl) != 0) {
+ df->zud_flags |= ZEN_UMC_DF_F_HASH_16_18;
+ }
+ }
+
+ df->zud_dram_nrules = umc->umc_fdata->zufd_dram_nrules;
+ for (uint_t i = 0; i < umc->umc_fdata->zufd_dram_nrules; i++) {
+ if (zen_umc_read_dram_rule(umc, dfno, instid, i,
+ &df->zud_rules[i]) != 0) {
+ return (-1);
+ }
+ }
+
+ if ((umc->umc_fdata->zufd_flags & ZEN_UMC_FAM_F_TARG_REMAP) != 0) {
+ if (zen_umc_read_remap(umc, df, instid) != 0) {
+ return (-1);
+ }
+ }
+
+ /*
+ * We only want a single entry, so always return 1 to terminate us
+ * early.
+ */
+ return (1);
+}
+
+/*
+ * Fill all the information about a DDR4 DIMM. In the DDR4 UMC, some of this
+ * information is on a per-chip select basis while at other times it is on a
+ * per-DIMM basis. In general, chip-selects 0/1 correspond to DIMM 0, and
+ * chip-selects 2/3 correspond to DIMM 1. To normalize things with the DDR5 UMC
+ * which generally has things stored on a per-rank/chip-select basis, we
+ * duplicate information that is DIMM-wide into the chip-select data structure
+ * (umc_cs_t).
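+ *
+ * As a concrete illustration of the layout (not an additional constraint):
+ * for dimmno 1, the per-chip-select loop below reads register instances
+ * reginst = i + dimmno * 2, i.e. chip-selects 2 and 3, while the per-DIMM
+ * registers (mask, address config, etc.) are indexed by dimmno directly.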
+ */
+static boolean_t
+zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
+ zen_umc_chan_t *chan, const uint_t dimmno)
+{
+ umc_dimm_t *dimm;
+ umc_cs_t *cs0, *cs1;
+ const uint32_t id = chan->chan_logid;
+ int ret;
+ uint32_t val, reg;
+
+ ASSERT3U(dimmno, <, ZEN_UMC_MAX_DIMMS);
+ dimm = &chan->chan_dimms[dimmno];
+ dimm->ud_dimmno = dimmno;
+ cs0 = &dimm->ud_cs[0];
+ cs1 = &dimm->ud_cs[1];
+
+ /*
+ * DDR4 organization has initial data that exists on a per-chip select
+ * basis. The rest of it is on a per-DIMM basis. First we grab the
+ * per-chip-select data. After this for loop, we will always duplicate
+ * all data that we gather into both chip-selects.
+ */
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CS_PER_DIMM; i++) {
+ uint64_t addr;
+ const uint32_t reginst = i + dimmno * 2;
+ reg = UMC_BASE(id, reginst);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read base "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ addr = (uint64_t)UMC_BASE_GET_ADDR(val) << UMC_BASE_ADDR_SHIFT;
+ dimm->ud_cs[i].ucs_base.udb_base = addr;
+ dimm->ud_cs[i].ucs_base.udb_valid = UMC_BASE_GET_EN(val);
+
+ reg = UMC_BASE_SEC(id, reginst);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read "
+ "secondary base register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ addr = (uint64_t)UMC_BASE_GET_ADDR(val) << UMC_BASE_ADDR_SHIFT;
+ dimm->ud_cs[i].ucs_sec.udb_base = addr;
+ dimm->ud_cs[i].ucs_sec.udb_valid = UMC_BASE_GET_EN(val);
+ }
+
+ reg = UMC_MASK_DDR4(id, dimmno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read mask register "
+ "%x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ /*
+	 * When we extract the masks, the hardware only compares a limited
+	 * range of bits. Therefore we always need to OR in the lower-order
+	 * bits that it ignores.
+ */
+ cs0->ucs_base_mask = (uint64_t)UMC_MASK_GET_ADDR(val) <<
+ UMC_MASK_ADDR_SHIFT;
+ cs0->ucs_base_mask |= (1 << UMC_MASK_ADDR_SHIFT) - 1;
+ cs1->ucs_base_mask = cs0->ucs_base_mask;
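+	/*
+	 * For example (purely illustrative values): if UMC_MASK_ADDR_SHIFT
+	 * were 8, a raw register value of 0xfff would yield a mask of 0xfff00
+	 * and the OR above would turn that into 0xfffff, so that comparisons
+	 * against full chip-select addresses behave as though the ignored low
+	 * bits always matched.
+	 */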
+
+ reg = UMC_MASK_SEC_DDR4(id, dimmno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read secondary mask "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ cs0->ucs_sec_mask = (uint64_t)UMC_MASK_GET_ADDR(val) <<
+ UMC_MASK_ADDR_SHIFT;
+ cs0->ucs_sec_mask |= (1 << UMC_MASK_ADDR_SHIFT) - 1;
+ cs1->ucs_sec_mask = cs0->ucs_sec_mask;
+
+ reg = UMC_ADDRCFG_DDR4(id, dimmno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read address config "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ cs0->ucs_nbanks = UMC_ADDRCFG_GET_NBANK_BITS(val) +
+ UMC_ADDRCFG_NBANK_BITS_BASE;
+ cs1->ucs_nbanks = cs0->ucs_nbanks;
+ cs0->ucs_ncol = UMC_ADDRCFG_GET_NCOL_BITS(val) +
+ UMC_ADDRCFG_NCOL_BITS_BASE;
+ cs1->ucs_ncol = cs0->ucs_ncol;
+ cs0->ucs_nrow_hi = UMC_ADDRCFG_DDR4_GET_NROW_BITS_HI(val);
+ cs1->ucs_nrow_hi = cs0->ucs_nrow_hi;
+ cs0->ucs_nrow_lo = UMC_ADDRCFG_GET_NROW_BITS_LO(val) +
+ UMC_ADDRCFG_NROW_BITS_LO_BASE;
+ cs1->ucs_nrow_lo = cs0->ucs_nrow_lo;
+ cs0->ucs_nbank_groups = UMC_ADDRCFG_GET_NBANKGRP_BITS(val);
+ cs1->ucs_nbank_groups = cs0->ucs_nbank_groups;
+ /*
+ * As the chip-select XORs don't always show up, use a dummy value
+ * that'll result in no change occurring here.
+ */
+ cs0->ucs_cs_xor = cs1->ucs_cs_xor = 0;
+
+ /*
+ * APUs don't seem to support various rank select bits.
+ */
+ if (umc->umc_fdata->zufd_umc_style == ZEN_UMC_UMC_S_DDR4) {
+ cs0->ucs_nrm = UMC_ADDRCFG_DDR4_GET_NRM_BITS(val);
+ cs1->ucs_nrm = cs0->ucs_nrm;
+ } else {
+ cs0->ucs_nrm = cs1->ucs_nrm = 0;
+ }
+
+ reg = UMC_ADDRSEL_DDR4(id, dimmno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read bank address "
+ "select register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ cs0->ucs_row_hi_bit = UMC_ADDRSEL_DDR4_GET_ROW_HI(val) +
+ UMC_ADDRSEL_DDR4_ROW_HI_BASE;
+ cs1->ucs_row_hi_bit = cs0->ucs_row_hi_bit;
+ cs0->ucs_row_low_bit = UMC_ADDRSEL_GET_ROW_LO(val) +
+ UMC_ADDRSEL_ROW_LO_BASE;
+ cs1->ucs_row_low_bit = cs0->ucs_row_low_bit;
+ cs0->ucs_bank_bits[0] = UMC_ADDRSEL_GET_BANK0(val) +
+ UMC_ADDRSEL_BANK_BASE;
+ cs0->ucs_bank_bits[1] = UMC_ADDRSEL_GET_BANK1(val) +
+ UMC_ADDRSEL_BANK_BASE;
+ cs0->ucs_bank_bits[2] = UMC_ADDRSEL_GET_BANK2(val) +
+ UMC_ADDRSEL_BANK_BASE;
+ cs0->ucs_bank_bits[3] = UMC_ADDRSEL_GET_BANK3(val) +
+ UMC_ADDRSEL_BANK_BASE;
+ cs0->ucs_bank_bits[4] = UMC_ADDRSEL_GET_BANK4(val) +
+ UMC_ADDRSEL_BANK_BASE;
+ bcopy(cs0->ucs_bank_bits, cs1->ucs_bank_bits,
+ sizeof (cs0->ucs_bank_bits));
+
+ reg = UMC_COLSEL_LO_DDR4(id, dimmno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read column address "
+ "select low register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ for (uint_t i = 0; i < ZEN_UMC_MAX_COLSEL_PER_REG; i++) {
+ cs0->ucs_col_bits[i] = UMC_COLSEL_REMAP_GET_COL(val, i) +
+ UMC_COLSEL_LO_BASE;
+ }
+
+ reg = UMC_COLSEL_HI_DDR4(id, dimmno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read column address "
+ "select high register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ for (uint_t i = 0; i < ZEN_UMC_MAX_COLSEL_PER_REG; i++) {
+ cs0->ucs_col_bits[i + ZEN_UMC_MAX_COLSEL_PER_REG] =
+ UMC_COLSEL_REMAP_GET_COL(val, i) + UMC_COLSEL_HI_BASE;
+ }
+ bcopy(cs0->ucs_col_bits, cs1->ucs_col_bits, sizeof (cs0->ucs_col_bits));
+
+ /*
+	 * The next two registers give us information about a given rank
+	 * select. In the APUs, the inversion bits are there; however, the
+	 * actual bit selects are not. In this case we read the reserved bits
+	 * regardless. They should be ignored because the number of
+	 * rank-multiplication bits is zero on those parts.
+ */
+ reg = UMC_RMSEL_DDR4(id, dimmno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read rank address "
+ "select register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ cs0->ucs_inv_msbs = UMC_RMSEL_DDR4_GET_INV_MSBE(val);
+ cs1->ucs_inv_msbs = UMC_RMSEL_DDR4_GET_INV_MSBO(val);
+ cs0->ucs_rm_bits[0] = UMC_RMSEL_DDR4_GET_RM0(val) +
+ UMC_RMSEL_BASE;
+ cs0->ucs_rm_bits[1] = UMC_RMSEL_DDR4_GET_RM1(val) +
+ UMC_RMSEL_BASE;
+ cs0->ucs_rm_bits[2] = UMC_RMSEL_DDR4_GET_RM2(val) +
+ UMC_RMSEL_BASE;
+ bcopy(cs0->ucs_rm_bits, cs1->ucs_rm_bits, sizeof (cs0->ucs_rm_bits));
+
+ reg = UMC_RMSEL_SEC_DDR4(id, dimmno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read secondary rank "
+ "address select register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ cs0->ucs_inv_msbs_sec = UMC_RMSEL_DDR4_GET_INV_MSBE(val);
+ cs1->ucs_inv_msbs_sec = UMC_RMSEL_DDR4_GET_INV_MSBO(val);
+ cs0->ucs_rm_bits_sec[0] = UMC_RMSEL_DDR4_GET_RM0(val) +
+ UMC_RMSEL_BASE;
+ cs0->ucs_rm_bits_sec[1] = UMC_RMSEL_DDR4_GET_RM1(val) +
+ UMC_RMSEL_BASE;
+ cs0->ucs_rm_bits_sec[2] = UMC_RMSEL_DDR4_GET_RM2(val) +
+ UMC_RMSEL_BASE;
+ bcopy(cs0->ucs_rm_bits_sec, cs1->ucs_rm_bits_sec,
+ sizeof (cs0->ucs_rm_bits_sec));
+
+ reg = UMC_DIMMCFG_DDR4(id, dimmno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read DIMM "
+ "configuration register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ dimm->ud_dimmcfg_raw = val;
+
+ if (UMC_DIMMCFG_GET_X16(val) != 0) {
+ dimm->ud_width = UMC_DIMM_W_X16;
+ } else if (UMC_DIMMCFG_GET_X4(val) != 0) {
+ dimm->ud_width = UMC_DIMM_W_X4;
+ } else {
+ dimm->ud_width = UMC_DIMM_W_X8;
+ }
+
+ if (UMC_DIMMCFG_GET_3DS(val) != 0) {
+ dimm->ud_kind = UMC_DIMM_K_3DS_RDIMM;
+ } else if (UMC_DIMMCFG_GET_LRDIMM(val) != 0) {
+ dimm->ud_kind = UMC_DIMM_K_LRDIMM;
+ } else if (UMC_DIMMCFG_GET_RDIMM(val) != 0) {
+ dimm->ud_kind = UMC_DIMM_K_RDIMM;
+ } else {
+ dimm->ud_kind = UMC_DIMM_K_UDIMM;
+ }
+
+ /*
+ * DIMM information in a UMC can be somewhat confusing. There are quite
+ * a number of non-zero reset values that are here. Flag whether or not
+ * we think this entry should be usable based on enabled chip-selects.
+ */
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CHAN_BASE; i++) {
+ if (dimm->ud_cs[i].ucs_base.udb_valid ||
+ dimm->ud_cs[i].ucs_sec.udb_valid) {
+ dimm->ud_flags |= UMC_DIMM_F_VALID;
+ break;
+ }
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * The DDR5 based systems are organized such that almost all the information we
+ * care about is split between two different chip-select structures in the UMC
+ * hardware SMN space.
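+ *
+ * As an illustrative note, the chip-select register instance used below is
+ * regno = dimmno * 2 + rankno, so e.g. DIMM 1, rank 0 corresponds to register
+ * instance 2.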
+ */
+static boolean_t
+zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
+ zen_umc_chan_t *chan, const uint_t dimmno, const uint_t rankno)
+{
+ int ret;
+ umc_cs_t *cs;
+ uint32_t reg, val;
+ const uint32_t id = chan->chan_logid;
+ const uint32_t regno = dimmno * 2 + rankno;
+
+ ASSERT3U(dimmno, <, ZEN_UMC_MAX_DIMMS);
+ ASSERT3U(rankno, <, ZEN_UMC_MAX_CS_PER_DIMM);
+ cs = &chan->chan_dimms[dimmno].ud_cs[rankno];
+
+ reg = UMC_BASE(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read base "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ cs->ucs_base.udb_base = (uint64_t)UMC_BASE_GET_ADDR(val) <<
+ UMC_BASE_ADDR_SHIFT;
+ cs->ucs_base.udb_valid = UMC_BASE_GET_EN(val);
+ if ((umc->umc_fdata->zufd_flags & ZEN_UMC_FAM_F_UMC_EADDR) != 0) {
+ uint64_t addr;
+
+ reg = UMC_BASE_EXT_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) !=
+ 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read "
+ "extended base register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ addr = (uint64_t)UMC_BASE_EXT_GET_ADDR(val) <<
+ UMC_BASE_EXT_ADDR_SHIFT;
+ cs->ucs_base.udb_base |= addr;
+ }
+
+ reg = UMC_BASE_SEC(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read secondary base "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ cs->ucs_sec.udb_base = (uint64_t)UMC_BASE_GET_ADDR(val) <<
+ UMC_BASE_ADDR_SHIFT;
+ cs->ucs_sec.udb_valid = UMC_BASE_GET_EN(val);
+ if ((umc->umc_fdata->zufd_flags & ZEN_UMC_FAM_F_UMC_EADDR) != 0) {
+ uint64_t addr;
+
+ reg = UMC_BASE_EXT_SEC_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) !=
+ 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read "
+ "extended secondary base register %x: %d", reg,
+ ret);
+ return (B_FALSE);
+ }
+
+ addr = (uint64_t)UMC_BASE_EXT_GET_ADDR(val) <<
+ UMC_BASE_EXT_ADDR_SHIFT;
+ cs->ucs_sec.udb_base |= addr;
+ }
+
+ reg = UMC_MASK_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read mask "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ cs->ucs_base_mask = (uint64_t)UMC_MASK_GET_ADDR(val) <<
+ UMC_MASK_ADDR_SHIFT;
+ cs->ucs_base_mask |= (1 << UMC_MASK_ADDR_SHIFT) - 1;
+ if ((umc->umc_fdata->zufd_flags & ZEN_UMC_FAM_F_UMC_EADDR) != 0) {
+ uint64_t addr;
+
+ reg = UMC_MASK_EXT_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) !=
+ 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read "
+ "extended mask register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ addr = (uint64_t)UMC_MASK_EXT_GET_ADDR(val) <<
+ UMC_MASK_EXT_ADDR_SHIFT;
+ cs->ucs_base_mask |= addr;
+ }
+
+
+ reg = UMC_MASK_SEC_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read secondary mask "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ cs->ucs_sec_mask = (uint64_t)UMC_MASK_GET_ADDR(val) <<
+ UMC_MASK_ADDR_SHIFT;
+ cs->ucs_sec_mask |= (1 << UMC_MASK_ADDR_SHIFT) - 1;
+ if ((umc->umc_fdata->zufd_flags & ZEN_UMC_FAM_F_UMC_EADDR) != 0) {
+ uint64_t addr;
+
+ reg = UMC_MASK_EXT_SEC_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) !=
+ 0) {
+			dev_err(umc->umc_dip, CE_WARN, "failed to read "
+			    "extended secondary mask register %x: %d", reg,
+			    ret);
+ return (B_FALSE);
+ }
+
+ addr = (uint64_t)UMC_MASK_EXT_GET_ADDR(val) <<
+ UMC_MASK_EXT_ADDR_SHIFT;
+ cs->ucs_sec_mask |= addr;
+ }
+
+ reg = UMC_ADDRCFG_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read address config "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ if ((umc->umc_fdata->zufd_flags & ZEN_UMC_FAM_F_CS_XOR) != 0) {
+ cs->ucs_cs_xor = UMC_ADDRCFG_DDR5_GET_CSXOR(val);
+ } else {
+ cs->ucs_cs_xor = 0;
+ }
+ cs->ucs_nbanks = UMC_ADDRCFG_GET_NBANK_BITS(val) +
+ UMC_ADDRCFG_NBANK_BITS_BASE;
+ cs->ucs_ncol = UMC_ADDRCFG_GET_NCOL_BITS(val) +
+ UMC_ADDRCFG_NCOL_BITS_BASE;
+ cs->ucs_nrow_lo = UMC_ADDRCFG_GET_NROW_BITS_LO(val) +
+ UMC_ADDRCFG_NROW_BITS_LO_BASE;
+ cs->ucs_nrow_hi = 0;
+ cs->ucs_nrm = UMC_ADDRCFG_DDR5_GET_NRM_BITS(val);
+ cs->ucs_nbank_groups = UMC_ADDRCFG_GET_NBANKGRP_BITS(val);
+
+ reg = UMC_ADDRSEL_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read address select "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ cs->ucs_row_hi_bit = 0;
+ cs->ucs_row_low_bit = UMC_ADDRSEL_GET_ROW_LO(val) +
+ UMC_ADDRSEL_ROW_LO_BASE;
+ cs->ucs_bank_bits[4] = UMC_ADDRSEL_GET_BANK4(val) +
+ UMC_ADDRSEL_BANK_BASE;
+ cs->ucs_bank_bits[3] = UMC_ADDRSEL_GET_BANK3(val) +
+ UMC_ADDRSEL_BANK_BASE;
+ cs->ucs_bank_bits[2] = UMC_ADDRSEL_GET_BANK2(val) +
+ UMC_ADDRSEL_BANK_BASE;
+ cs->ucs_bank_bits[1] = UMC_ADDRSEL_GET_BANK1(val) +
+ UMC_ADDRSEL_BANK_BASE;
+ cs->ucs_bank_bits[0] = UMC_ADDRSEL_GET_BANK0(val) +
+ UMC_ADDRSEL_BANK_BASE;
+
+ reg = UMC_COLSEL_LO_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read column address "
+ "select low register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ for (uint_t i = 0; i < ZEN_UMC_MAX_COLSEL_PER_REG; i++) {
+ cs->ucs_col_bits[i] = UMC_COLSEL_REMAP_GET_COL(val, i) +
+ UMC_COLSEL_LO_BASE;
+ }
+
+ reg = UMC_COLSEL_HI_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read column address "
+ "select high register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ for (uint_t i = 0; i < ZEN_UMC_MAX_COLSEL_PER_REG; i++) {
+ cs->ucs_col_bits[i + ZEN_UMC_MAX_COLSEL_PER_REG] =
+ UMC_COLSEL_REMAP_GET_COL(val, i) + UMC_COLSEL_HI_BASE;
+ }
+
+ /*
+	 * Time for our friend, the RM Selection register. As in DDR4, we end
+	 * up reading everything here, even though on many parts some of these
+	 * fields are reserved. The intent is that we won't look at the
+	 * reserved bits unless something actually points us there.
+ */
+ reg = UMC_RMSEL_DDR5(id, regno);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read rank multiply "
+ "select register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ /*
+	 * DDR5-based devices have primary and secondary inverted MSBs;
+	 * however, they only have a single set of rm bits. To normalize
+	 * things with the DDR4 subsystem, we copy the primary rm bits to the
+	 * secondary set so the decoder/encoder can treat both the same way.
+ */
+ cs->ucs_inv_msbs = UMC_RMSEL_DDR5_GET_INV_MSBS(val);
+ cs->ucs_inv_msbs_sec = UMC_RMSEL_DDR5_GET_INV_MSBS_SEC(val);
+ cs->ucs_subchan = UMC_RMSEL_DDR5_GET_SUBCHAN(val) +
+ UMC_RMSEL_DDR5_SUBCHAN_BASE;
+ cs->ucs_rm_bits[3] = UMC_RMSEL_DDR5_GET_RM3(val) + UMC_RMSEL_BASE;
+ cs->ucs_rm_bits[2] = UMC_RMSEL_DDR5_GET_RM2(val) + UMC_RMSEL_BASE;
+ cs->ucs_rm_bits[1] = UMC_RMSEL_DDR5_GET_RM1(val) + UMC_RMSEL_BASE;
+ cs->ucs_rm_bits[0] = UMC_RMSEL_DDR5_GET_RM0(val) + UMC_RMSEL_BASE;
+ bcopy(cs->ucs_rm_bits, cs->ucs_rm_bits_sec,
+ sizeof (cs->ucs_rm_bits));
+
+ return (B_TRUE);
+}
+
+static void
+zen_umc_fill_ddr_type(zen_umc_chan_t *chan, boolean_t ddr4)
+{
+ umc_dimm_type_t dimm = UMC_DIMM_T_UNKNOWN;
+ uint8_t val;
+
+ /*
+	 * While the DDR4 and DDR5 encodings overlap for some values (e.g. the
+	 * DDR4 ones), the rest of the encoding space differs between the two.
+	 * As such we need to treat them separately in case we encounter
+	 * something we don't expect.
+ */
+ val = UMC_UMCCFG_GET_DDR_TYPE(chan->chan_umccfg_raw);
+ if (ddr4) {
+ switch (val) {
+ case UMC_UMCCFG_DDR4_T_DDR4:
+ dimm = UMC_DIMM_T_DDR4;
+ break;
+ case UMC_UMCCFG_DDR4_T_LPDDR4:
+ dimm = UMC_DIMM_T_LPDDR4;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (val) {
+ case UMC_UMCCFG_DDR5_T_DDR5:
+ dimm = UMC_DIMM_T_DDR5;
+ break;
+ case UMC_UMCCFG_DDR5_T_LPDDR5:
+ dimm = UMC_DIMM_T_LPDDR5;
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (uint_t i = 0; i < ZEN_UMC_MAX_DIMMS; i++) {
+ chan->chan_dimms[i].ud_type = dimm;
+ }
+}
+
+/*
+ * Fill in the channel's hashing configuration. While the locations of many of
+ * these registers changed between the DDR4-capable and DDR5-capable devices,
+ * the actual contents are the same, so we process them together.
+ */
+static boolean_t
+zen_umc_fill_chan_hash(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan,
+ boolean_t ddr4)
+{
+ int ret;
+ uint32_t reg;
+ uint32_t val;
+
+ const umc_chan_hash_flags_t flags = umc->umc_fdata->zufd_chan_hash;
+ const uint32_t id = chan->chan_logid;
+ umc_chan_hash_t *chash = &chan->chan_hash;
+ chash->uch_flags = flags;
+
+ if ((flags & UMC_CHAN_HASH_F_BANK) != 0) {
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CHAN_BANK_HASH; i++) {
+ umc_bank_hash_t *bank = &chash->uch_bank_hashes[i];
+
+ if (ddr4) {
+ reg = UMC_BANK_HASH_DDR4(id, i);
+ } else {
+ reg = UMC_BANK_HASH_DDR5(id, i);
+ }
+
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg,
+ &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read "
+ "bank hash register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ bank->ubh_row_xor = UMC_BANK_HASH_GET_ROW(val);
+ bank->ubh_col_xor = UMC_BANK_HASH_GET_COL(val);
+ bank->ubh_en = UMC_BANK_HASH_GET_EN(val);
+ }
+ }
+
+ if ((flags & UMC_CHAN_HASH_F_RM) != 0) {
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CHAN_RM_HASH; i++) {
+ uint64_t addr;
+ umc_addr_hash_t *rm = &chash->uch_rm_hashes[i];
+
+ if (ddr4) {
+ reg = UMC_RANK_HASH_DDR4(id, i);
+ } else {
+ reg = UMC_RANK_HASH_DDR5(id, i);
+ }
+
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg,
+ &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read "
+ "rm hash register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ addr = UMC_RANK_HASH_GET_ADDR(val);
+ rm->uah_addr_xor = addr << UMC_RANK_HASH_SHIFT;
+ rm->uah_en = UMC_RANK_HASH_GET_EN(val);
+
+ if (ddr4 || (umc->umc_fdata->zufd_flags &
+ ZEN_UMC_FAM_F_UMC_EADDR) == 0) {
+ continue;
+ }
+
+ reg = UMC_RANK_HASH_EXT_DDR5(id, i);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg,
+ &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read "
+ "rm hash ext register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ addr = UMC_RANK_HASH_EXT_GET_ADDR(val);
+ rm->uah_addr_xor |= addr <<
+ UMC_RANK_HASH_EXT_ADDR_SHIFT;
+ }
+ }
+
+ if ((flags & UMC_CHAN_HASH_F_PC) != 0) {
+ umc_pc_hash_t *pc = &chash->uch_pc_hash;
+
+ if (ddr4) {
+ reg = UMC_PC_HASH_DDR4(id);
+ } else {
+ reg = UMC_PC_HASH_DDR5(id);
+ }
+
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read pc hash "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ pc->uph_row_xor = UMC_PC_HASH_GET_ROW(val);
+ pc->uph_col_xor = UMC_PC_HASH_GET_COL(val);
+ pc->uph_en = UMC_PC_HASH_GET_EN(val);
+
+ if (ddr4) {
+ reg = UMC_PC_HASH2_DDR4(id);
+ } else {
+ reg = UMC_PC_HASH2_DDR5(id);
+ }
+
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read pc hash "
+ "2 register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ pc->uph_bank_xor = UMC_PC_HASH2_GET_BANK(val);
+ }
+
+ if ((flags & UMC_CHAN_HASH_F_CS) != 0) {
+ for (uint_t i = 0; i < ZEN_UMC_MAX_CHAN_CS_HASH; i++) {
+ uint64_t addr;
+ umc_addr_hash_t *rm = &chash->uch_cs_hashes[i];
+
+ if (ddr4) {
+ reg = UMC_CS_HASH_DDR4(id, i);
+ } else {
+ reg = UMC_CS_HASH_DDR5(id, i);
+ }
+
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg,
+ &val)) != 0) {
+				dev_err(umc->umc_dip, CE_WARN, "failed to read "
+				    "cs hash register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ addr = UMC_CS_HASH_GET_ADDR(val);
+ rm->uah_addr_xor = addr << UMC_CS_HASH_SHIFT;
+ rm->uah_en = UMC_CS_HASH_GET_EN(val);
+
+ if (ddr4 || (umc->umc_fdata->zufd_flags &
+ ZEN_UMC_FAM_F_UMC_EADDR) == 0) {
+ continue;
+ }
+
+ reg = UMC_CS_HASH_EXT_DDR5(id, i);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg,
+ &val)) != 0) {
+				dev_err(umc->umc_dip, CE_WARN, "failed to read "
+				    "cs hash ext register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ addr = UMC_CS_HASH_EXT_GET_ADDR(val);
+ rm->uah_addr_xor |= addr << UMC_CS_HASH_EXT_ADDR_SHIFT;
+ }
+ }
+
+ return (B_TRUE);
+}
+
+/*
+ * This fills in settings that we care about which are valid for the entire
+ * channel and are the same between DDR4/5 capable devices.
+ */
+static boolean_t
+zen_umc_fill_chan(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan)
+{
+ uint32_t reg, val;
+ const uint32_t id = chan->chan_logid;
+ int ret;
+ boolean_t ddr4;
+
+ if (umc->umc_fdata->zufd_umc_style == ZEN_UMC_UMC_S_DDR4 ||
+ umc->umc_fdata->zufd_umc_style == ZEN_UMC_UMC_S_DDR4_APU) {
+ ddr4 = B_TRUE;
+ } else {
+ ddr4 = B_FALSE;
+ }
+
+ /*
+	 * Begin by gathering all of the information related to hashing. What
+	 * is valid here varies based on the actual chip family, while the
+	 * register locations vary between DDR4 and DDR5.
+ */
+ if (!zen_umc_fill_chan_hash(umc, df, chan, ddr4)) {
+ return (B_FALSE);
+ }
+
+ reg = UMC_UMCCFG(id);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read UMC "
+ "configuration register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+
+ chan->chan_umccfg_raw = val;
+ if (UMC_UMCCFG_GET_ECC_EN(val)) {
+ chan->chan_flags |= UMC_CHAN_F_ECC_EN;
+ }
+
+ /*
+ * This register contains information to determine the type of DIMM.
+ * All DIMMs in the channel must be the same type. As such, set this on
+ * all DIMMs we've discovered.
+ */
+ zen_umc_fill_ddr_type(chan, ddr4);
+
+ /*
+ * Grab data that we can use to determine if we're scrambling or
+ * encrypting regions of memory.
+ */
+ reg = UMC_DATACTL(id);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read data control "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ chan->chan_datactl_raw = val;
+ if (UMC_DATACTL_GET_SCRAM_EN(val)) {
+ chan->chan_flags |= UMC_CHAN_F_SCRAMBLE_EN;
+ }
+
+ if (UMC_DATACTL_GET_ENCR_EN(val)) {
+ chan->chan_flags |= UMC_CHAN_F_ENCR_EN;
+ }
+
+ /*
+	 * At the moment we only snapshot the raw ECC control information.
+	 * When we do the further work of making this a part of the MCA/X
+	 * decoding, we'll want to take this apart for syndrome decoding.
+	 * Until then, simply cache it for future us and for observability.
+ */
+ reg = UMC_ECCCTL(id);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read ECC control "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ chan->chan_eccctl_raw = val;
+
+ /*
+ * Read and snapshot the UMC capability registers for debugging in the
+ * future.
+ */
+ reg = UMC_UMCCAP(id);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+		dev_err(umc->umc_dip, CE_WARN, "failed to read UMC cap "
+		    "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ chan->chan_umccap_raw = val;
+
+ reg = UMC_UMCCAP_HI(id);
+ if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "failed to read UMC cap high "
+ "register %x: %d", reg, ret);
+ return (B_FALSE);
+ }
+ chan->chan_umccap_hi_raw = val;
+
+ return (B_TRUE);
+}
+
+static int
+zen_umc_fill_umc_cb(const uint_t dfno, const uint32_t fabid,
+ const uint32_t instid, void *arg)
+{
+ zen_umc_t *umc = arg;
+ zen_umc_df_t *df = &umc->umc_dfs[dfno];
+ zen_umc_chan_t *chan = &df->zud_chan[df->zud_nchan];
+
+ df->zud_nchan++;
+ VERIFY3U(df->zud_nchan, <=, ZEN_UMC_MAX_UMCS);
+
+ /*
+ * The data fabric is generally organized such that all UMC entries
+	 * should be contiguous in their fabric ID space; however, we don't
+ * want to rely on specific ID locations. The UMC SMN addresses are
+ * organized in a relative order. To determine the SMN ID to use (the
+ * chan_logid) we end up making the following assumptions:
+ *
+ * o The iteration order will always be from the lowest component ID
+ * to the highest component ID.
+ * o The relative order that we encounter will be the same as the SMN
+ * order. That is, the first thing we find (regardless of component
+ * ID) will be SMN UMC entry 0, the next 1, etc.
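+	 *
+	 * As a purely hypothetical illustration: if this iteration visits UMC
+	 * instances with component IDs 0x4, 0x5, and 0x6 in that order, they
+	 * are assigned chan_logid values 0, 1, and 2 respectively, regardless
+	 * of what the component ID values themselves happen to be.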
+ */
+ chan->chan_logid = df->zud_nchan - 1;
+ chan->chan_fabid = fabid;
+ chan->chan_instid = instid;
+ chan->chan_nrules = umc->umc_fdata->zufd_cs_nrules;
+ for (uint_t i = 0; i < umc->umc_fdata->zufd_cs_nrules; i++) {
+ if (zen_umc_read_dram_rule(umc, dfno, instid, i,
+ &chan->chan_rules[i]) != 0) {
+ return (-1);
+ }
+ }
+
+ for (uint_t i = 0; i < umc->umc_fdata->zufd_cs_nrules - 1; i++) {
+ int ret;
+ uint32_t offset;
+ uint64_t t;
+ df_reg_def_t off_reg;
+ chan_offset_t *offp = &chan->chan_offsets[i];
+
+ switch (umc->umc_df_rev) {
+ case DF_REV_2:
+ case DF_REV_3:
+ case DF_REV_3P5:
+ ASSERT3U(i, ==, 0);
+ off_reg = DF_DRAM_OFFSET_V2;
+ break;
+ case DF_REV_4:
+ off_reg = DF_DRAM_OFFSET_V4(i);
+ break;
+ default:
+ dev_err(umc->umc_dip, CE_WARN, "!encountered "
+ "unsupported DF revision processing DRAM Offsets: "
+ "0x%x", umc->umc_df_rev);
+ return (-1);
+ }
+
+ if ((ret = amdzen_c_df_read32(dfno, instid, off_reg,
+ &offset)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read DRAM "
+ "offset %u on 0x%x/0x%x: %d", i, dfno, instid, ret);
+ return (-1);
+ }
+
+ offp->cho_raw = offset;
+ offp->cho_valid = DF_DRAM_OFFSET_GET_EN(offset);
+
+ switch (umc->umc_df_rev) {
+ case DF_REV_2:
+ t = DF_DRAM_OFFSET_V2_GET_OFFSET(offset);
+ break;
+ case DF_REV_3:
+ case DF_REV_3P5:
+ t = DF_DRAM_OFFSET_V3_GET_OFFSET(offset);
+ break;
+ case DF_REV_4:
+ t = DF_DRAM_OFFSET_V4_GET_OFFSET(offset);
+ break;
+ default:
+ dev_err(umc->umc_dip, CE_WARN, "!encountered "
+ "unsupported DF revision processing DRAM Offsets: "
+ "0x%x", umc->umc_df_rev);
+ return (-1);
+ }
+ offp->cho_offset = t << DF_DRAM_OFFSET_SHIFT;
+ }
+
+ /*
+	 * If this platform supports our favorite Zen 3 6-channel hash special
+ * then we need to grab the NP2 configuration registers. This will only
+ * be referenced if this channel is actually being used for a 6-channel
+ * hash, so even if the contents are weird that should still be ok.
+ */
+ if ((umc->umc_fdata->zufd_flags & ZEN_UMC_FAM_F_NP2) != 0) {
+ uint32_t np2;
+ int ret;
+
+ if ((ret = amdzen_c_df_read32(dfno, instid, DF_NP2_CONFIG_V3,
+ &np2)) != 0) {
+ dev_err(umc->umc_dip, CE_WARN, "!failed to read NP2 "
+ "config: %d", ret);
+ return (-1);
+ }
+
+ chan->chan_np2_raw = np2;
+ chan->chan_np2_space0 = DF_NP2_CONFIG_V3_GET_SPACE0(np2);
+ }
+
+ /*
+ * Now that we have everything we need from the data fabric, read out
+ * the rest of what we need from the UMC channel data in SMN register
+ * space.
+ */
+ switch (umc->umc_fdata->zufd_umc_style) {
+ case ZEN_UMC_UMC_S_DDR4:
+ case ZEN_UMC_UMC_S_DDR4_APU:
+ for (uint_t i = 0; i < ZEN_UMC_MAX_DIMMS; i++) {
+ if (!zen_umc_fill_chan_dimm_ddr4(umc, df, chan, i)) {
+ return (-1);
+ }
+ }
+ break;
+ case ZEN_UMC_UMC_S_DDR5:
+ case ZEN_UMC_UMC_S_DDR5_APU:
+ for (uint_t i = 0; i < ZEN_UMC_MAX_DIMMS; i++) {
+ for (uint_t r = 0; r < ZEN_UMC_MAX_CS_PER_DIMM; r++) {
+ if (!zen_umc_fill_chan_rank_ddr5(umc, df, chan,
+ i, r)) {
+ return (-1);
+ }
+ }
+ }
+ break;
+ default:
+		dev_err(umc->umc_dip, CE_WARN, "!encountered unsupported "
+		    "UMC style: 0x%x", umc->umc_fdata->zufd_umc_style);
+ return (-1);
+ }
+
+ if (!zen_umc_fill_chan(umc, df, chan)) {
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Today there are no special privileges required to read the memory controller
+ * information; access is restricted based on file system permissions.
+ */
+static int
+zen_umc_open(dev_t *devp, int flag, int otyp, cred_t *credp)
+{
+ zen_umc_t *umc = zen_umc;
+
+ if ((flag & (FEXCL | FNDELAY | FNONBLOCK | FWRITE)) != 0) {
+ return (EINVAL);
+ }
+
+ if (otyp != OTYP_CHR) {
+ return (EINVAL);
+ }
+
+ if (getminor(*devp) >= umc->umc_ndfs) {
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
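+/*
+ * Illustrative (non-normative) sketch of how a userland consumer might drive
+ * the decode ioctl handled below. The device path is hypothetical and depends
+ * on how the "mc-umc-%u" minor nodes created in attach are linked under /dev:
+ *
+ *	mc_encode_ioc_t enc = { .mcei_pa = pa };
+ *	int fd = open("/devices/pseudo/zen_umc@0:mc-umc-0", O_RDONLY);
+ *	if (fd >= 0 && ioctl(fd, MC_IOC_DECODE_PA, &enc) == 0 &&
+ *	    enc.mcei_err == 0) {
+ *		consume enc.mcei_dimm, enc.mcei_row, enc.mcei_column, etc.
+ *	}
+ */
+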
+static void
+zen_umc_ioctl_decode(zen_umc_t *umc, mc_encode_ioc_t *encode)
+{
+ zen_umc_decoder_t dec;
+ uint32_t sock, die, comp;
+
+ bzero(&dec, sizeof (dec));
+ if (!zen_umc_decode_pa(umc, encode->mcei_pa, &dec)) {
+ encode->mcei_err = (uint32_t)dec.dec_fail;
+ encode->mcei_errdata = dec.dec_fail_data;
+ return;
+ }
+
+ encode->mcei_errdata = 0;
+ encode->mcei_err = 0;
+ encode->mcei_chan_addr = dec.dec_norm_addr;
+ encode->mcei_rank_addr = UINT64_MAX;
+ encode->mcei_board = 0;
+ zen_fabric_id_decompose(&umc->umc_decomp, dec.dec_targ_fabid, &sock,
+ &die, &comp);
+ encode->mcei_chip = sock;
+ encode->mcei_die = die;
+ encode->mcei_mc = dec.dec_umc_chan->chan_logid;
+ encode->mcei_chan = 0;
+ encode->mcei_dimm = dec.dec_dimm_no;
+ encode->mcei_row = dec.dec_dimm_row;
+ encode->mcei_column = dec.dec_dimm_col;
+ /*
+	 * We don't have a logical rank that this maps to; rather, we have the
+	 * actual chip-select and rank multiplication. If we could figure out
+	 * how to transform that into an actual rank, that'd be grand.
+ */
+ encode->mcei_rank = UINT8_MAX;
+ encode->mcei_cs = dec.dec_dimm_csno;
+ encode->mcei_rm = dec.dec_dimm_rm;
+ encode->mcei_bank = dec.dec_dimm_bank;
+ encode->mcei_bank_group = dec.dec_dimm_bank_group;
+ encode->mcei_subchan = dec.dec_dimm_subchan;
+}
+
+static void
+umc_decoder_pack(zen_umc_t *umc)
+{
+ char *buf = NULL;
+ size_t len = 0;
+
+ ASSERT(MUTEX_HELD(&umc->umc_nvl_lock));
+ if (umc->umc_decoder_buf != NULL) {
+ return;
+ }
+
+ if (umc->umc_decoder_nvl == NULL) {
+ umc->umc_decoder_nvl = zen_umc_dump_decoder(umc);
+ if (umc->umc_decoder_nvl == NULL) {
+ return;
+ }
+ }
+
+ if (nvlist_pack(umc->umc_decoder_nvl, &buf, &len, NV_ENCODE_XDR,
+ KM_NOSLEEP_LAZY) != 0) {
+ return;
+ }
+
+ umc->umc_decoder_buf = buf;
+ umc->umc_decoder_len = len;
+}
+
+static int
+zen_umc_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
+ int *rvalp)
+{
+ int ret;
+ zen_umc_t *umc = zen_umc;
+ mc_encode_ioc_t encode;
+ mc_snapshot_info_t info;
+
+ if (getminor(dev) >= umc->umc_ndfs) {
+ return (ENXIO);
+ }
+
+ switch (cmd) {
+ case MC_IOC_DECODE_PA:
+ if (crgetzoneid(credp) != GLOBAL_ZONEID ||
+ drv_priv(credp) != 0) {
+ ret = EPERM;
+ break;
+ }
+
+ if (ddi_copyin((void *)arg, &encode, sizeof (encode),
+ mode & FKIOCTL) != 0) {
+ ret = EFAULT;
+ break;
+ }
+
+ zen_umc_ioctl_decode(umc, &encode);
+ ret = 0;
+
+ if (ddi_copyout(&encode, (void *)arg, sizeof (encode),
+ mode & FKIOCTL) != 0) {
+ ret = EFAULT;
+ break;
+ }
+ break;
+ case MC_IOC_DECODE_SNAPSHOT_INFO:
+ mutex_enter(&umc->umc_nvl_lock);
+ umc_decoder_pack(umc);
+
+ if (umc->umc_decoder_buf == NULL) {
+ mutex_exit(&umc->umc_nvl_lock);
+ ret = EIO;
+ break;
+ }
+
+ if (umc->umc_decoder_len > UINT32_MAX) {
+ mutex_exit(&umc->umc_nvl_lock);
+ ret = EOVERFLOW;
+ break;
+ }
+
+ info.mcs_size = umc->umc_decoder_len;
+ info.mcs_gen = 0;
+ if (ddi_copyout(&info, (void *)arg, sizeof (info),
+ mode & FKIOCTL) != 0) {
+ mutex_exit(&umc->umc_nvl_lock);
+ ret = EFAULT;
+ break;
+ }
+
+ mutex_exit(&umc->umc_nvl_lock);
+ ret = 0;
+ break;
+ case MC_IOC_DECODE_SNAPSHOT:
+ mutex_enter(&umc->umc_nvl_lock);
+ umc_decoder_pack(umc);
+
+ if (umc->umc_decoder_buf == NULL) {
+ mutex_exit(&umc->umc_nvl_lock);
+ ret = EIO;
+ break;
+ }
+
+ if (ddi_copyout(umc->umc_decoder_buf, (void *)arg,
+ umc->umc_decoder_len, mode & FKIOCTL) != 0) {
+ mutex_exit(&umc->umc_nvl_lock);
+ ret = EFAULT;
+ break;
+ }
+
+ mutex_exit(&umc->umc_nvl_lock);
+ ret = 0;
+ break;
+ default:
+ ret = ENOTTY;
+ break;
+ }
+
+ return (ret);
+}
+
+static int
+zen_umc_close(dev_t dev, int flag, int otyp, cred_t *credp)
+{
+ return (0);
+}
+
+static void
+zen_umc_cleanup(zen_umc_t *umc)
+{
+ nvlist_free(umc->umc_decoder_nvl);
+ umc->umc_decoder_nvl = NULL;
+ if (umc->umc_decoder_buf != NULL) {
+ kmem_free(umc->umc_decoder_buf, umc->umc_decoder_len);
+ umc->umc_decoder_buf = NULL;
+ umc->umc_decoder_len = 0;
+ }
+
+ if (umc->umc_dip != NULL) {
+ ddi_remove_minor_node(umc->umc_dip, NULL);
+ }
+ mutex_destroy(&umc->umc_nvl_lock);
+ kmem_free(umc, sizeof (zen_umc_t));
+}
+
+static int
+zen_umc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int ret;
+ zen_umc_t *umc;
+
+ if (cmd == DDI_RESUME) {
+ return (DDI_SUCCESS);
+ } else if (cmd != DDI_ATTACH) {
+ return (DDI_FAILURE);
+ }
+ if (zen_umc != NULL) {
+ dev_err(dip, CE_WARN, "!zen_umc is already attached to a "
+ "dev_info_t: %p", zen_umc->umc_dip);
+ return (DDI_FAILURE);
+ }
+
+ /*
+	 * To get us going, we need to do several bits of setup. First, we need
+ * to use the knowledge about the actual hardware that we're using to
+ * encode a bunch of different data:
+ *
+ * o The set of register styles and extra hardware features that exist
+ * on the hardware platform.
+ * o The number of actual rules there are for the CCMs and UMCs.
+ * o How many actual things exist (DFs, etc.)
+ * o Useful fabric and instance IDs for all of the different UMC
+ * entries so we can actually talk to them.
+ *
+ * Only once we have all the above will we go dig into the actual data.
+ */
+ umc = kmem_zalloc(sizeof (zen_umc_t), KM_SLEEP);
+ mutex_init(&umc->umc_nvl_lock, NULL, MUTEX_DRIVER, NULL);
+ umc->umc_family = amdzen_c_family();
+ umc->umc_ndfs = amdzen_c_df_count();
+ umc->umc_dip = dip;
+
+ if (!zen_umc_identify(umc)) {
+ dev_err(dip, CE_WARN, "!encountered unsupported CPU");
+ goto err;
+ }
+
+ umc->umc_df_rev = amdzen_c_df_rev();
+ switch (umc->umc_df_rev) {
+ case DF_REV_2:
+ case DF_REV_3:
+ case DF_REV_3P5:
+ case DF_REV_4:
+ break;
+ default:
+ dev_err(dip, CE_WARN, "!encountered unknown DF revision: %x",
+ umc->umc_df_rev);
+ goto err;
+ }
+
+ if ((ret = amdzen_c_df_fabric_decomp(&umc->umc_decomp)) != 0) {
+ dev_err(dip, CE_WARN, "!failed to get fabric decomposition: %d",
+ ret);
+ }
+
+ umc->umc_tom = rdmsr(MSR_AMD_TOM);
+ umc->umc_tom2 = rdmsr(MSR_AMD_TOM2);
+
+ /*
+ * For each DF, start by reading all of the data that we need from it.
+ * This involves finding a target CCM, reading all of the rules,
+ * ancillary settings, and related. Then we'll do a pass over all of the
+ * actual UMC targets there.
+ */
+ for (uint_t i = 0; i < umc->umc_ndfs; i++) {
+ if (amdzen_c_df_iter(i, ZEN_DF_TYPE_CCM_CPU,
+ zen_umc_fill_ccm_cb, umc) < 0 ||
+ amdzen_c_df_iter(i, ZEN_DF_TYPE_CS_UMC, zen_umc_fill_umc_cb,
+ umc) != 0) {
+ goto err;
+ }
+ }
+
+ /*
+ * Create a minor node for each df that we encounter.
+ */
+ for (uint_t i = 0; i < umc->umc_ndfs; i++) {
+ int ret;
+ char minor[64];
+
+ (void) snprintf(minor, sizeof (minor), "mc-umc-%u", i);
+ if ((ret = ddi_create_minor_node(umc->umc_dip, minor, S_IFCHR,
+ i, "ddi_mem_ctrl", 0)) != 0) {
+ dev_err(dip, CE_WARN, "!failed to create minor %s: %d",
+ minor, ret);
+ goto err;
+ }
+ }
+
+ zen_umc = umc;
+ return (DDI_SUCCESS);
+
+err:
+ zen_umc_cleanup(umc);
+ return (DDI_FAILURE);
+}
+
+static int
+zen_umc_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
+{
+ zen_umc_t *umc;
+
+ if (zen_umc == NULL || zen_umc->umc_dip == NULL) {
+ return (DDI_FAILURE);
+ }
+ umc = zen_umc;
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ *resultp = (void *)umc->umc_dip;
+ break;
+ case DDI_INFO_DEVT2INSTANCE:
+ *resultp = (void *)(uintptr_t)ddi_get_instance(
+ umc->umc_dip);
+ break;
+ default:
+ return (DDI_FAILURE);
+ }
+ return (DDI_SUCCESS);
+}
+
+static int
+zen_umc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ zen_umc_t *umc;
+
+ if (cmd == DDI_SUSPEND) {
+ return (DDI_SUCCESS);
+ } else if (cmd != DDI_DETACH) {
+ return (DDI_FAILURE);
+ }
+
+ if (zen_umc == NULL) {
+ dev_err(dip, CE_WARN, "!asked to detach zen_umc, but it "
+ "was never successfully attached");
+ return (DDI_FAILURE);
+ }
+
+ umc = zen_umc;
+ zen_umc = NULL;
+ zen_umc_cleanup(umc);
+ return (DDI_SUCCESS);
+}
+
+static struct cb_ops zen_umc_cb_ops = {
+ .cb_open = zen_umc_open,
+ .cb_close = zen_umc_close,
+ .cb_strategy = nodev,
+ .cb_print = nodev,
+ .cb_dump = nodev,
+ .cb_read = nodev,
+ .cb_write = nodev,
+ .cb_ioctl = zen_umc_ioctl,
+ .cb_devmap = nodev,
+ .cb_mmap = nodev,
+ .cb_segmap = nodev,
+ .cb_chpoll = nochpoll,
+ .cb_prop_op = ddi_prop_op,
+ .cb_flag = D_MP,
+ .cb_rev = CB_REV,
+ .cb_aread = nodev,
+ .cb_awrite = nodev
+};
+
+static struct dev_ops zen_umc_dev_ops = {
+ .devo_rev = DEVO_REV,
+ .devo_refcnt = 0,
+ .devo_getinfo = zen_umc_getinfo,
+ .devo_identify = nulldev,
+ .devo_probe = nulldev,
+ .devo_attach = zen_umc_attach,
+ .devo_detach = zen_umc_detach,
+ .devo_reset = nodev,
+ .devo_quiesce = ddi_quiesce_not_needed,
+ .devo_cb_ops = &zen_umc_cb_ops
+};
+
+static struct modldrv zen_umc_modldrv = {
+ .drv_modops = &mod_driverops,
+ .drv_linkinfo = "AMD Zen Unified Memory Controller",
+ .drv_dev_ops = &zen_umc_dev_ops
+};
+
+static struct modlinkage zen_umc_modlinkage = {
+ .ml_rev = MODREV_1,
+ .ml_linkage = { &zen_umc_modldrv, NULL }
+};
+
+int
+_init(void)
+{
+ return (mod_install(&zen_umc_modlinkage));
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&zen_umc_modlinkage, modinfop));
+}
+
+int
+_fini(void)
+{
+ return (mod_remove(&zen_umc_modlinkage));
+}
diff --git a/usr/src/uts/intel/io/amdzen/zen_umc.h b/usr/src/uts/intel/io/amdzen/zen_umc.h
new file mode 100644
index 0000000000..d4f2127d74
--- /dev/null
+++ b/usr/src/uts/intel/io/amdzen/zen_umc.h
@@ -0,0 +1,634 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+#ifndef _ZEN_UMC_H
+#define _ZEN_UMC_H
+
+/*
+ * This file contains definitions that are used to manage and decode the Zen UMC
+ * state.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/stdint.h>
+#include <sys/sunddi.h>
+#include <sys/nvpair.h>
+#include <amdzen_client.h>
+
+/*
+ * This is the maximum number of DRAM rules that we expect any supported device
+ * to have here. The actual number may be less. These are rules that come from a
+ * DF CCM.
+ */
+#define ZEN_UMC_MAX_DRAM_RULES 20
+
+/*
+ * This is the maximum number of rules that we expect any system to actually
+ * have for each UMC.
+ */
+#define ZEN_UMC_MAX_CS_RULES 4
+
+/*
+ * This is the maximum number of DFs that we expect to encounter in a given
+ * platform. This number comes from the Naples generation, where there were up
+ * to 4 per socket, 2 sockets per machine, so 8 total. In subsequent generations
+ * there is only a single DF per socket.
+ */
+#define ZEN_UMC_MAX_DFS 8
+
+/*
+ * This indicates the maximum number of UMC DF nodes that we expect to
+ * encounter.
+ */
+#define ZEN_UMC_MAX_UMCS 12
+
+/*
+ * This indicates the maximum number of DRAM offset rules that can exist in a
+ * platform. Note, this is directly tied to the maximum number of CS rules.
+ */
+#define ZEN_UMC_MAX_DRAM_OFFSET (ZEN_UMC_MAX_CS_RULES - 1)
+
+/*
+ * This indicates the maximum number of remap rule sets and corresponding
+ * entries that can exist. Milan's max is smaller than the current overall DFv4
+ * maximum.
+ */
+#define ZEN_UMC_MAX_CS_REMAPS 4
+#define ZEN_UMC_MAX_REMAP_ENTS 16
+#define ZEN_UMC_MILAN_CS_NREMAPS 2
+#define ZEN_UMC_MILAN_REMAP_ENTS 12
+#define ZEN_UMC_REMAP_PER_REG 8
+
+/*
+ * DRAM Channel related maximums.
+ */
+#define ZEN_UMC_MAX_DIMMS 2
+#define ZEN_UMC_MAX_CS_PER_DIMM 2
+#define ZEN_UMC_MAX_CS_BITS 2
+#define ZEN_UMC_MAX_CHAN_BASE 2
+#define ZEN_UMC_MAX_CHAN_MASK 2
+#define ZEN_UMC_MAX_BANK_BITS 5
+#define ZEN_UMC_MAX_COL_BITS 16
+#define ZEN_UMC_MAX_RM_BITS 4
+#define ZEN_UMC_MAX_COLSEL_PER_REG 8
+
+#define ZEN_UMC_DDR4_CHAN_NMASKS 1
+
+/*
+ * DRAM Channel hash maximums. Surprisingly enough, the DDR4 and DDR5 maximums
+ * are the same; however, which hashes are actually implemented varies between
+ * them.
+ */
+#define ZEN_UMC_MAX_CHAN_BANK_HASH 5
+#define ZEN_UMC_MAX_CHAN_RM_HASH 3
+#define ZEN_UMC_MAX_CHAN_CS_HASH 2
+
+/*
+ * This is the logical set of different channel interleaving rules that we
+ * support today in the driver. The actual values of the enumeration do not
+ * overlap at all with hardware. Do not use these to try and marry up against
+ * values from the DF itself.
+ *
+ * Note, these values are also encoded in the private mc decoder dumps that we
+ * can produce. If these values change, please take care of ensuring
+ * compatibility for others who may be consuming this. Appending to this list
+ * should be OK.
+ */
+typedef enum df_chan_ileave {
+ DF_CHAN_ILEAVE_1CH = 0,
+ DF_CHAN_ILEAVE_2CH,
+ DF_CHAN_ILEAVE_4CH,
+ DF_CHAN_ILEAVE_6CH,
+ DF_CHAN_ILEAVE_8CH,
+ DF_CHAN_ILEAVE_16CH,
+ DF_CHAN_ILEAVE_32CH,
+ DF_CHAN_ILEAVE_COD4_2CH,
+ DF_CHAN_ILEAVE_COD2_4CH,
+ DF_CHAN_ILEAVE_COD1_8CH,
+ DF_CHAN_ILEAVE_NPS4_2CH,
+ DF_CHAN_ILEAVE_NPS2_4CH,
+ DF_CHAN_ILEAVE_NPS1_8CH,
+ DF_CHAN_ILEAVE_NPS4_3CH,
+ DF_CHAN_ILEAVE_NPS2_6CH,
+ DF_CHAN_ILEAVE_NPS1_12CH,
+ DF_CHAN_ILEAVE_NPS2_5CH,
+ DF_CHAN_ILEAVE_NPS1_10CH
+} df_chan_ileave_t;
+
+/*
+ * This is a collection of logical flags that we use to cover attributes of a
+ * DRAM rule.
+ */
+typedef enum df_dram_flags {
+ /*
+ * Used to indicate that the contents of the rule are actually valid and
+ * should be considered. Many rules can be unused in hardware.
+ */
+ DF_DRAM_F_VALID = 1 << 0,
+ /*
+ * Indicates that the DRAM hole is active for this particular rule. If
+ * this flag is set and the hole is valid in the DF, then we need to
+ * take the actual DRAM hole into account.
+ */
+ DF_DRAM_F_HOLE = 1 << 1,
+ /*
+	 * These next three are used to indicate, when hashing is enabled,
+	 * which address bits to use. They correspond to hashing at 64K
+	 * (bits 16-18), 2M (bits 21-23), and 1G (bits 30-32) granularities
+	 * respectively.
+ */
+ DF_DRAM_F_HASH_16_18 = 1 << 2,
+ DF_DRAM_F_HASH_21_23 = 1 << 3,
+ DF_DRAM_F_HASH_30_32 = 1 << 4,
+ /*
+ * Indicates that this rule should have remap processing and the remap
+ * target is valid. If the DF_DRAM_F_REMAP_SOCK flag is set, this
+ * indicates that the processing is based on socket versus a particular
+ * entry.
+ */
+ DF_DRAM_F_REMAP_EN = 1 << 5,
+ DF_DRAM_F_REMAP_SOCK = 1 << 6
+} df_dram_flags_t;
+
+/*
+ * This represents a single offset value for a channel. This is used when
+ * applying normalization.
+ */
+typedef struct chan_offset {
+ uint32_t cho_raw;
+ boolean_t cho_valid;
+ uint64_t cho_offset;
+} chan_offset_t;
+
+/*
+ * This structure represents a single DRAM rule, no matter where it shows up.
+ * This smooths over the differences between generations.
+ */
+typedef struct df_dram_rule {
+ uint32_t ddr_raw_base;
+ uint32_t ddr_raw_limit;
+ uint32_t ddr_raw_ctrl;
+ uint32_t ddr_raw_ileave;
+ df_dram_flags_t ddr_flags;
+ uint64_t ddr_base;
+ uint64_t ddr_limit;
+ uint16_t ddr_dest_fabid;
+ uint8_t ddr_sock_ileave_bits;
+ uint8_t ddr_die_ileave_bits;
+ uint8_t ddr_addr_start;
+ uint8_t ddr_remap_ent;
+ df_chan_ileave_t ddr_chan_ileave;
+} df_dram_rule_t;
+
+typedef struct umc_dimm_base {
+ uint64_t udb_base;
+ boolean_t udb_valid;
+} umc_dimm_base_t;
+
+typedef enum umc_dimm_type {
+ UMC_DIMM_T_UNKNOWN,
+ UMC_DIMM_T_DDR4,
+ UMC_DIMM_T_LPDDR4,
+ UMC_DIMM_T_DDR5,
+ UMC_DIMM_T_LPDDR5
+} umc_dimm_type_t;
+
+typedef enum umc_dimm_width {
+ UMC_DIMM_W_X4,
+ UMC_DIMM_W_X8,
+ UMC_DIMM_W_X16,
+} umc_dimm_width_t;
+
+typedef enum umc_dimm_kind {
+ UMC_DIMM_K_UDIMM,
+ UMC_DIMM_K_RDIMM,
+ UMC_DIMM_K_LRDIMM,
+ UMC_DIMM_K_3DS_RDIMM
+} umc_dimm_kind_t;
+
+typedef enum umc_dimm_flags {
+ /*
+ * This flag indicates that this DIMM should be used for decoding
+ * purposes. It basically means that there is at least one chip-select
+ * decoding register that has been enabled. Unfortunately, we don't have
+ * a good way right now of distinguishing between a DIMM being present
+ * and being usable. This likely needs to be re-evaluated when we
+ * consider how we present things to topo. We may be able to pull this
+ * out of the clock disable logic.
+ */
+ UMC_DIMM_F_VALID = 1 << 0,
+} umc_dimm_flags_t;
+
+/*
+ * A DIMM may have one or more ranks, which is an independent logical item that
+ * is activated by a 'chip-select' signal on a DIMM (e.g. CS_L[1:0]). In a given
+ * channel, AMD always has two instances of a 'chip-select' data structure.
+ * While these have a 1:1 correspondence in the case of single and dual rank
+ * DIMMs, in the case where there are more, then rank multiplication rules are
+ * used to determine which of the additional chip and chip-select signals to
+ * actually drive on the bus. But still, there are only up to two of these
+ * structures. To match AMD terminology we call these a 'chip-select' or
+ * 'umc_cs_t'.
+ *
+ * The amount of information that exists on a per-chip-select and per-DIMM basis
+ * varies between the different memory controller generations. As such, we
+ * normalize things such that a given chip-select always has all of the
+ * information related to it, duplicating it in the DDR4 case.
+ *
+ * While DDR5 adds the notion of sub-channels, a single chip-select is used to
+ * cover both sub-channels and instead a bit in the normalized address (and
+ * hashing) is used to determine which sub-channel to activate. So while the
+ * hardware actually has different chip-select lines for each sub-channel, they
+ * are not represented that way in the UMC.
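+ *
+ * As an illustrative example: a dual-rank DIMM consumes both umc_cs_t entries
+ * directly, one per rank, while a (hypothetical) 8-rank 3DS RDIMM would still
+ * only have these two structures and would rely on the rank-multiplication
+ * bits (ucs_rm_bits) to select the additional chip ID signals on the bus.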
+ */
+typedef struct umc_cs {
+ umc_dimm_base_t ucs_base;
+ umc_dimm_base_t ucs_sec;
+ uint64_t ucs_base_mask;
+ uint64_t ucs_sec_mask;
+ uint8_t ucs_nbanks;
+ uint8_t ucs_ncol;
+ uint8_t ucs_nrow_lo;
+ uint8_t ucs_nrow_hi;
+ uint8_t ucs_nrm;
+ uint8_t ucs_nbank_groups;
+ uint8_t ucs_cs_xor;
+ uint8_t ucs_row_hi_bit;
+ uint8_t ucs_row_low_bit;
+ uint8_t ucs_bank_bits[ZEN_UMC_MAX_BANK_BITS];
+ uint8_t ucs_col_bits[ZEN_UMC_MAX_COL_BITS];
+ uint8_t ucs_inv_msbs;
+ uint8_t ucs_rm_bits[ZEN_UMC_MAX_RM_BITS];
+ uint8_t ucs_inv_msbs_sec;
+ uint8_t ucs_rm_bits_sec[ZEN_UMC_MAX_RM_BITS];
+ uint8_t ucs_subchan;
+} umc_cs_t;
+
+/*
+ * This structure represents information about a DIMM. Most of the interesting
+ * stuff is on the umc_cs_t up above, which is the logical 'chip-select' that
+ * AMD implements in the UMC.
+ *
+ * When we come back and add topo glue for the driver, we should consider adding
+ * the following information here and in the channel:
+ *
+ * o Configured DIMM speed
+ * o Channel capable speed
+ * o Calculated size
+ * o A way to map this DIMM to an SMBIOS / SPD style entry
+ */
+typedef struct umc_dimm {
+ umc_dimm_flags_t ud_flags;
+ umc_dimm_width_t ud_width;
+ umc_dimm_type_t ud_type;
+ umc_dimm_kind_t ud_kind;
+ uint32_t ud_dimmno;
+ uint32_t ud_dimmcfg_raw;
+ umc_cs_t ud_cs[ZEN_UMC_MAX_CS_PER_DIMM];
+} umc_dimm_t;
+
+typedef enum umc_chan_flags {
+ /*
+ * Indicates that the channel has enabled ECC logic.
+ */
+ UMC_CHAN_F_ECC_EN = 1 << 0,
+ /*
+ * We believe that this indicates some amount of the AMD SEV encryption
+ * is ongoing, leveraging some of the page-table control.
+ */
+ UMC_CHAN_F_ENCR_EN = 1 << 1,
+ /*
+ * Indicates that the channel is employing data scrambling. This is
+ * basically what folks have called Transparent Shared Memory
+ * Encryption.
+ */
+ UMC_CHAN_F_SCRAMBLE_EN = 1 << 2
+} umc_chan_flags_t;
+
+typedef struct umc_bank_hash {
+ uint32_t ubh_row_xor;
+ uint32_t ubh_col_xor;
+ boolean_t ubh_en;
+} umc_bank_hash_t;
+
+typedef struct umc_addr_hash {
+ uint64_t uah_addr_xor;
+ boolean_t uah_en;
+} umc_addr_hash_t;
+
+typedef struct umc_pc_hash {
+ uint32_t uph_row_xor;
+ uint32_t uph_col_xor;
+ uint8_t uph_bank_xor;
+ boolean_t uph_en;
+} umc_pc_hash_t;
+
+typedef enum umc_chan_hash_flags {
+ UMC_CHAN_HASH_F_BANK = 1 << 0,
+ UMC_CHAN_HASH_F_RM = 1 << 1,
+ UMC_CHAN_HASH_F_PC = 1 << 2,
+ UMC_CHAN_HASH_F_CS = 1 << 3,
+} umc_chan_hash_flags_t;
+
+typedef struct umc_chan_hash {
+ umc_chan_hash_flags_t uch_flags;
+ umc_bank_hash_t uch_bank_hashes[ZEN_UMC_MAX_CHAN_BANK_HASH];
+ umc_addr_hash_t uch_rm_hashes[ZEN_UMC_MAX_CHAN_RM_HASH];
+ umc_addr_hash_t uch_cs_hashes[ZEN_UMC_MAX_CHAN_CS_HASH];
+ umc_pc_hash_t uch_pc_hash;
+} umc_chan_hash_t;
+
+/*
+ * This structure represents the overall memory channel. There is a 1:1
+ * relationship between these structures and discovered UMC hardware entities
+ * on the data fabric. Note, these always exist regardless of whether the
+ * channels are actually implemented on a PCB or not.
+ */
+typedef struct zen_umc_chan {
+ umc_chan_flags_t chan_flags;
+ uint32_t chan_fabid;
+ uint32_t chan_instid;
+ uint32_t chan_logid;
+ uint32_t chan_nrules;
+ uint32_t chan_umccfg_raw;
+ uint32_t chan_datactl_raw;
+ uint32_t chan_eccctl_raw;
+ uint32_t chan_umccap_raw;
+ uint32_t chan_umccap_hi_raw;
+ uint32_t chan_np2_raw;
+ uint32_t chan_np2_space0;
+ df_dram_rule_t chan_rules[ZEN_UMC_MAX_CS_RULES];
+ chan_offset_t chan_offsets[ZEN_UMC_MAX_DRAM_OFFSET];
+ umc_dimm_t chan_dimms[ZEN_UMC_MAX_DIMMS];
+ umc_chan_hash_t chan_hash;
+} zen_umc_chan_t;
+
+typedef struct zen_umc_cs_remap {
+ uint_t csr_nremaps;
+ uint16_t csr_remaps[ZEN_UMC_MAX_REMAP_ENTS];
+} zen_umc_cs_remap_t;
+
+typedef enum zen_umc_df_flags {
+ /*
+ * Indicates that the DRAM Hole is valid and in use.
+ */
+ ZEN_UMC_DF_F_HOLE_VALID = 1 << 0,
+ /*
+	 * These next three are used to indicate, when hashing is enabled,
+	 * which address bits to use. They correspond to hashing at 64K
+	 * (bits 16-18), 2M (bits 21-23), and 1G (bits 30-32) granularities
+	 * respectively.
+ */
+ ZEN_UMC_DF_F_HASH_16_18 = 1 << 1,
+ ZEN_UMC_DF_F_HASH_21_23 = 1 << 2,
+ ZEN_UMC_DF_F_HASH_30_32 = 1 << 3
+} zen_umc_df_flags_t;
+
+typedef struct zen_umc_df {
+ zen_umc_df_flags_t zud_flags;
+ uint_t zud_dfno;
+ uint_t zud_ccm_inst;
+ uint_t zud_dram_nrules;
+ uint_t zud_nchan;
+ uint_t zud_cs_nremap;
+ uint32_t zud_hole_raw;
+ uint32_t zud_glob_ctl_raw;
+ uint64_t zud_hole_base;
+ df_dram_rule_t zud_rules[ZEN_UMC_MAX_DRAM_RULES];
+ zen_umc_cs_remap_t zud_remap[ZEN_UMC_MAX_CS_REMAPS];
+ zen_umc_chan_t zud_chan[ZEN_UMC_MAX_UMCS];
+} zen_umc_df_t;
+
+typedef enum zen_umc_umc_style {
+ ZEN_UMC_UMC_S_DDR4,
+ ZEN_UMC_UMC_S_DDR4_APU,
+ ZEN_UMC_UMC_S_DDR5,
+ ZEN_UMC_UMC_S_DDR5_APU
+} zen_umc_umc_style_t;
+
+typedef enum zen_umc_fam_flags {
+ /*
+ * Indicates that there's an indirection table for the destinations of
+ * target rules.
+ */
+ ZEN_UMC_FAM_F_TARG_REMAP = 1 << 0,
+ /*
+ * Indicates that non-power of two interleave rules are supported and
+ * that we need additional register configuration.
+ */
+ ZEN_UMC_FAM_F_NP2 = 1 << 1,
+ /*
+ * Indicates that the DF hashing rules to configure COD hashes need to
+ * be checked.
+ */
+ ZEN_UMC_FAM_F_NORM_HASH = 1 << 2,
+ /*
+ * In DDR4 this indicates presence of the HashRM and in DDR5 the
+ * AddrHash.
+ */
+ ZEN_UMC_FAM_F_UMC_HASH = 1 << 3,
+ /*
+ * Indicates support for extended UMC registers for larger addresses.
+ * Generally on Server parts.
+ */
+ ZEN_UMC_FAM_F_UMC_EADDR = 1 << 4,
+ /*
+ * Indicates that CS decoder supports an XOR function.
+ */
+ ZEN_UMC_FAM_F_CS_XOR = 1 << 5
+} zen_umc_fam_flags_t;
+
+/*
+ * This structure is meant to contain per SoC family (not CPUID family)
+ * information. This is what we need to encode about the processor itself: its
+ * limits, the style it operates in, the way it works, etc.
+ */
+typedef struct zen_umc_fam_data {
+ zen_family_t zufd_family;
+ zen_umc_fam_flags_t zufd_flags;
+ uint8_t zufd_dram_nrules;
+ uint8_t zufd_cs_nrules;
+ zen_umc_umc_style_t zufd_umc_style;
+ umc_chan_hash_flags_t zufd_chan_hash;
+} zen_umc_fam_data_t;
+
+/*
+ * The top-level data structure for the system. This is a single structure that
+ * represents everything that could possibly exist and is filled in with what we
+ * actually discover.
+ */
+typedef struct zen_umc {
+ uint64_t umc_tom;
+ uint64_t umc_tom2;
+ dev_info_t *umc_dip;
+ zen_family_t umc_family;
+ df_rev_t umc_df_rev;
+ const zen_umc_fam_data_t *umc_fdata;
+ df_fabric_decomp_t umc_decomp;
+ uint_t umc_ndfs;
+ zen_umc_df_t umc_dfs[ZEN_UMC_MAX_DFS];
+ /*
+ * This lock protects the data underneath here.
+ */
+ kmutex_t umc_nvl_lock;
+ nvlist_t *umc_decoder_nvl;
+ char *umc_decoder_buf;
+ size_t umc_decoder_len;
+} zen_umc_t;
+
+typedef enum zen_umc_decode_failure {
+ ZEN_UMC_DECODE_F_NONE = 0,
+ /*
+ * Indicates that the address was not contained within the TOM and TOM2
+ * regions that indicate DRAM (or was in a reserved hole).
+ */
+ ZEN_UMC_DECODE_F_OUTSIDE_DRAM,
+ /*
+	 * Indicates that we could not find a DF DRAM rule among the CCM's
+	 * rules that claims to honor this address.
+ */
+ ZEN_UMC_DECODE_F_NO_DF_RULE,
+ /*
+ * Indicates that trying to construct the interleave address to use
+ * would have led to an underflow somehow.
+ */
+ ZEN_UMC_DECODE_F_ILEAVE_UNDERFLOW,
+ /*
+ * Indicates that we do not currently support decoding the indicated
+ * channel interleave type.
+ */
+ ZEN_UMC_DECODE_F_CHAN_ILEAVE_NOTSUP,
+ /*
+ * Indicates that we found a COD hash rule that had a non-zero socket or
+ * die interleave, which isn't supported and we don't know how to
+ * decode.
+ */
+ ZEN_UMC_DECODE_F_COD_BAD_ILEAVE,
+ /*
+ * This is similar to the above, but indicates that we hit a bad NPS
+ * interleave rule instead of a COD.
+ */
+ ZEN_UMC_DECODE_F_NPS_BAD_ILEAVE,
+ /*
+ * Indicates that somehow we thought we should use a remap rule set that
+ * was beyond our capabilities.
+ */
+ ZEN_UMC_DECODE_F_BAD_REMAP_SET,
+ /*
+ * Indicates that we tried to find an index for the remap rules;
+ * however, the logical component ID was outside the range of the number
+ * of entries that we have.
+ */
+ ZEN_UMC_DECODE_F_BAD_REMAP_ENTRY,
+ /*
+ * Indicates that the remap rule had an invalid component bit set in its
+ * mask.
+ */
+ ZEN_UMC_DECODE_F_REMAP_HAS_BAD_COMP,
+ /*
+ * Indicates that we could not find a UMC with the fabric ID we thought
+ * we should have.
+ */
+ ZEN_UMC_DECODE_F_CANNOT_MAP_FABID,
+ /*
+ * Indicates that somehow the UMC we found did not actually contain a
+ * DRAM rule that covered our original PA.
+ */
+ ZEN_UMC_DECODE_F_UMC_DOESNT_HAVE_PA,
+ /*
+ * Indicates that we would have somehow underflowed the address
+ * calculations normalizing the system address.
+ */
+ ZEN_UMC_DECODE_F_CALC_NORM_UNDERFLOW,
+ /*
+ * Indicates that none of the UMC's chip-selects actually matched a base
+ * or secondary.
+ */
+ ZEN_UMC_DECODE_F_NO_CS_BASE_MATCH
+} zen_umc_decode_failure_t;
+
+/*
+ * This struct accumulates all of our decoding logic and state. We use it so
+ * it's easier to look at what's going on and the decisions that we made along
+ * the way.
+ */
+typedef struct zen_umc_decoder {
+ zen_umc_decode_failure_t dec_fail;
+ uint64_t dec_fail_data;
+ uint64_t dec_pa;
+ const zen_umc_df_t *dec_df_rulesrc;
+ uint32_t dec_df_ruleno;
+ const df_dram_rule_t *dec_df_rule;
+ uint64_t dec_ilv_pa;
+ /*
+ * These three values represent the IDs that we extract from the
+ * interleave address.
+ */
+ uint32_t dec_ilv_sock;
+ uint32_t dec_ilv_die;
+ uint32_t dec_ilv_chan;
+ uint32_t dec_ilv_fabid;
+ uint32_t dec_log_fabid;
+ uint32_t dec_remap_comp;
+ uint32_t dec_targ_fabid;
+ const zen_umc_chan_t *dec_umc_chan;
+ uint32_t dec_umc_ruleno;
+ uint64_t dec_norm_addr;
+ const umc_dimm_t *dec_dimm;
+ const umc_cs_t *dec_cs;
+ boolean_t dec_cs_sec;
+ uint32_t dec_dimm_col;
+ uint32_t dec_dimm_row;
+ uint8_t dec_log_csno;
+ uint8_t dec_dimm_bank;
+ uint8_t dec_dimm_bank_group;
+ uint8_t dec_dimm_subchan;
+ uint8_t dec_dimm_rm;
+ uint8_t dec_chan_csno;
+ uint8_t dec_dimm_no;
+ uint8_t dec_dimm_csno;
+} zen_umc_decoder_t;
+
+/*
+ * Decoding and normalization routines.
+ */
+extern boolean_t zen_umc_decode_pa(const zen_umc_t *, const uint64_t,
+ zen_umc_decoder_t *);
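+
+/*
+ * A minimal usage sketch (not part of this change), assuming a populated
+ * zen_umc_t pointer "umc", a system address "pa", and that the decoder is
+ * zeroed before use (the zeroing is an assumption here):
+ *
+ *	zen_umc_decoder_t dec;
+ *
+ *	bzero(&dec, sizeof (dec));
+ *	if (zen_umc_decode_pa(umc, pa, &dec)) {
+ *		... dec.dec_dimm_no, dec.dec_dimm_row, dec.dec_dimm_col ...
+ *	} else {
+ *		... dec.dec_fail and dec.dec_fail_data describe the failure ...
+ *	}
+ */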
+
+/*
+ * Fabric ID utilities
+ */
+extern boolean_t zen_fabric_id_valid_fabid(const df_fabric_decomp_t *,
+ const uint32_t);
+extern boolean_t zen_fabric_id_valid_parts(const df_fabric_decomp_t *,
+ const uint32_t, const uint32_t, const uint32_t);
+extern void zen_fabric_id_decompose(const df_fabric_decomp_t *, const uint32_t,
+ uint32_t *, uint32_t *, uint32_t *);
+extern void zen_fabric_id_compose(const df_fabric_decomp_t *, const uint32_t,
+ const uint32_t, const uint32_t, uint32_t *);
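+
+/*
+ * Usage sketch (not part of this change), assuming a df_fabric_decomp_t
+ * "decomp" filled in from the DF fabric ID mask registers, a valid fabric ID
+ * "fabid", and that the out-parameters are socket, die, and component in that
+ * order (an assumption based on the names above):
+ *
+ *	uint32_t sock, die, comp, check;
+ *
+ *	zen_fabric_id_decompose(&decomp, fabid, &sock, &die, &comp);
+ *	zen_fabric_id_compose(&decomp, sock, die, comp, &check);
+ *	ASSERT3U(check, ==, fabid);
+ */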
+
+/*
+ * Encoding and decoding
+ */
+extern nvlist_t *zen_umc_dump_decoder(zen_umc_t *);
+extern boolean_t zen_umc_restore_decoder(nvlist_t *, zen_umc_t *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZEN_UMC_H */
diff --git a/usr/src/uts/intel/io/imc/imc.c b/usr/src/uts/intel/io/imc/imc.c
index 67a14528f0..70778db13c 100644
--- a/usr/src/uts/intel/io/imc/imc.c
+++ b/usr/src/uts/intel/io/imc/imc.c
@@ -11,6 +11,7 @@
/*
* Copyright 2019 Joyent, Inc.
+ * Copyright 2022 Oxide Computer Company
*/
/*
@@ -2665,14 +2666,22 @@ imc_ioctl_decode(imc_t *imc, mc_encode_ioc_t *encode)
break;
}
encode->mcei_chip = i;
+ /*
+ * These Intel platforms are all monolithic dies, so set the die to
+ * zero.
+ */
+ encode->mcei_die = 0;
encode->mcei_mc = dec.ids_tadid;
+ encode->mcei_chan_addr = dec.ids_chanaddr;
encode->mcei_chan = dec.ids_channelid;
encode->mcei_dimm = dec.ids_dimmid;
encode->mcei_rank_addr = dec.ids_rankaddr;
encode->mcei_rank = dec.ids_rankid;
encode->mcei_row = UINT32_MAX;
encode->mcei_column = UINT32_MAX;
- encode->mcei_pad = 0;
+ encode->mcei_cs = encode->mcei_rm = UINT8_MAX;
+ encode->mcei_bank = encode->mcei_bank_group = UINT8_MAX;
+ encode->mcei_subchan = UINT8_MAX;
}
static int
diff --git a/usr/src/uts/intel/sys/amdzen/df.h b/usr/src/uts/intel/sys/amdzen/df.h
new file mode 100644
index 0000000000..6c1e5b5c79
--- /dev/null
+++ b/usr/src/uts/intel/sys/amdzen/df.h
@@ -0,0 +1,896 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+#ifndef _SYS_AMDZEN_DF_H
+#define _SYS_AMDZEN_DF_H
+
+/*
+ * This file contains definitions for the registers that appear in the AMD Zen
+ * Data Fabric. The data fabric is the main component which routes transactions
+ * between entities (e.g. CPUS, DRAM, PCIe, etc.) in the system. The data fabric
+ * itself is made up of up to 8 PCI functions. There can be multiple instances
+ * of the data fabric. There is one instance per die. In most AMD processors
+ * after Zen 1, there is only a single die per socket, for more background see
+ * the uts/i86pc/os/cpuid.c big theory statement. All data fabric instances
+ * appear on PCI bus 0. The first instance shows up on device 0x18. Subsequent
+ * instances simply increment that number by one.
+ *
+ * There are currently four major revisions of the data fabric that are
+ * supported here, which are v2 (Zen 1), v3 (Zen 2/3), v3.5 (Zen 2/3 with DDR5),
+ * and v4 (Zen 4). In many cases, while the same logical thing exists in
+ * different generations, they often have different shapes and sometimes things
+ * with the same shape show up in different locations.
+ *
+ * To make things a little easier for clients, each register definition encodes
+ * enough information to also include which hardware generations it supports,
+ * the actual PCI function it appears upon, and the register offset. This is to
+ * make sure that consumers don't have to guess this information and so that we
+ * can try to guarantee we're not accessing an incorrect register for our
+ * platform (unfortunately only at runtime).
+ *
+ * Register definitions have the following form:
+ *
+ * DF_<reg name>_<vers>
+ *
+ * Here <reg name> is something that describes the register. This may not be the
+ * exact same as the PPR (processor programming reference); however, the PPR
+ * name for the register will be included above it in a comment (though these
+ * have sometimes changed from time to time). For example, DF_DRAM_HOLE. If a
+ * given register is the same in all currently supported versions, then there is
+ * no version suffix appended. Otherwise, the first version it is supported in
+ * is appended. For example, DF_DRAM_BASE_V2, DF_DRAM_BASE_V3, DF_DRAM_BASE_V4,
+ * etc. or DF_FIDMASK0_V3P5, etc. If the register offset is the same in multiple
+ * versions, then they share the earliest version.
+ *
+ * For fields there are currently macros to extract these or chain them together
+ * leveraging bitx32() and bitset32(). Fields have the forms:
+ *
+ * DF_<reg name>_<vers>_GET_<field>
+ * DF_<reg name>_<vers>_SET_<field>
+ *
+ * Like in the above, if there are cases where a single field is the same across
+ * all versions, then the <vers> portion will be elided. There are many cases
+ * where the register definition does not change, but the fields themselves do
+ * change with each version because each hardware rev opts to be slightly
+ * different.
+ *
+ * When adding support for a new chip, please look carefully through the
+ * requisite documentation to ensure that the registers match what we see here.
+ * There are often subtle differences, or cases like V3P5 that just seem weird
+ * until you dig deeper.
+ */
+
+#include <sys/bitext.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum df_rev {
+ DF_REV_UNKNOWN = 0,
+ DF_REV_2 = 1 << 0,
+ DF_REV_3 = 1 << 1,
+ DF_REV_3P5 = 1 << 2,
+ DF_REV_4 = 1 << 3
+} df_rev_t;
+
+#define DF_REV_ALL_23 (DF_REV_2 | DF_REV_3 | DF_REV_3P5)
+#define DF_REV_ALL (DF_REV_2 | DF_REV_3 | DF_REV_3P5 | DF_REV_4)
+
+typedef struct df_reg_def {
+ df_rev_t drd_gens;
+ uint8_t drd_func;
+ uint16_t drd_reg;
+} df_reg_def_t;
+
+/*
+ * This set of registers provides us access to the count of instances in the
+ * data fabric and then a number of different pieces of information about them
+ * like their type. Note, these registers require indirect access because the
+ * information cannot be broadcast.
+ */
+
+/*
+ * DF::FabricBlockInstanceCount -- Describes the number of instances in the data
+ * fabric. With v4, also includes versioning information.
+ */
+/*CSTYLED*/
+#define DF_FBICNT (df_reg_def_t){ .drd_gens = DF_REV_ALL, \
+ .drd_func = 0, .drd_reg = 0x40 }
+#define DF_FBICNT_V4_GET_MAJOR(r) bitx32(r, 27, 24)
+#define DF_FBICNT_V4_GET_MINOR(r) bitx32(r, 23, 16)
+#define DF_FBICNT_GET_COUNT(r) bitx32(r, 7, 0)
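+
+/*
+ * As an illustration (not part of this change), a caller that has read the
+ * raw 32-bit DF_FBICNT value "val" through its own register access path would
+ * recover the instance count with:
+ *
+ *	uint32_t ninst = DF_FBICNT_GET_COUNT(val);
+ *
+ * and, on DFv4 parts only, the fabric version with the V4 major/minor
+ * accessors above.
+ */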
+
+/*
+ * DF::FabricBlockInstanceInformation0 -- get basic information about a fabric
+ * instance.
+ */
+/*CSTYLED*/
+#define DF_FBIINFO0 (df_reg_def_t){ .drd_gens = DF_REV_ALL, \
+ .drd_func = 0, .drd_reg = 0x44 }
+#define DF_FBIINFO0_GET_SUBTYPE(r) bitx32(r, 26, 24)
+#define DF_SUBTYPE_NONE 0
+typedef enum {
+ DF_CAKE_SUBTYPE_GMI = 1,
+ DF_CAKE_SUBTYPE_xGMI = 2
+} df_cake_subtype_t;
+
+typedef enum {
+ DF_IOM_SUBTYPE_IOHUB = 1,
+} df_iom_subtype_t;
+
+typedef enum {
+ DF_CS_SUBTYPE_UMC = 1,
+ /*
+ * The subtype changed beginning in DFv4. Prior to DFv4, the secondary
+ * type was CCIX. Starting with DFv4, this is now CMP. It is unclear if
+ * these are the same thing or not.
+ */
+ DF_CS_SUBTYPE_CCIX = 2,
+ DF_CS_SUBTYPE_CMP = 2
+} df_cs_subtype_t;
+
+/*
+ * Note, this only exists in Genoa (maybe more generally Zen 4), otherwise it's
+ * always zero.
+ */
+typedef enum {
+ DF_CCM_SUBTYPE_CPU = 1,
+ DF_CCM_SUBTYPE_ACM = 2
+} df_ccm_subtype_v4_t;
+#define DF_FBIINFO0_GET_HAS_MCA(r) bitx32(r, 23, 23)
+#define DF_FBIINFO0_GET_FTI_DCNT(r) bitx32(r, 21, 20)
+#define DF_FBIINFO0_GET_FTI_PCNT(r) bitx32(r, 18, 16)
+#define DF_FBIINFO0_GET_SDP_RESPCNT(r) bitx32(r, 14, 14)
+#define DF_FBIINFO0_GET_SDP_PCNT(r) bitx32(r, 13, 12)
+#define DF_FBIINFO0_GET_FTI_WIDTH(r) bitx32(r, 9, 8)
+typedef enum {
+ DF_FTI_W_64 = 0,
+ DF_FTI_W_128,
+ DF_FTI_W_256,
+ DF_FTI_W_512
+} df_fti_width_t;
+#define DF_FBIINFO0_V3_GET_ENABLED(r) bitx32(r, 6, 6)
+#define DF_FBIINFO0_GET_SDP_WIDTH(r) bitx32(r, 5, 4)
+typedef enum {
+ DF_SDP_W_64 = 0,
+ DF_SDP_W_128,
+ DF_SDP_W_256,
+ DF_SDP_W_512
+} df_sdp_width_t;
+#define DF_FBIINFO0_GET_TYPE(r) bitx32(r, 3, 0)
+typedef enum {
+ DF_TYPE_CCM = 0,
+ DF_TYPE_GCM,
+ DF_TYPE_NCM,
+ DF_TYPE_IOMS,
+ DF_TYPE_CS,
+ DF_TYPE_NCS,
+ DF_TYPE_TCDX,
+ DF_TYPE_PIE,
+ DF_TYPE_SPF,
+ DF_TYPE_LLC,
+ DF_TYPE_CAKE,
+ DF_TYPE_CNLI = 0xd,
+} df_type_t;
+
+/*
+ * DF::FabricBlockInstanceInformation1 -- get basic information about a fabric
+ * instance.
+ */
+/*CSTYLED*/
+#define DF_FBIINFO1 (df_reg_def_t){ .drd_gens = DF_REV_ALL, \
+ .drd_func = 0, .drd_reg = 0x48 }
+#define DF_FBINFO1_GET_FTI3_NINSTID(r) bitx32(r, 31, 24)
+#define DF_FBINFO1_GET_FTI2_NINSTID(r) bitx32(r, 23, 16)
+#define DF_FBINFO1_GET_FTI1_NINSTID(r) bitx32(r, 15, 8)
+#define DF_FBINFO1_GET_FTI0_NINSTID(r) bitx32(r, 7, 0)
+
+/*
+ * DF::FabricBlockInstanceInformation2 -- get basic information about a fabric
+ * instance.
+ */
+/*CSTYLED*/
+#define DF_FBIINFO2 (df_reg_def_t){ .drd_gens = DF_REV_ALL, \
+ .drd_func = 0, .drd_reg = 0x4c }
+#define DF_FBINFO2_GET_FTI5_NINSTID(r) bitx32(r, 15, 8)
+#define DF_FBINFO2_GET_FTI4_NINSTID(r) bitx32(r, 7, 0)
+
+/*
+ * DF::FabricBlockInstanceInformation3 -- obtain the basic IDs for a given
+ * instance.
+ */
+/*CSTYLED*/
+#define DF_FBIINFO3 (df_reg_def_t){ .drd_gens = DF_REV_ALL, \
+ .drd_func = 0, .drd_reg = 0x50 }
+#define DF_FBIINFO3_V2_GET_BLOCKID(r) bitx32(r, 15, 8)
+#define DF_FBIINFO3_V3_GET_BLOCKID(r) bitx32(r, 13, 8)
+#define DF_FBIINFO3_V3P5_GET_BLOCKID(r) bitx32(r, 11, 8)
+#define DF_FBIINFO3_V4_GET_BLOCKID(r) bitx32(r, 19, 8)
+#define DF_FBIINFO3_GET_INSTID(r) bitx32(r, 7, 0)
+
+/*
+ * DF::Skt0CsTargetRemap0, DF::Skt0CsTargetRemap1, DF::Skt1CsTargetRemap0,
+ * DF::Skt1CsTargetRemap1 -- The next set of registers provide access to
+ * chip-select remapping. Caution, while these have a documented DF generation
+ * that they are specific to, it seems they still aren't always implemented and
+ * are specific to Milan (v3) and Genoa (v4). The actual remap extraction is the
+ * same between both.
+ */
+#define DF_CS_REMAP_GET_CSX(r, x) bitx32(r, (3 + (4 * (x))), (4 * ((x))))
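+/*
+ * Sketch (not part of this change): each 32-bit remap register packs eight
+ * 4-bit target entries, so entry "x" (0-7) of a raw register value "r" is
+ * simply:
+ *
+ *	uint32_t targ = DF_CS_REMAP_GET_CSX(r, x);
+ */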
+/*CSTYLED*/
+#define DF_SKT0_CS_REMAP0_V3 (df_reg_def_t){ .drd_gens = DF_REV_3, \
+ .drd_func = 0, .drd_reg = 0x60 }
+/*CSTYLED*/
+#define DF_SKT1_CS_REMAP0_V3 (df_reg_def_t){ .drd_gens = DF_REV_3, \
+ .drd_func = 0, .drd_reg = 0x68 }
+/*CSTYLED*/
+#define DF_SKT0_CS_REMAP1_V3 (df_reg_def_t){ .drd_gens = DF_REV_3, \
+ .drd_func = 0, .drd_reg = 0x64 }
+/*CSTYLED*/
+#define DF_SKT1_CS_REMAP1_V3 (df_reg_def_t){ .drd_gens = DF_REV_3, \
+ .drd_func = 0, .drd_reg = 0x6c }
+/*
+ * DF::CsTargetRemap0A, DF::CsTargetRemap0B, etc. -- These registers contain the
+ * remap engines in DFv4. Note that while v3 used 0/1 in REMAP[01] to refer to
+ * the same logical set of things, here [0-3] is used for different things and
+ * A/B distinguish the different actual CS values.
+ */
+/*CSTYLED*/
+#define DF_CS_REMAP0A_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, .drd_reg = 0x180 }
+/*CSTYLED*/
+#define DF_CS_REMAP0B_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, .drd_reg = 0x184 }
+/*CSTYLED*/
+#define DF_CS_REMAP1A_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, .drd_reg = 0x188 }
+/*CSTYLED*/
+#define DF_CS_REMAP1B_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, .drd_reg = 0x18c }
+/*CSTYLED*/
+#define DF_CS_REMAP2A_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, .drd_reg = 0x190 }
+/*CSTYLED*/
+#define DF_CS_REMAP2B_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, .drd_reg = 0x194 }
+/*CSTYLED*/
+#define DF_CS_REMAP3A_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, .drd_reg = 0x198 }
+/*CSTYLED*/
+#define DF_CS_REMAP3B_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, .drd_reg = 0x19c }
+/*
+ * DF::CfgAddressCntl -- This register contains the information about the
+ * configuration of PCIe buses. We care about finding which one has our BUS A,
+ * which is required to map it to the in-package northbridge instance.
+ */
+/*CSTYLED*/
+#define DF_CFG_ADDR_CTL_V2 (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0x84 }
+/*CSTYLED*/
+#define DF_CFG_ADDR_CTL_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 0, \
+ .drd_reg = 0xc04 }
+#define DF_CFG_ADDR_CTL_GET_BUS_NUM(r) bitx32(r, 7, 0)
+
+/*
+ * DF::CfgAddressMap -- This next set of registers covers PCI Bus configuration
+ * address maps. The layout here changes at v4. This routes a given PCI bus to a
+ * device.
+ */
+/*CSTYLED*/
+#define DF_CFGMAP_V2(x) (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0xa0 + ((x) * 4) }
+#define DF_MAX_CFGMAP 8
+#define DF_CFGMAP_V2_GET_BUS_LIMIT(r) bitx32(r, 31, 24)
+#define DF_CFGMAP_V2_GET_BUS_BASE(r) bitx32(r, 23, 16)
+#define DF_CFGMAP_V2_GET_DEST_ID(r) bitx32(r, 11, 4)
+#define DF_CFGMAP_V3_GET_DEST_ID(r) bitx32(r, 13, 4)
+#define DF_CFGMAP_V3P5_GET_DEST_ID(r) bitx32(r, 7, 4)
+#define DF_CFGMAP_V2_GET_WE(r) bitx32(r, 1, 1)
+#define DF_CFGMAP_V2_GET_RE(r) bitx32(r, 0, 0)
+
+/*
+ * DF::CfgBaseAddress, DF::CfgLimitAddress -- DFv4 variants of the above, now
+ * split into two registers and with more possible entries!
+ */
+/*CSTYLED*/
+#define DF_CFGMAP_BASE_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 0, \
+ .drd_reg = 0xc80 + ((x) * 8) }
+/*CSTYLED*/
+#define DF_CFGMAP_LIMIT_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 0, \
+ .drd_reg = 0xc84 + ((x) * 8) }
+#define DF_CFGMAP_BASE_V4_GET_BASE(r) bitx32(r, 23, 16)
+#define DF_CFGMAP_BASE_V4_GET_SEG(r) bitx32(r, 15, 8)
+#define DF_CFGMAP_BASE_V4_GET_WE(r) bitx32(r, 1, 1)
+#define DF_CFGMAP_BASE_V4_GET_RE(r) bitx32(r, 0, 0)
+#define DF_CFGMAP_LIMIT_V4_GET_LIMIT(r) bitx32(r, 23, 16)
+#define DF_CFGMAP_LIMIT_V4_GET_DEST_ID(r) bitx32(r, 11, 0)
+
+/*
+ * DF::X86IOBaseAddress, DF::X86IOLimitAddress -- Base and limit registers for
+ * routing I/O space. These are fairly similar prior to DFv4.
+ */
+/*CSTYLED*/
+#define DF_IO_BASE_V2(x) (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0xc0 + ((x) * 8) }
+/*CSTYLED*/
+#define DF_IO_BASE_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 0, \
+ .drd_reg = 0xd00 + ((x) * 8) }
+#define DF_MAX_IO_RULES 8
+#define DF_IO_BASE_SHIFT 12
+#define DF_IO_BASE_V2_GET_BASE(r) bitx32(r, 24, 12)
+#define DF_IO_BASE_V2_GET_IE(r) bitx32(r, 5, 5)
+#define DF_IO_BASE_V2_GET_WE(r) bitx32(r, 1, 1)
+#define DF_IO_BASE_V2_GET_RE(r) bitx32(r, 0, 0)
+#define DF_IO_BASE_V2_SET_BASE(r, v) bitset32(r, 24, 12, v)
+#define DF_IO_BASE_V2_SET_IE(r, v) bitset32(r, 5, 5, v)
+#define DF_IO_BASE_V2_SET_WE(r, v) bitset32(r, 1, 1, v)
+#define DF_IO_BASE_V2_SET_RE(r, v) bitset32(r, 0, 0, v)
+
+#define DF_IO_BASE_V4_GET_BASE(r) bitx32(r, 28, 16)
+#define DF_IO_BASE_V4_GET_IE(r) bitx32(r, 5, 5)
+#define DF_IO_BASE_V4_GET_WE(r) bitx32(r, 1, 1)
+#define DF_IO_BASE_V4_GET_RE(r) bitx32(r, 0, 0)
+#define DF_IO_BASE_V4_SET_BASE(r, v) bitset32(r, 28, 16, v)
+#define DF_IO_BASE_V4_SET_IE(r, v) bitset32(r, 5, 5, v)
+#define DF_IO_BASE_V4_SET_WE(r, v) bitset32(r, 1, 1, v)
+#define DF_IO_BASE_V4_SET_RE(r, v) bitset32(r, 0, 0, v)
+
+/*CSTYLED*/
+#define DF_IO_LIMIT_V2(x) (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0xc4 + ((x) * 8) }
+/*CSTYLED*/
+#define DF_IO_LIMIT_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 0, \
+ .drd_reg = 0xd04 + ((x) * 8) }
+#define DF_MAX_IO_LIMIT ((1 << 24) - 1)
+#define DF_IO_LIMIT_SHIFT 12
+#define DF_IO_LIMIT_EXCL (1 << DF_IO_LIMIT_SHIFT)
+#define DF_IO_LIMIT_V2_GET_LIMIT(r) bitx32(r, 24, 12)
+#define DF_IO_LIMIT_V2_GET_DEST_ID(r) bitx32(r, 7, 0)
+#define DF_IO_LIMIT_V3_GET_DEST_ID(r) bitx32(r, 9, 0)
+#define DF_IO_LIMIT_V3P5_GET_DEST_ID(r) bitx32(r, 3, 0)
+#define DF_IO_LIMIT_V2_SET_LIMIT(r, v) bitset32(r, 24, 12, v)
+#define DF_IO_LIMIT_V2_SET_DEST_ID(r, v) bitset32(r, 7, 0, v)
+#define DF_IO_LIMIT_V3_SET_DEST_ID(r, v) bitset32(r, 9, 0, v)
+#define DF_IO_LIMIT_V3P5_SET_DEST_ID(r, v) bitset32(r, 3, 0, v)
+
+#define DF_IO_LIMIT_V4_GET_LIMIT(r) bitx32(r, 28, 16)
+#define DF_IO_LIMIT_V4_GET_DEST_ID(r) bitx32(r, 11, 0)
+#define DF_IO_LIMIT_V4_SET_LIMIT(r, v) bitset32(r, 28, 16, v)
+#define DF_IO_LIMIT_V4_SET_DEST_ID(r, v) bitset32(r, 11, 0, v)
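+
+/*
+ * Illustrative sketch (not part of this change): the base and limit fields
+ * are stored in 4 KiB units, so the decoded, inclusive I/O port range is
+ * presumably:
+ *
+ *	uint64_t io_base = (uint64_t)DF_IO_BASE_V2_GET_BASE(b) <<
+ *	    DF_IO_BASE_SHIFT;
+ *	uint64_t io_last = ((uint64_t)DF_IO_LIMIT_V2_GET_LIMIT(l) <<
+ *	    DF_IO_LIMIT_SHIFT) + DF_IO_LIMIT_EXCL - 1;
+ */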
+
+/*
+ * DF::DramHoleControl -- This controls MMIO below 4 GiB. Note, both this and
+ * the Top of Memory (TOM) need to be set consistently.
+ */
+/*CSTYLED*/
+#define DF_DRAM_HOLE_V2 (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0x104 }
+/*CSTYLED*/
+#define DF_DRAM_HOLE_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, \
+ .drd_reg = 0x104 }
+#define DF_DRAM_HOLE_GET_BASE(r) bitx32(r, 31, 24)
+#define DF_DRAM_HOLE_BASE_SHIFT 24
+#define DF_DRAM_HOLE_GET_VALID(r) bitx32(r, 0, 0)
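+
+/*
+ * Sketch (not part of this change): the hole base field is in units of
+ * 16 MiB, so the physical address where the hole begins is:
+ *
+ *	uint64_t hole = (uint64_t)DF_DRAM_HOLE_GET_BASE(val) <<
+ *	    DF_DRAM_HOLE_BASE_SHIFT;
+ */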
+
+/*
+ * DF::DramBaseAddress, DF::DramLimitAddress -- DRAM rules, these are split into
+ * a base and limit. While DFv2, 3, and 3.5 all have the same addresses, they
+ * have different bit patterns entirely. DFv4 is in a different location and
+ * further splits this into four registers. We do all of the pre-DFv4 stuff and
+ * follow with DFv4. In DFv2-3.5 the actual values of the bits (e.g. the meaning
+ * of the channel interleave value) are the same, even though where those bits
+ * are in the register changes.
+ *
+ * In DF v2, v3, and v3.5 the set of constants for interleave values are the
+ * same, so we define them once at the v2 version.
+ */
+/*CSTYLED*/
+#define DF_DRAM_BASE_V2(r) (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0x110 + ((r) * 8) }
+#define DF_DRAM_BASE_V2_GET_BASE(r) bitx32(r, 31, 12)
+#define DF_DRAM_BASE_V2_BASE_SHIFT 28
+#define DF_DRAM_BASE_V2_GET_ILV_ADDR(r) bitx32(r, 10, 8)
+#define DF_DRAM_BASE_V2_GET_ILV_CHAN(r) bitx32(r, 7, 4)
+#define DF_DRAM_BASE_V2_ILV_CHAN_1 0x0
+#define DF_DRAM_BASE_V2_ILV_CHAN_2 0x1
+#define DF_DRAM_BASE_V2_ILV_CHAN_4 0x3
+#define DF_DRAM_BASE_V2_ILV_CHAN_8 0x5
+#define DF_DRAM_BASE_V2_ILV_CHAN_6 0x6
+#define DF_DRAM_BASE_V2_ILV_CHAN_COD4_2 0xc
+#define DF_DRAM_BASE_V2_ILV_CHAN_COD2_4 0xd
+#define DF_DRAM_BASE_V2_ILV_CHAN_COD1_8 0xe
+#define DF_DRAM_BASE_V2_GET_HOLE_EN(r) bitx32(r, 1, 1)
+#define DF_DRAM_BASE_V2_GET_VALID(r) bitx32(r, 0, 0)
+
+#define DF_DRAM_BASE_V3_GET_ILV_ADDR(r) bitx32(r, 11, 9)
+#define DF_DRAM_BASE_V3_GET_ILV_SOCK(r) bitx32(r, 8, 8)
+#define DF_DRAM_BASE_V3_GET_ILV_DIE(r) bitx32(r, 7, 6)
+#define DF_DRAM_BASE_V3_GET_ILV_CHAN(r) bitx32(r, 5, 2)
+
+#define DF_DRAM_BASE_V3P5_GET_ILV_ADDR(r) bitx32(r, 11, 9)
+#define DF_DRAM_BASE_V3P5_GET_ILV_SOCK(r) bitx32(r, 8, 8)
+#define DF_DRAM_BASE_V3P5_GET_ILV_DIE(r) bitx32(r, 7, 7)
+#define DF_DRAM_BASE_V3P5_GET_ILV_CHAN(r) bitx32(r, 6, 2)
+
+/*
+ * Shared definitions for the DF DRAM interleaving address start bits. While the
+ * bitfield / register definition is different between DFv2/3/3.5 and DFv4, the
+ * actual contents of the base address register and the base are shared.
+ */
+#define DF_DRAM_ILV_ADDR_8 0
+#define DF_DRAM_ILV_ADDR_9 1
+#define DF_DRAM_ILV_ADDR_10 2
+#define DF_DRAM_ILV_ADDR_11 3
+#define DF_DRAM_ILV_ADDR_12 4
+#define DF_DRAM_ILV_ADDR_BASE 8
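+
+/*
+ * Sketch (not part of this change): the encoded interleave address value maps
+ * to a starting address bit of DF_DRAM_ILV_ADDR_BASE + value, e.g.
+ * DF_DRAM_ILV_ADDR_9 corresponds to address bit 8 + 1 = 9:
+ *
+ *	uint32_t start_bit = DF_DRAM_ILV_ADDR_BASE +
+ *	    DF_DRAM_BASE_V2_GET_ILV_ADDR(val);
+ */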
+
+/*CSTYLED*/
+#define DF_DRAM_LIMIT_V2(r) (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0x114 + ((r) * 8) }
+#define DF_DRAM_LIMIT_V2_GET_LIMIT(r) bitx32(r, 31, 12)
+#define DF_DRAM_LIMIT_V2_LIMIT_SHIFT 28
+#define DF_DRAM_LIMIT_V2_LIMIT_EXCL (1 << 28)
+/* These are in the base register for v3, v3.5 */
+#define DF_DRAM_LIMIT_V2_GET_ILV_DIE(r) bitx32(r, 11, 10)
+#define DF_DRAM_LIMIT_V2_GET_ILV_SOCK(r) bitx32(r, 8, 8)
+#define DF_DRAM_LIMIT_V2_GET_DEST_ID(r) bitx32(r, 7, 0)
+
+#define DF_DRAM_LIMIT_V3_GET_BUS_BREAK(r) bitx32(r, 10, 10)
+#define DF_DRAM_LIMIT_V3_GET_DEST_ID(r) bitx32(r, 9, 0)
+
+#define DF_DRAM_LIMIT_V3P5_GET_DEST_ID(r) bitx32(r, 3, 0)
+
+/*
+ * DF::DramBaseAddress, DF::DramLimitAddress, DF::DramAddressCtl,
+ * DF::DramAddressIntlv -- DFv4 edition. Here all the controls around the
+ * target, interleaving, hashing, and more is split out from the base and limit
+ * registers and put into dedicated control and interleave registers.
+ */
+/*CSTYLED*/
+#define DF_DRAM_BASE_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, \
+ .drd_reg = 0xe00 + ((x) * 0x10) }
+#define DF_DRAM_BASE_V4_GET_ADDR(r) bitx32(r, 27, 0)
+#define DF_DRAM_BASE_V4_BASE_SHIFT 28
+/*CSTYLED*/
+#define DF_DRAM_LIMIT_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, \
+ .drd_reg = 0xe04 + ((x) * 0x10) }
+#define DF_DRAM_LIMIT_V4_GET_ADDR(r) bitx32(r, 27, 0)
+#define DF_DRAM_LIMIT_V4_LIMIT_SHIFT 28
+#define DF_DRAM_LIMIT_V4_LIMIT_EXCL (1 << 28)
+
+/*CSTYLED*/
+#define DF_DRAM_CTL_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, \
+ .drd_reg = 0xe08 + ((x) * 0x10) }
+#define DF_DRAM_CTL_V4_GET_DEST_ID(r) bitx32(r, 27, 16)
+#define DF_DRAM_CTL_V4_GET_HASH_1G(r) bitx32(r, 10, 10)
+#define DF_DRAM_CTL_V4_GET_HASH_2M(r) bitx32(r, 9, 9)
+#define DF_DRAM_CTL_V4_GET_HASH_64K(r) bitx32(r, 8, 8)
+#define DF_DRAM_CTL_V4_GET_REMAP_SEL(r) bitx32(r, 7, 5)
+#define DF_DRAM_CTL_V4_GET_REMAP_EN(r) bitx32(r, 4, 4)
+#define DF_DRAM_CTL_V4_GET_SCM(r) bitx32(r, 2, 2)
+#define DF_DRAM_CTL_V4_GET_HOLE_EN(r) bitx32(r, 1, 1)
+#define DF_DRAM_CTL_V4_GET_VALID(r) bitx32(r, 0, 0)
+
+/*CSTYLED*/
+#define DF_DRAM_ILV_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, \
+ .drd_reg = 0xe0c + ((x) * 0x10) }
+#define DF_DRAM_ILV_V4_GET_SOCK(r) bitx32(r, 18, 18)
+#define DF_DRAM_ILV_V4_GET_DIE(r) bitx32(r, 13, 12)
+#define DF_DRAM_ILV_V4_GET_CHAN(r) bitx32(r, 8, 4)
+#define DF_DRAM_ILV_V4_CHAN_1 0x0
+#define DF_DRAM_ILV_V4_CHAN_2 0x1
+#define DF_DRAM_ILV_V4_CHAN_4 0x3
+#define DF_DRAM_ILV_V4_CHAN_8 0x5
+#define DF_DRAM_ILV_V4_CHAN_16 0x7
+#define DF_DRAM_ILV_V4_CHAN_32 0x8
+#define DF_DRAM_ILV_V4_CHAN_NPS4_2CH 0x10
+#define DF_DRAM_ILV_V4_CHAN_NPS2_4CH 0x11
+#define DF_DRAM_ILV_V4_CHAN_NPS1_8CH 0x12
+#define DF_DRAM_ILV_V4_CHAN_NPS4_3CH 0x13
+#define DF_DRAM_ILV_V4_CHAN_NPS2_6CH 0x14
+#define DF_DRAM_ILV_V4_CHAN_NPS1_12CH 0x15
+#define DF_DRAM_ILV_V4_CHAN_NPS2_5CH 0x16
+#define DF_DRAM_ILV_V4_CHAN_NPS1_10CH 0x17
+#define DF_DRAM_ILV_V4_GET_ADDR(r) bitx32(r, 2, 0)
+
+/*
+ * DF::DramOffset -- These exist only for CS entries, e.g. a UMC. There is
+ * generally only one of these in Zen 1-3. This register changes in Zen 4 and
+ * there are up to 3 instances there. Each instance corresponds to a DRAM rule
+ * that the UMC has, starting at the second one. This is because the first
+ * DRAM rule in a channel is always defined to start at offset 0, so it has no
+ * entry here.
+ */
+/*CSTYLED*/
+#define DF_DRAM_OFFSET_V2 (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0x1b4 }
+/*CSTYLED*/
+#define DF_DRAM_OFFSET_V4(r) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 7, \
+ .drd_reg = 0x404 + ((r) * 4) }
+#define DF_DRAM_OFFSET_V2_GET_OFFSET(r) bitx32(r, 31, 20)
+#define DF_DRAM_OFFSET_V3_GET_OFFSET(r) bitx32(r, 31, 12)
+#define DF_DRAM_OFFSET_V4_GET_OFFSET(r) bitx32(r, 24, 1)
+#define DF_DRAM_OFFSET_SHIFT 28
+#define DF_DRAM_OFFSET_GET_EN(r) bitx32(r, 0, 0)
+
+/*
+ * DF::MmioBaseAddress, DF::MmioLimitAddress, DF::MmioAddressControl -- These
+ * control the various MMIO rules for a given system.
+ */
+/*CSTYLED*/
+#define DF_MMIO_BASE_V2(x) (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0x200 + ((x) * 0x10) }
+/*CSTYLED*/
+#define DF_MMIO_LIMIT_V2(x) (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0x204 + ((x) * 0x10) }
+/*CSTYLED*/
+#define DF_MMIO_BASE_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 0, \
+ .drd_reg = 0xd80 + ((x) * 0x10) }
+/*CSTYLED*/
+#define DF_MMIO_LIMIT_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 0, \
+ .drd_reg = 0xd84 + ((x) * 0x10) }
+#define DF_MMIO_SHIFT 16
+#define DF_MMIO_LIMIT_EXCL (1 << DF_MMIO_SHIFT)
+#define DF_MAX_MMIO_RULES 16
+/*CSTYLED*/
+#define DF_MMIO_CTL_V2(x) (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 0, \
+ .drd_reg = 0x208 + ((x) * 0x10) }
+/*CSTYLED*/
+#define DF_MMIO_CTL_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 0, \
+ .drd_reg = 0xd88 + ((x) * 0x10) }
+#define DF_MMIO_CTL_V2_GET_NP(r) bitx32(r, 12, 12)
+#define DF_MMIO_CTL_V2_GET_DEST_ID(r) bitx32(r, 11, 4)
+#define DF_MMIO_CTL_V2_SET_NP(r, v) bitset32(r, 12, 12, v)
+#define DF_MMIO_CTL_V2_SET_DEST_ID(r, v) bitset32(r, 11, 4, v)
+
+#define DF_MMIO_CTL_V3_GET_NP(r) bitx32(r, 16, 16)
+#define DF_MMIO_CTL_V3_GET_DEST_ID(r) bitx32(r, 13, 4)
+#define DF_MMIO_CTL_V3P5_GET_DEST_ID(r) bitx32(r, 7, 4)
+#define DF_MMIO_CTL_V3_SET_NP(r, v) bitset32(r, 16, 16, v)
+#define DF_MMIO_CTL_V3_SET_DEST_ID(r, v) bitset32(r, 13, 4, v)
+#define DF_MMIO_CTL_V3P5_SET_DEST_ID(r, v) bitset32(r, 7, 4, v)
+
+#define DF_MMIO_CTL_V4_GET_DEST_ID(r) bitx32(r, 27, 16)
+#define DF_MMIO_CTL_V4_GET_NP(r) bitx32(r, 3, 3)
+#define DF_MMIO_CTL_V4_SET_DEST_ID(r, v) bitset32(r, 27, 16, v)
+#define DF_MMIO_CTL_V4_SET_NP(r, v) bitset32(r, 3, 3, v)
+
+#define DF_MMIO_CTL_GET_CPU_DIS(r) bitx32(r, 2, 2)
+#define DF_MMIO_CTL_GET_WE(r) bitx32(r, 1, 1)
+#define DF_MMIO_CTL_GET_RE(r) bitx32(r, 0, 0)
+#define DF_MMIO_CTL_SET_CPU_DIS(r, v) bitset32(r, 2, 2, v)
+#define DF_MMIO_CTL_SET_WE(r, v) bitset32(r, 1, 1, v)
+#define DF_MMIO_CTL_SET_RE(r, v) bitset32(r, 0, 0, v)
+
+/*
+ * DF::MmioExtAddress -- New in DFv4, this allows extending the number of bits
+ * used for MMIO.
+ */
+/*CSTYLED*/
+#define DF_MMIO_EXT_V4(x) (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 0, \
+ .drd_reg = 0xd8c + ((x) * 0x10) }
+#define DF_MMIO_EXT_V4_GET_LIMIT(r) bitx32(r, 23, 16)
+#define DF_MMIO_EXT_V4_GET_BASE(r) bitx32(r, 7, 0)
+#define DF_MMIO_EXT_V4_SET_LIMIT(r, v) bitset32(r, 23, 16, v)
+#define DF_MMIO_EXT_V4_SET_BASE(r, v) bitset32(r, 7, 0, v)
+
+/*
+ * DF::DfGlobalCtrl -- We generally only care about this register in the
+ * DFv3/3.5 timeframe, when it contains the actual hash controls, hence its
+ * current definition. It technically exists in DFv2/v4, but is not relevant.
+ */
+/*CSTYLED*/
+#define DF_GLOB_CTL_V3 (df_reg_def_t){ .drd_gens = DF_REV_3 | \
+ DF_REV_3P5, \
+ .drd_func = 0, \
+ .drd_reg = 0x3F8 }
+#define DF_GLOB_CTL_V3_GET_HASH_1G(r) bitx32(r, 22, 22)
+#define DF_GLOB_CTL_V3_GET_HASH_2M(r) bitx32(r, 21, 21)
+#define DF_GLOB_CTL_V3_GET_HASH_64K(r) bitx32(r, 20, 20)
+
+/*
+ * DF::SystemCfg -- This register describes the basic information about the data
+ * fabric that we're talking to. Don't worry, this is different in every
+ * generation, even when the address is the same. Despite all these
+ * differences, the actual things like the defined types are somehow the same.
+ */
+typedef enum {
+ DF_DIE_TYPE_CPU = 0,
+ DF_DIE_TYPE_APU,
+ DF_DIE_TYPE_dGPU
+} df_die_type_t;
+
+/*CSTYLED*/
+#define DF_SYSCFG_V2 (df_reg_def_t){ .drd_gens = DF_REV_2, \
+ .drd_func = 1, \
+ .drd_reg = 0x200 }
+#define DF_SYSCFG_V2_GET_SOCK_ID(r) bitx32(r, 27, 27)
+#define DF_SYSCFG_V2_GET_DIE_ID(r) bitx32(r, 25, 24)
+#define DF_SYSCFG_V2_GET_MY_TYPE(r) bitx32(r, 22, 21)
+#define DF_SYSCFG_V2_GET_LOCAL_IS_ME(r) bitx32(r, 19, 16)
+#define DF_SYSCFG_V2_GET_LOCAL_TYPE3(r) bitx32(r, 13, 12)
+#define DF_SYSCFG_V2_GET_LOCAL_TYPE2(r) bitx32(r, 11, 10)
+#define DF_SYSCFG_V2_GET_LOCAL_TYPE1(r) bitx32(r, 9, 8)
+#define DF_SYSCFG_V2_GET_LOCAL_TYPE0(r) bitx32(r, 7, 6)
+#define DF_SYSCFG_V2_GET_OTHER_SOCK(r) bitx32(r, 5, 5)
+#define DF_SYSCFG_V2_GET_DIE_PRESENT(r) bitx32(r, 4, 0)
+#define DF_SYSCFG_V2_DIE_PRESENT(x) bitx32(x, 3, 0)
+
+/*CSTYLED*/
+#define DF_SYSCFG_V3 (df_reg_def_t){ .drd_gens = DF_REV_3, \
+ .drd_func = 1, \
+ .drd_reg = 0x200 }
+#define DF_SYSCFG_V3_GET_NODE_ID(r) bitx32(r, 30, 28)
+#define DF_SYSCFG_V3_GET_OTHER_SOCK(r) bitx32(r, 27, 27)
+#define DF_SYSCFG_V3_GET_OTHER_TYPE(r) bitx32(r, 26, 25)
+#define DF_SYSCFG_V3_GET_MY_TYPE(r) bitx32(r, 24, 23)
+#define DF_SYSCFG_V3_GET_DIE_TYPE(r) bitx32(r, 18, 11)
+#define DF_SYSCFG_V3_GET_DIE_PRESENT(r) bitx32(r, 7, 0)
+
+/*CSTYLED*/
+#define DF_SYSCFG_V3P5 (df_reg_def_t){ .drd_gens = DF_REV_3P5, \
+ .drd_func = 1, \
+ .drd_reg = 0x140 }
+#define DF_SYSCFG_V3P5_GET_NODE_ID(r) bitx32(r, 19, 16)
+#define DF_SYSCFG_V3P5_GET_OTHER_SOCK(r) bitx32(r, 8, 8)
+#define DF_SYSCFG_V3P5_GET_NODE_MAP(r) bitx32(r, 4, 4)
+#define DF_SYSCFG_V3P5_GET_OTHER_TYPE(r) bitx32(r, 3, 2)
+#define DF_SYSCFG_V3P5_GET_MY_TYPE(r) bitx32(r, 1, 0)
+
+/*CSTYLED*/
+#define DF_SYSCFG_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 4, \
+ .drd_reg = 0x180 }
+#define DF_SYSCFG_V4_GET_NODE_ID(r) bitx32(r, 27, 16)
+#define DF_SYSCFG_V4_GET_OTHER_SOCK(r) bitx32(r, 8, 8)
+#define DF_SYSCFG_V4_GET_NODE_MAP(r) bitx32(r, 4, 4)
+#define DF_SYSCFG_V4_GET_OTHER_TYPE(r) bitx32(r, 3, 2)
+#define DF_SYSCFG_V4_GET_MY_TYPE(r) bitx32(r, 1, 0)
+
+/*
+ * DF::SystemComponentCnt -- Has a count of how many components are here.
+ * However, this does not seem to be defined for DFv3.5.
+ */
+/*CSTYLED*/
+#define DF_COMPCNT_V2 (df_reg_def_t){ .drd_gens = DF_REV_2 | \
+ DF_REV_3, \
+ .drd_func = 1, \
+ .drd_reg = 0x204 }
+#define DF_COMPCNT_V2_GET_IOMS(r) bitx32(r, 23, 16)
+#define DF_COMPCNT_V2_GET_GCM(r) bitx32(r, 15, 8)
+#define DF_COMPCNT_V2_GET_PIE(r) bitx32(r, 7, 0)
+
+/*CSTYLED*/
+#define DF_COMPCNT_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 4, \
+ .drd_reg = 0x184 }
+#define DF_COMPCNT_V4_GET_IOS(r) bitx32(r, 31, 26)
+#define DF_COMPCNT_V4_GET_GCM(r) bitx32(r, 25, 16)
+#define DF_COMPCNT_V4_GET_IOM(r) bitx32(r, 15, 8)
+#define DF_COMPCNT_V4_GET_PIE(r) bitx32(r, 7, 0)
+
+/*
+ * This next section contains a bunch of register definitions for how to take
+ * apart ID masks. The register names and sets have changed across every DF
+ * revision. This will be done in chunks that define all DFv2, then v3, etc.
+ */
+
+/*
+ * DF::SystemFabricIdMask -- DFv2 style breakdowns of IDs. Note, unlike others
+ * the socket and die shifts are not relative to a node mask, but are global.
+ */
+/*CSTYLED*/
+#define DF_FIDMASK_V2 (df_reg_def_t){ .drd_gens = DF_REV_2, \
+ .drd_func = 1, \
+ .drd_reg = 0x208 }
+#define DF_FIDMASK_V2_GET_SOCK_SHIFT(r) bitx32(r, 31, 28)
+#define DF_FIDMASK_V2_GET_DIE_SHIFT(r) bitx32(r, 27, 24)
+#define DF_FIDMASK_V2_GET_SOCK_MASK(r) bitx32(r, 23, 16)
+#define DF_FIDMASK_V2_GET_DIE_MASK(r) bitx32(r, 15, 8)
+
+/*
+ * DF::SystemFabricIdMask0, DF::SystemFabricIdMask1 -- The DFv3 variant of
+ * breaking down an ID into bits and shifts. Unlike in DFv2, the socket and die
+ * are relative to a node ID. For more, see amdzen_determine_fabric_decomp() in
+ * uts/intel/io/amdzen/amdzen.c.
+ */
+/*CSTYLED*/
+#define DF_FIDMASK0_V3 (df_reg_def_t){ .drd_gens = DF_REV_3, \
+ .drd_func = 1, \
+ .drd_reg = 0x208 }
+#define DF_FIDMASK0_V3_GET_NODE_MASK(r) bitx32(r, 25, 16)
+#define DF_FIDMASK0_V3_GET_COMP_MASK(r) bitx32(r, 9, 0)
+/*CSTYLED*/
+#define DF_FIDMASK1_V3 (df_reg_def_t){ .drd_gens = DF_REV_3, \
+ .drd_func = 1, \
+ .drd_reg = 0x20c }
+#define DF_FIDMASK1_V3_GET_SOCK_MASK(r) bitx32(r, 26, 24)
+#define DF_FIDMASK1_V3_GET_DIE_MASK(r) bitx32(r, 18, 16)
+#define DF_FIDMASK1_V3_GET_SOCK_SHIFT(r) bitx32(r, 9, 8)
+#define DF_FIDMASK1_V3_GET_NODE_SHIFT(r) bitx32(r, 3, 0)
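+
+/*
+ * Worked sketch (not part of this change) of how the DFv3 pieces nest; the
+ * authoritative logic lives in amdzen_determine_fabric_decomp() and the
+ * zen_fabric_id_compose()/decompose() helpers:
+ *
+ *	node_id   = (sock << sock_shift) | die;
+ *	fabric_id = (node_id << node_shift) | component;
+ */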
+
+/*
+ * DF::SystemFabricIdMask0, DF::SystemFabricIdMask1, DF::SystemFabricIdMask2 --
+ * DFv3.5 and DFv4 have the same format here, but in different registers.
+ */
+/*CSTYLED*/
+#define DF_FIDMASK0_V3P5 (df_reg_def_t){ .drd_gens = DF_REV_3P5, \
+ .drd_func = 1, \
+ .drd_reg = 0x150 }
+/*CSTYLED*/
+#define DF_FIDMASK0_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 4, \
+ .drd_reg = 0x1b0 }
+#define DF_FIDMASK0_V3P5_GET_NODE_MASK(r) bitx32(r, 31, 16)
+#define DF_FIDMASK0_V3P5_GET_COMP_MASK(r) bitx32(r, 15, 0)
+/*CSTYLED*/
+#define DF_FIDMASK1_V3P5 (df_reg_def_t){ .drd_gens = DF_REV_3P5, \
+ .drd_func = 1, \
+ .drd_reg = 0x154 }
+/*CSTYLED*/
+#define DF_FIDMASK1_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 4, \
+ .drd_reg = 0x1b4 }
+#define DF_FIDMASK1_V3P5_GET_SOCK_SHIFT(r) bitx32(r, 11, 8)
+#define DF_FIDMASK1_V3P5_GET_NODE_SHIFT(r) bitx32(r, 3, 0)
+/*CSTYLED*/
+#define DF_FIDMASK2_V3P5 (df_reg_def_t){ .drd_gens = DF_REV_3P5, \
+ .drd_func = 1, \
+ .drd_reg = 0x158 }
+/*CSTYLED*/
+#define DF_FIDMASK2_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 4, \
+ .drd_reg = 0x1b8 }
+#define DF_FIDMASK2_V3P5_GET_SOCK_MASK(r) bitx32(r, 31, 16)
+#define DF_FIDMASK2_V3P5_GET_DIE_MASK(r) bitx32(r, 15, 0)
+
+/*
+ * DF::DieFabricIdMask -- This is a Zeppelin, DFv2 special. There are a couple
+ * instances of this for different types of devices; however, this is where the
+ * component mask is actually stored. This is replicated for a CPU, APU, and
+ * dGPU, each with slightly different values. We need to look at DF_SYSCFG_V2 to
+ * determine which type of die we have and use the appropriate one when looking
+ * at this. This means the Zen 1 CPUs and APUs have explicitly different setups
+ * here. Look, it got better in DFv3.
+ */
+/*CSTYLED*/
+#define DF_DIEMASK_CPU_V2 (df_reg_def_t){ .drd_gens = DF_REV_2, \
+ .drd_func = 1, \
+ .drd_reg = 0x22c }
+/*CSTYLED*/
+#define DF_DIEMASK_APU_V2 (df_reg_def_t){ .drd_gens = DF_REV_2, \
+ .drd_func = 1, \
+ .drd_reg = 0x24c }
+#define DF_DIEMASK_V2_GET_SOCK_SHIFT(r) bitx32(r, 31, 28)
+#define DF_DIEMASK_V2_GET_DIE_SHIFT(r) bitx32(r, 27, 24)
+#define DF_DIEMASK_V2_GET_SOCK_MASK(r) bitx32(r, 23, 16)
+#define DF_DIEMASK_V2_GET_DIE_MASK(r) bitx32(r, 15, 8)
+#define DF_DIEMASK_V2_GET_COMP_MASK(r) bitx32(r, 7, 0)
+
+
+/*
+ * DF::PhysicalCoreEnable0, etc. -- These registers can be used to tell us which
+ * cores are actually enabled. We know these exist in DFv3 and v4. It is less
+ * clear in DFv3.5 and DFv2.
+ */
+/*CSTYLED*/
+#define DF_PHYS_CORE_EN0_V3 (df_reg_def_t){ .drd_gens = DF_REV_3, \
+ .drd_func = 1, \
+ .drd_reg = 0x300 }
+/*CSTYLED*/
+#define DF_PHYS_CORE_EN1_V3 (df_reg_def_t){ .drd_gens = DF_REV_3, \
+ .drd_func = 1, \
+ .drd_reg = 0x304 }
+/*CSTYLED*/
+#define DF_PHYS_CORE_EN0_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 1, \
+ .drd_reg = 0x140 }
+/*CSTYLED*/
+#define DF_PHYS_CORE_EN1_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 1, \
+ .drd_reg = 0x144 }
+/*CSTYLED*/
+#define DF_PHYS_CORE_EN2_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 1, \
+ .drd_reg = 0x148 }
+
+/*
+ * DF::Np2ChannelConfig -- This is used in Milan to contain information about
+ * how non-power of 2 based channel configuration works. Note, we only know that
+ * this exists in Milan (and its ThreadRipper equivalent). We don't believe it
+ * is in other DFv3 products like Rome, Matisse, Vermeer, or the APUs.
+ */
+/*CSTYLED*/
+#define DF_NP2_CONFIG_V3 (df_reg_def_t){ .drd_gens = DF_REV_3, \
+ .drd_func = 2, \
+ .drd_reg = 0x90 }
+#define DF_NP2_CONFIG_V3_GET_SPACE1(r) bitx32(r, 13, 8)
+#define DF_NP2_CONFIG_V3_GET_SPACE0(r) bitx32(r, 5, 0)
+
+
+/*
+ * DF::FabricIndirectConfigAccessAddress, DF::FabricIndirectConfigAccessDataLo,
+ * DF::FabricIndirectConfigAccessDataHi -- These registers are used to define
+ * Indirect Access, commonly known as FICAA and FICAD for the system. While
+ * there are multiple copies of the indirect access registers in device 4, we're
+ * only allowed access to one set of those (which are the ones present here).
+ * Specifically the OS is given access to set 3.
+ */
+/*CSTYLED*/
+#define DF_FICAA_V2 (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 4, \
+ .drd_reg = 0x5c }
+/*CSTYLED*/
+#define DF_FICAA_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 4, \
+ .drd_reg = 0x8c }
+#define DF_FICAA_V2_SET_INST(r, v) bitset32(r, 23, 16, v)
+#define DF_FICAA_V2_SET_64B(r, v) bitset32(r, 14, 14, v)
+#define DF_FICAA_V2_SET_FUNC(r, v) bitset32(r, 13, 11, v)
+#define DF_FICAA_V2_SET_REG(r, v) bitset32(r, 10, 2, v)
+#define DF_FICAA_V2_SET_TARG_INST(r, v) bitset32(r, 0, 0, v)
+
+#define DF_FICAA_V4_SET_REG(r, v) bitset32(r, 10, 1, v)
+
+/*CSTYLED*/
+#define DF_FICAD_LO_V2 (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 4, \
+ .drd_reg = 0x98}
+/*CSTYLED*/
+#define DF_FICAD_HI_V2 (df_reg_def_t){ .drd_gens = DF_REV_ALL_23, \
+ .drd_func = 4, \
+ .drd_reg = 0x9c}
+/*CSTYLED*/
+#define DF_FICAD_LO_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 4, \
+ .drd_reg = 0xb8}
+/*CSTYLED*/
+#define DF_FICAD_HI_V4 (df_reg_def_t){ .drd_gens = DF_REV_4, \
+ .drd_func = 4, \
+ .drd_reg = 0xbc}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_AMDZEN_DF_H */
diff --git a/usr/src/uts/intel/sys/amdzen/umc.h b/usr/src/uts/intel/sys/amdzen/umc.h
new file mode 100644
index 0000000000..78644442d4
--- /dev/null
+++ b/usr/src/uts/intel/sys/amdzen/umc.h
@@ -0,0 +1,390 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Company
+ */
+
+#ifndef _SYS_UMC_H
+#define _SYS_UMC_H
+
+#include <sys/bitext.h>
+
+/*
+ * Various register definitions for accessing the AMD Unified Memory Controller
+ * (UMC) over SMN (the system management network). Note, that the SMN exists
+ * independently in each die and must be accessed through the appropriate
+ * IOHC.
+ *
+ * There are effectively four different revisions of the UMC that we know about
+ * and support querying:
+ *
+ * o DDR4 capable APUs
+ * o DDR4 capable CPUs
+ * o DDR5 capable APUs
+ * o DDR5 capable CPUs
+ *
+ * In general for a given revision and generation of a controller (DDR4 vs.
+ * DDR5), all of the address layouts are the same whether it is for an APU or a
+ * CPU. The main difference is generally in the number of features. For example,
+ * most APUs may not support the same rank multiplication bits and related
+ * features.
+ * within a generation is just which bits are implemented. This makes it much
+ * easier to define UMC information.
+ *
+ * Between DDR4 and DDR5 based devices, the register locations have shifted;
+ * however, generally speaking, the registers themselves are actually the same.
+ * Registers here, similar to the DF, have a common form:
+ *
+ * UMC_<reg name>_<vers>
+ *
+ * Here, <reg name> would be something like 'BASE', for the UMC
+ * UMC::CH::BaseAddr register. <vers> is one of DDR4 or DDR5. When the same
+ * register is supported at the same address between versions, then <vers> is
+ * elided.
+ *
+ * For fields inside of these registers, everything follows the same pattern in
+ * <sys/amdzen/df.h> which is:
+ *
+ * UMC_<reg name>_<vers>_GET_<field>
+ *
+ * Note, <vers> will be elided if the register is the same between the DDR4 and
+ * DDR5 versions.
+ *
+ * Finally, a cautionary note. While the DF provided a way for us to determine
+ * what version something is, we have not determined a way to programmatically
+ * determine what something supports outside of making notes based on the
+ * family, model, and stepping CPUID information. Unfortunately, you must look
+ * towards the documentation and find what you need in the PPR (processor
+ * programming reference).
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * UMC Channel registers. These are in SMN Space. DDR4 and DDR5 based UMCs share
+ * the same base address, somewhat surprisingly. This constructs the appropriate
+ * offset and ensures that a caller doesn't exceed the number of known instances
+ * of the register.
+ */
+static inline uint32_t
+amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
+ uint32_t reginst)
+{
+ ASSERT3U(umcno, <, 12);
+ ASSERT3U(nents, >, reginst);
+
+ uint32_t base = 0x50000;
+ uint32_t reg = base_reg + reginst * 4;
+ return ((umcno << 20) + base + reg);
+}
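+
+/*
+ * For example (not part of this change), amdzen_umc_smn_addr(1, 0x00, 4, 2)
+ * resolves to (1 << 20) + 0x50000 + 0x00 + (2 * 4) = 0x150008, i.e. instance
+ * 2 of the base address register on UMC 1.
+ */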
+
+/*
+ * UMC::CH::BaseAddr, UMC::CH::BaseAddrSec -- determines the base address used
+ * to match a chip select. Instances 0/1 always refer to DIMM 0, while
+ * instances 2/3 always refer to DIMM 1.
+ */
+#define UMC_BASE(u, i) amdzen_umc_smn_addr(u, 0x00, 4, i)
+#define UMC_BASE_SEC(u, i) amdzen_umc_smn_addr(u, 0x10, 4, i)
+#define UMC_BASE_GET_ADDR(r) bitx32(r, 31, 1)
+#define UMC_BASE_ADDR_SHIFT 9
+#define UMC_BASE_GET_EN(r) bitx32(r, 0, 0)
+
+/*
+ * UMC::BaseAddrExt, UMC::BaseAddrSecExt -- The first of several extensions to
+ * registers that allow more address bits. Note, only present in some DDR5
+ * capable SoCs.
+ */
+#define UMC_BASE_EXT_DDR5(u, i) amdzen_umc_smn_addr(u, 0xb00, 4, i)
+#define UMC_BASE_EXT_SEC_DDR5(u, i) amdzen_umc_smn_addr(u, 0xb10, 4, i)
+#define UMC_BASE_EXT_GET_ADDR(r) bitx32(r, 7, 0)
+#define UMC_BASE_EXT_ADDR_SHIFT 40
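+
+/*
+ * Putting the two together, a sketch (not part of this change) of how the
+ * full chip-select base address is presumably reconstructed on parts with the
+ * extension register:
+ *
+ *	uint64_t base = (uint64_t)UMC_BASE_GET_ADDR(lo) << UMC_BASE_ADDR_SHIFT;
+ *	base |= (uint64_t)UMC_BASE_EXT_GET_ADDR(ext) <<
+ *	    UMC_BASE_EXT_ADDR_SHIFT;
+ */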
+
+
+/*
+ * UMC::CH::AddrMask, UMC::CH::AddrMaskSec -- This register is used to compare
+ * the incoming address to see if it matches the base. Tweaking what is used
+ * for matching is often part of the interleaving strategy.
+ */
+#define UMC_MASK_DDR4(u, i) amdzen_umc_smn_addr(u, 0x20, 2, i)
+#define UMC_MASK_SEC_DDR4(u, i) amdzen_umc_smn_addr(u, 0x28, 2, i)
+#define UMC_MASK_DDR5(u, i) amdzen_umc_smn_addr(u, 0x20, 4, i)
+#define UMC_MASK_SEC_DDR5(u, i) amdzen_umc_smn_addr(u, 0x30, 4, i)
+#define UMC_MASK_GET_ADDR(r) bitx32(r, 31, 1)
+#define UMC_MASK_ADDR_SHIFT 9
+
+/*
+ * UMC::AddrMaskExt, UMC::AddrMaskSecExt -- Extended mask addresses.
+ */
+#define UMC_MASK_EXT_DDR5(u, i) amdzen_umc_smn_addr(u, 0xb20, 4, i)
+#define UMC_MASK_EXT_SEC_DDR5(u, i) amdzen_umc_smn_addr(u, 0xb30, 4, i)
+#define UMC_MASK_EXT_GET_ADDR(r) bitx32(r, 7, 0)
+#define UMC_MASK_EXT_ADDR_SHIFT 40
+
+/*
+ * UMC::CH::AddrCfg -- This register contains a number of bits that describe how
+ * the address is actually used, one per DIMM. Note, not all members are valid
+ * for all classes of DIMMs. It's worth calling out that the total number of
+ * banks value here describes the total number of banks on the entire chip, e.g.
+ * it is bank groups * banks/groups. Therefore to determine the number of
+ * banks/group you must subtract the number of bank group bits from the total
+ * number of bank bits.
+ */
+#define UMC_ADDRCFG_DDR4(u, i) amdzen_umc_smn_addr(u, 0x30, 2, i)
+#define UMC_ADDRCFG_DDR5(u, i) amdzen_umc_smn_addr(u, 0x40, 4, i)
+#define UMC_ADDRCFG_GET_NBANK_BITS(r) bitx32(r, 21, 20)
+#define UMC_ADDRCFG_NBANK_BITS_BASE 3
+#define UMC_ADDRCFG_GET_NCOL_BITS(r) bitx32(r, 19, 16)
+#define UMC_ADDRCFG_NCOL_BITS_BASE 5
+#define UMC_ADDRCFG_GET_NROW_BITS_LO(r) bitx32(r, 11, 8)
+#define UMC_ADDRCFG_NROW_BITS_LO_BASE 10
+#define UMC_ADDRCFG_GET_NBANKGRP_BITS(r) bitx32(r, 3, 2)
+
+#define UMC_ADDRCFG_DDR4_GET_NROW_BITS_HI(r) bitx32(r, 15, 12)
+#define UMC_ADDRCFG_DDR4_GET_NRM_BITS(r) bitx32(r, 5, 4)
+#define UMC_ADDRCFG_DDR5_GET_CSXOR(r) bitx32(r, 31, 30)
+#define UMC_ADDRCFG_DDR5_GET_NRM_BITS(r) bitx32(r, 6, 4)
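+
+/*
+ * Sketch of the banks-per-group computation described above (not part of this
+ * change):
+ *
+ *	uint32_t bank_bits = UMC_ADDRCFG_GET_NBANK_BITS(val) +
+ *	    UMC_ADDRCFG_NBANK_BITS_BASE;
+ *	uint32_t bgrp_bits = UMC_ADDRCFG_GET_NBANKGRP_BITS(val);
+ *	uint32_t banks_per_group = 1 << (bank_bits - bgrp_bits);
+ */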
+
+/*
+ * UMC::CH::AddrSel -- This register is used to program how the actual bits in
+ * the normalized address map to the row and bank. While the bank can select
+ * which bits in the normalized address are used to construct the bank number,
+ * row bits are contiguous from the starting number.
+ */
+#define UMC_ADDRSEL_DDR4(u, i) amdzen_umc_smn_addr(u, 0x40, 2, i)
+#define UMC_ADDRSEL_DDR5(u, i) amdzen_umc_smn_addr(u, 0x50, 4, i)
+#define UMC_ADDRSEL_GET_ROW_LO(r) bitx32(r, 27, 24)
+#define UMC_ADDRSEL_ROW_LO_BASE 12
+#define UMC_ADDRSEL_GET_BANK4(r) bitx32(r, 19, 16)
+#define UMC_ADDRSEL_GET_BANK3(r) bitx32(r, 15, 12)
+#define UMC_ADDRSEL_GET_BANK2(r) bitx32(r, 11, 8)
+#define UMC_ADDRSEL_GET_BANK1(r) bitx32(r, 7, 4)
+#define UMC_ADDRSEL_GET_BANK0(r) bitx32(r, 3, 0)
+#define UMC_ADDRSEL_BANK_BASE 5
+
+#define UMC_ADDRSEL_DDR4_GET_ROW_HI(r) bitx32(r, 31, 28)
+#define UMC_ADDRSEL_DDR4_ROW_HI_BASE 24
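+
+/*
+ * Sketch (not part of this change): each bank selector encodes which
+ * normalized address bit feeds that bank bit, relative to a base, and the row
+ * selector gives the first of a contiguous run of row bits:
+ *
+ *	uint32_t bank0_bit = UMC_ADDRSEL_BANK_BASE +
+ *	    UMC_ADDRSEL_GET_BANK0(val);
+ *	uint32_t row_lo_bit = UMC_ADDRSEL_ROW_LO_BASE +
+ *	    UMC_ADDRSEL_GET_ROW_LO(val);
+ */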
+
+/*
+ * UMC::CH::ColSelLo, UMC::CH::ColSelHi -- This register selects which address
+ * bits map to the various column select bits. These registers interleave so in
+ * the case of DDR4, it's 0x50, 0x54 for DIMM 0 lo, hi. Then 0x58, 0x5c for
+ * DIMM1. DDR5 based entries do something similar; however, instead of being
+ * per-DIMM, there is one of these for each CS.
+ *
+ * This leads to a somewhat odder construction for the maximum number of
+ * instances. Because amdzen_umc_smn_addr() assumes each register instance is 4
+ * bytes apart, we instead take the actual register instance and multiply it by
+ * 2. This means that in the DDR4 case we will always access what
+ * amdzen_umc_smn_addr() considers instance 0 and 2. In the DDR5 case this is 0,
+ * 2, 4, and 6. This means our maximum instance for both cases has to be one
+ * higher than this, 3 and 7 respectively. While technically you could use 4 and
+ * 8, this is a tighter bind.
+ */
+#define UMC_COLSEL_LO_DDR4(u, i) amdzen_umc_smn_addr(u, 0x50, 3, i * 2)
+#define UMC_COLSEL_HI_DDR4(u, i) amdzen_umc_smn_addr(u, 0x54, 3, i * 2)
+#define UMC_COLSEL_LO_DDR5(u, i) amdzen_umc_smn_addr(u, 0x60, 7, i * 2)
+#define UMC_COLSEL_HI_DDR5(u, i) amdzen_umc_smn_addr(u, 0x64, 7, i * 2)
+
+#define UMC_COLSEL_REMAP_GET_COL(r, x) bitx32(r, (3 + (4 * (x))), (4 * ((x))))
+#define UMC_COLSEL_LO_BASE 2
+#define UMC_COLSEL_HI_BASE 8
+
+/*
+ * UMC::CH::RmSel -- This register contains the bits that determine how the rank
+ * is determined. Which fields of this are valid vary a lot in the different
+ * parts. The DDR4 and DDR5 versions are different enough that we use totally
+ * disjoint definitions. It's also worth noting that DDR5 doesn't have a
+ * secondary version of this as it is included in the main register.
+ *
+ * In general, APUs have some of the MSBS (most significant bit swap) related
+ * fields; however, they do not have rank multiplication bits.
+ */
+#define UMC_RMSEL_DDR4(u, i) amdzen_umc_smn_addr(u, 0x70, 2, i)
+#define UMC_RMSEL_SEC_DDR4(u, i) amdzen_umc_smn_addr(u, 0x78, 2, i)
+#define UMC_RMSEL_DDR4_GET_INV_MSBO(r) bitx32(r, 19, 18)
+#define UMC_RMSEL_DDR4_GET_INV_MSBE(r) bitx32(r, 17, 16)
+#define UMC_RMSEL_DDR4_GET_RM2(r) bitx32(r, 11, 8)
+#define UMC_RMSEL_DDR4_GET_RM1(r) bitx32(r, 7, 4)
+#define UMC_RMSEL_DDR4_GET_RM0(r) bitx32(r, 3, 0)
+#define UMC_RMSEL_BASE 12
+
+#define UMC_RMSEL_DDR5(u, i) amdzen_umc_smn_addr(u, 0x80, 4, i)
+#define UMC_RMSEL_DDR5_GET_INV_MSBS_SEC(r) bitx32(r, 31, 30)
+#define UMC_RMSEL_DDR5_GET_INV_MSBS(r) bitx32(r, 29, 28)
+#define UMC_RMSEL_DDR5_GET_SUBCHAN(r) bitx32(r, 19, 16)
+#define UMC_RMSEL_DDR5_SUBCHAN_BASE 5
+#define UMC_RMSEL_DDR5_GET_RM3(r) bitx32(r, 15, 12)
+#define UMC_RMSEL_DDR5_GET_RM2(r) bitx32(r, 11, 8)
+#define UMC_RMSEL_DDR5_GET_RM1(r) bitx32(r, 7, 4)
+#define UMC_RMSEL_DDR5_GET_RM0(r) bitx32(r, 3, 0)
+
+
+/*
+ * UMC::CH::DimmCfg -- This describes several properties of the DIMM that is
+ * installed, such as its overall width or type.
+ */
+#define UMC_DIMMCFG_DDR4(u, i) amdzen_umc_smn_addr(u, 0x80, 2, i)
+#define UMC_DIMMCFG_DDR5(u, i) amdzen_umc_smn_addr(u, 0x90, 2, i)
+#define UMC_DIMMCFG_GET_PKG_RALIGN(r) bitx32(r, 10, 10)
+#define UMC_DIMMCFG_GET_REFRESH_DIS(r) bitx32(r, 9, 9)
+#define UMC_DIMMCFG_GET_DQ_SWAP_DIS(r) bitx32(r, 8, 8)
+#define UMC_DIMMCFG_GET_X16(r) bitx32(r, 7, 7)
+#define UMC_DIMMCFG_GET_X4(r) bitx32(r, 6, 6)
+#define UMC_DIMMCFG_GET_LRDIMM(r) bitx32(r, 5, 5)
+#define UMC_DIMMCFG_GET_RDIMM(r) bitx32(r, 4, 4)
+#define UMC_DIMMCFG_GET_CISCS(r) bitx32(r, 3, 3)
+#define UMC_DIMMCFG_GET_3DS(r) bitx32(r, 2, 2)
+
+#define UMC_DIMMCFG_DDR4_GET_NVDIMMP(r) bitx32(r, 12, 12)
+#define UMC_DIMMCFG_DDR4_GET_DDR4e(r) bitx32(r, 11, 11)
+#define UMC_DIMMCFG_DDR5_GET_RALIGN(r) bitx32(r, 13, 12)
+#define UMC_DIMMCFG_DDR5_GET_ASYM(r) bitx32(r, 11, 11)
+
+#define UMC_DIMMCFG_DDR4_GET_OUTPUT_INV(r) bitx32(r, 1, 1)
+#define UMC_DIMMCFG_DDR4_GET_MRS_MIRROR(r) bitx32(r, 0, 0)
+
+/*
+ * UMC::CH::AddrHashBank -- These registers contain various instructions about
+ * how to hash an address across a bank to influence which bank is used.
+ */
+#define UMC_BANK_HASH_DDR4(u, i) amdzen_umc_smn_addr(u, 0xc8, 5, i)
+#define UMC_BANK_HASH_DDR5(u, i) amdzen_umc_smn_addr(u, 0x98, 5, i)
+#define UMC_BANK_HASH_GET_ROW(r) bitx32(r, 31, 14)
+#define UMC_BANK_HASH_GET_COL(r) bitx32(r, 13, 1)
+#define UMC_BANK_HASH_GET_EN(r) bitx32(r, 0, 0)
+
+/*
+ * UMC::CH::AddrHashRM -- This hash register describes how to transform a UMC
+ * address when trying to do rank hashing. Note, instance 3 is reserved in
+ * DDR5 modes.
+ */
+#define UMC_RANK_HASH_DDR4(u, i) amdzen_umc_smn_addr(u, 0xdc, 3, i)
+#define UMC_RANK_HASH_DDR5(u, i) amdzen_umc_smn_addr(u, 0xb0, 4, i)
+#define UMC_RANK_HASH_GET_ADDR(r) bitx32(r, 31, 1)
+#define UMC_RANK_HASH_SHIFT 9
+#define UMC_RANK_HASH_GET_EN(r) bitx32(r, 0, 0)
+
+/*
+ * UMC::AddrHashRMExt -- Extended rank hash addresses.
+ */
+#define UMC_RANK_HASH_EXT_DDR5(u, i) amdzen_umc_smn_addr(u, 0xbb0, 4, i)
+#define UMC_RANK_HASH_EXT_GET_ADDR(r) bitx32(r, 7, 0)
+#define UMC_RANK_HASH_EXT_ADDR_SHIFT 40
+
+/*
+ * UMC::CH::AddrHashPC, UMC::CH::AddrHashPC2 -- These registers describe a hash
+ * to use for the DDR5 sub-channel. Note, in the DDR4 case this is actually the
+ * upper two rank hash registers defined above because on the systems where this
+ * occurs for DDR4, they only have up to one rank hash.
+ */
+#define UMC_PC_HASH_DDR4(u) UMC_RANK_HASH_DDR4(u, 1)
+#define UMC_PC_HASH2_DDR4(u) UMC_RANK_HASH_DDR4(u, 2)
+#define UMC_PC_HASH_DDR5(u) amdzen_umc_smn_addr(u, 0xc0, 1, 0)
+#define UMC_PC_HASH2_DDR5(u) amdzen_umc_smn_addr(u, 0xc4, 1, 0)
+#define UMC_PC_HASH_GET_ROW(r) bitx32(r, 31, 14)
+#define UMC_PC_HASH_GET_COL(r) bitx32(r, 13, 1)
+#define UMC_PC_HASH_GET_EN(r) bitx32(r, 0, 0)
+#define UMC_PC_HASH2_GET_BANK(r) bitx32(r, 4, 0)
+
+/*
+ * UMC::CH::AddrHashCS -- Hashing: chip-select edition. Note, these can
+ * ultimately change which DIMM is actually being accessed.
+ */
+#define UMC_CS_HASH_DDR4(u, i) amdzen_umc_smn_addr(u, 0xe8, 2, i)
+#define UMC_CS_HASH_DDR5(u, i) amdzen_umc_smn_addr(u, 0xc8, 2, i)
+#define UMC_CS_HASH_GET_ADDR(r) bitx32(r, 31, 1)
+#define UMC_CS_HASH_SHIFT 9
+#define UMC_CS_HASH_GET_EN(r) bitx32(r, 0, 0)
+
+/*
+ * UMC::AddrHashExtCS -- Extended chip-select hash addresses.
+ */
+#define UMC_CS_HASH_EXT_DDR5(u, i) amdzen_umc_smn_addr(u, 0xbc8, 2, i)
+#define UMC_CS_HASH_EXT_GET_ADDR(r) bitx32(r, 7, 0)
+#define UMC_CS_HASH_EXT_ADDR_SHIFT 40
+
+/*
+ * UMC::CH::UmcConfig -- This register controls various features of the device.
+ * For our purposes we mostly care about seeing if ECC is enabled and a DIMM
+ * type.
+ */
+#define UMC_UMCCFG(u) amdzen_umc_smn_addr(u, 0x100, 1, 0)
+#define UMC_UMCCFG_GET_READY(r) bitx32(r, 31, 31)
+#define UMC_UMCCFG_GET_ECC_EN(r) bitx32(r, 12, 12)
+#define UMC_UMCCFG_GET_BURST_CTL(r) bitx32(r, 11, 10)
+#define UMC_UMCCFG_GET_BURST_LEN(r) bitx32(r, 9, 8)
+#define UMC_UMCCFG_GET_DDR_TYPE(r) bitx32(r, 2, 0)
+#define UMC_UMCCFG_DDR4_T_DDR4 0
+#define UMC_UMCCFG_DDR4_T_LPDDR4 5
+
+#define UMC_UMCCFG_DDR5_T_DDR4 0
+#define UMC_UMCCFG_DDR5_T_DDR5 1
+#define UMC_UMCCFG_DDR5_T_LPDDR4 5
+#define UMC_UMCCFG_DDR5_T_LPDDR5 6
+
+/*
+ * UMC::CH::DataCtrl -- Various settings around whether data encryption or
+ * scrambling is enabled. Note, this register really changes a bunch from family
+ * to family.
+ */
+#define UMC_DATACTL(u) amdzen_umc_smn_addr(u, 0x144, 1, 0)
+#define UMC_DATACTL_GET_ENCR_EN(r) bitx32(r, 8, 8)
+#define UMC_DATACTL_GET_SCRAM_EN(r) bitx32(r, 0, 0)
+
+#define UMC_DATACTL_DDR4_GET_TWEAK(r) bitx32(r, 19, 16)
+#define UMC_DATACTL_DDR4_GET_VMG2M(r) bitx32(r, 12, 12)
+#define UMC_DATACTL_DDR4_GET_FORCE_ENCR(r) bitx32(r, 11, 11)
+
+#define UMC_DATACTL_DDR5_GET_TWEAK(r) bitx32(r, 16, 16)
+#define UMC_DATACTL_DDR5_GET_XTS(r) bitx32(r, 14, 14)
+#define UMC_DATACTL_DDR5_GET_AES256(r) bitx32(r, 13, 13)
+
+/*
+ * UMC::CH:EccCtrl -- Various settings around how ECC operates.
+ */
+#define UMC_ECCCTL(u) amdzen_umc_smn_addr(u, 0x14c, 1, 0)
+#define UMC_ECCCTL_GET_RD_EN(r) bitx32(r, 10, 10)
+#define UMC_ECCCTL_GET_X16(r) bitx32(r, 9, 9)
+#define UMC_ECCCTL_GET_UC_FATAL(r) bitx32(r, 8, 8)
+#define UMC_ECCCTL_GET_SYM_SIZE(r) bitx32(r, 7, 7)
+#define UMC_ECCCTL_GET_BIT_IL(r) bitx32(r, 6, 6)
+#define UMC_ECCCTL_GET_HIST_EN(r) bitx32(r, 5, 5)
+#define UMC_ECCCTL_GET_SW_SYM_EN(r) bitx32(r, 4, 4)
+#define UMC_ECCCTL_GET_WR_EN(r) bitx32(r, 0, 0)
+
+/*
+ * Note, while this group of fields appears generic and is the same on both
+ * DDR4 and DDR5 systems, it is not present on every SoC and seems to depend
+ * on something else inside the chip.
+ */
+#define UMC_ECCCTL_DDR_GET_PI(r) bitx32(r, 13, 13)
+#define UMC_ECCCTL_DDR_GET_PF_DIS(r) bitx32(r, 12, 12)
+#define UMC_ECCCTL_DDR_GET_SDP_OVR(r) bitx32(r, 11, 11)
+#define UMC_ECCCTL_DDR_GET_REPLAY_EN(r) bitx32(r, 1, 1)
+
+#define UMC_ECCCTL_DDR5_GET_PIN_RED(r) bitx32(r, 14, 14)
+
+/*
+ * UMC::CH::UmcCap, UMC::CH::UmcCapHi -- Various capability registers and
+ * feature disables. We mostly just record these for future debugging
+ * purposes. They aren't used as part of memory decoding.
+ */
+#define UMC_UMCCAP(u) amdzen_umc_smn_addr(u, 0xdf0, 1, 0)
+#define UMC_UMCCAP_HI(u) amdzen_umc_smn_addr(u, 0xdf4, 1, 0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_UMC_H */
diff --git a/usr/src/uts/intel/sys/mc.h b/usr/src/uts/intel/sys/mc.h
index d4815b515f..6bba18ad1c 100644
--- a/usr/src/uts/intel/sys/mc.h
+++ b/usr/src/uts/intel/sys/mc.h
@@ -23,6 +23,7 @@
*/
/*
* Copyright 2019 Joyent, Inc.
+ * Copyright 2022 Oxide Computer Company
*/
#ifndef _SYS_MC_H
@@ -87,21 +88,62 @@ typedef struct mc_snapshot_info {
/*
* Data used to simulate encoding or decoding of a physical / DIMM address.
+ * These are used in different ways between AMD and Intel, so this is a bit of a
+ * smorgasbord. Details about each field are listed below.
*/
typedef struct mc_encode_ioc {
+ /*
+	 * The first three values here are different addresses: a physical /
+	 * system address, a DRAM-channel relative address, and finally a
+	 * rank-relative address. Where a platform does not support one of
+	 * these, UINT64_MAX is used.
+ */
uint64_t mcei_pa;
+ uint64_t mcei_chan_addr;
+ uint64_t mcei_rank_addr;
+ /*
+	 * These next two give the memory controller driver a way to return
+	 * additional information. mcei_err generally corresponds to a
+	 * driver-specific enum and mcei_errdata is error-specific data that
+	 * can be useful.
uint64_t mcei_errdata;
uint32_t mcei_err;
+ /*
+	 * This next set identifies where to find the DIMM in question. The
+	 * board and chip are used to uniquely identify a
+ * socket. Generally on x86, there is only one board, so it would be
+ * zero. The chip should correspond to the socket ID. The die refers to
+ * a particular internal die if on a chiplet or MCP. The memory
+ * controller and channel refer to a unique instance of both within a
+ * given die. On platforms where the memory controller and channel are
+ * 1:1 (that is each memory controller has only a single channel or
+ * doesn't have a specific distinction between the two), set chan to 0
+ * and set the mc to the logical channel value. The DIMM is a relative
+ * DIMM in the channel, meaning it's usually going to be 0, 1, or 2.
+ */
uint32_t mcei_board;
uint32_t mcei_chip;
+ uint32_t mcei_die;
uint32_t mcei_mc;
uint32_t mcei_chan;
uint32_t mcei_dimm;
- uint64_t mcei_rank_addr;
- uint32_t mcei_rank;
+ /*
+	 * These values all refer to information on the DIMM itself and
+	 * identify how to find the address there. mcei_rank is meant to be a
+	 * logical rank; some systems phrase things that way, while others
+	 * phrase things in terms of a chip select and rank multiplication.
+	 * For unknown entries use UINT8_MAX.
+ */
uint32_t mcei_row;
uint32_t mcei_column;
- uint32_t mcei_pad;
+ uint8_t mcei_rank;
+ uint8_t mcei_cs;
+ uint8_t mcei_rm;
+ uint8_t mcei_bank;
+ uint8_t mcei_bank_group;
+ uint8_t mcei_subchan;
+ uint8_t mcei_pad[6];
} mc_encode_ioc_t;
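Given the sentinel conventions described above (UINT64_MAX for addresses a platform does not support, UINT8_MAX for unknown DIMM-level values), a consumer of a decoded mc_encode_ioc_t can report only the fields that were actually filled in. The following is purely an illustrative sketch, assuming the usual <stdio.h>, <stdint.h>, and <inttypes.h> includes, and is not part of the interface:

static void
mc_print_decode(const mc_encode_ioc_t *enc)
{
	(void) printf("socket %u, die %u, mc %u, chan %u, dimm %u\n",
	    enc->mcei_chip, enc->mcei_die, enc->mcei_mc, enc->mcei_chan,
	    enc->mcei_dimm);
	/* Addresses the platform could not produce are left at UINT64_MAX. */
	if (enc->mcei_chan_addr != UINT64_MAX) {
		(void) printf("channel address: 0x%" PRIx64 "\n",
		    enc->mcei_chan_addr);
	}
	if (enc->mcei_rank_addr != UINT64_MAX) {
		(void) printf("rank address: 0x%" PRIx64 "\n",
		    enc->mcei_rank_addr);
	}
	/* DIMM-level values that are unknown are left at UINT8_MAX. */
	if (enc->mcei_cs != UINT8_MAX)
		(void) printf("chip-select: %u\n", enc->mcei_cs);
	if (enc->mcei_bank != UINT8_MAX)
		(void) printf("bank: %u\n", enc->mcei_bank);
	(void) printf("row: 0x%x, column: 0x%x\n", enc->mcei_row,
	    enc->mcei_column);
}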
#ifdef __cplusplus
diff --git a/usr/src/uts/intel/sys/x86_archext.h b/usr/src/uts/intel/sys/x86_archext.h
index f1241a9183..ab62bd6deb 100644
--- a/usr/src/uts/intel/sys/x86_archext.h
+++ b/usr/src/uts/intel/sys/x86_archext.h
@@ -32,7 +32,7 @@
* Copyright 2012 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
* Copyright 2014 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
* Copyright 2018 Nexenta Systems, Inc.
- * Copyright 2021 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
#ifndef _SYS_X86_ARCHEXT_H
@@ -617,6 +617,15 @@ extern "C" {
#define IA32_PKG_THERM_INTERRUPT_TR2_IE 0x00800000
#define IA32_PKG_THERM_INTERRUPT_PL_NE 0x01000000
+/*
+ * AMD TOM and TOM2 MSRs. These control the split between DRAM and MMIO below
+ * and above 4 GiB respectively. These have existed since family 0xf.
+ */
+#define MSR_AMD_TOM 0xc001001a
+#define MSR_AMD_TOM_MASK(x) ((x) & 0xffffff800000)
+#define MSR_AMD_TOM2 0xc001001d
+#define MSR_AMD_TOM2_MASK(x) ((x) & 0xffffff800000)
+
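To show how the mask macros are meant to be applied: the masked MSR value marks the boundary between DRAM and MMIO, below 4 GiB for TOM and above 4 GiB for TOM2. A minimal sketch, assuming an rdmsr()-style read primitive is available in the caller's environment:

static void
amd_get_tom(uint64_t *tom, uint64_t *tom2)
{
	/* Mask off the reserved low bits to get the DRAM/MMIO split points. */
	*tom = MSR_AMD_TOM_MASK(rdmsr(MSR_AMD_TOM));
	*tom2 = MSR_AMD_TOM2_MASK(rdmsr(MSR_AMD_TOM2));
}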
#define MCI_CTL_VALUE 0xffffffff
diff --git a/usr/src/uts/intel/zen_umc/Makefile b/usr/src/uts/intel/zen_umc/Makefile
new file mode 100644
index 0000000000..7e529b4041
--- /dev/null
+++ b/usr/src/uts/intel/zen_umc/Makefile
@@ -0,0 +1,41 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2022 Oxide Computer Company
+#
+
+UTSBASE = ../..
+
+MODULE = zen_umc
+OBJECTS = $(ZEN_UMC_OBJS:%=$(OBJS_DIR)/%)
+ROOTMODULE = $(ROOT_DRV_DIR)/$(MODULE)
+
+include $(UTSBASE)/intel/Makefile.intel
+
+ALL_TARGET = $(BINARY)
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+CPPFLAGS += -I$(UTSBASE)/intel/io/amdzen
+LDFLAGS += -Ndrv/amdzen
+
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+install: $(INSTALL_DEPS)
+
+include $(UTSBASE)/intel/Makefile.targ