summaryrefslogtreecommitdiff
path: root/usr/src
diff options
context:
space:
mode:
authorKeith M Wesolowski <wesolows@oxide.computer>2022-07-21 06:57:54 -0700
committerRobert Mustacchi <rm@fingolfin.org>2022-09-11 04:06:00 +0000
commitba215efe42e70993d3838f7af671f9d9fc0ebc33 (patch)
tree69ccaa0a7d42ba0e7ced50b2aceb61cc3f0977e4 /usr/src
parent56726c7e321b6e5ecb2f10215f5386016547e68c (diff)
downloadillumos-joyent-ba215efe42e70993d3838f7af671f9d9fc0ebc33.tar.gz
14936 need a better SMN addressing mechanism
Reviewed by: Robert Mustacchi <rm@fingolfin.org> Reviewed by: Andy Fiddaman <illumos@fiddaman.net> Approved by: Garrett D'Amore <garrett@damore.org>
Diffstat (limited to 'usr/src')
-rw-r--r--usr/src/uts/intel/io/amdzen/amdzen.c13
-rw-r--r--usr/src/uts/intel/io/amdzen/amdzen_client.h5
-rw-r--r--usr/src/uts/intel/io/amdzen/smntemp.c5
-rw-r--r--usr/src/uts/intel/io/amdzen/usmn.c8
-rw-r--r--usr/src/uts/intel/io/amdzen/zen_umc.c95
-rw-r--r--usr/src/uts/intel/sys/amdzen/smn.h461
-rw-r--r--usr/src/uts/intel/sys/amdzen/umc.h368
7 files changed, 841 insertions, 114 deletions
diff --git a/usr/src/uts/intel/io/amdzen/amdzen.c b/usr/src/uts/intel/io/amdzen/amdzen.c
index c3617c9742..f6ba32d968 100644
--- a/usr/src/uts/intel/io/amdzen/amdzen.c
+++ b/usr/src/uts/intel/io/amdzen/amdzen.c
@@ -277,18 +277,19 @@ amdzen_df_read32_bcast(amdzen_t *azn, amdzen_df_t *df, const df_reg_def_t def)
static uint32_t
-amdzen_smn_read32(amdzen_t *azn, amdzen_df_t *df, uint32_t reg)
+amdzen_smn_read32(amdzen_t *azn, amdzen_df_t *df, const smn_reg_t reg)
{
VERIFY(MUTEX_HELD(&azn->azn_mutex));
- amdzen_stub_put32(df->adf_nb, AMDZEN_NB_SMN_ADDR, reg);
+ amdzen_stub_put32(df->adf_nb, AMDZEN_NB_SMN_ADDR, SMN_REG_ADDR(reg));
return (amdzen_stub_get32(df->adf_nb, AMDZEN_NB_SMN_DATA));
}
static void
-amdzen_smn_write32(amdzen_t *azn, amdzen_df_t *df, uint32_t reg, uint32_t val)
+amdzen_smn_write32(amdzen_t *azn, amdzen_df_t *df, const smn_reg_t reg,
+ const uint32_t val)
{
VERIFY(MUTEX_HELD(&azn->azn_mutex));
- amdzen_stub_put32(df->adf_nb, AMDZEN_NB_SMN_ADDR, reg);
+ amdzen_stub_put32(df->adf_nb, AMDZEN_NB_SMN_ADDR, SMN_REG_ADDR(reg));
amdzen_stub_put32(df->adf_nb, AMDZEN_NB_SMN_DATA, val);
}
@@ -321,7 +322,7 @@ amdzen_df_find(amdzen_t *azn, uint_t dfno)
* Client functions that are used by nexus children.
*/
int
-amdzen_c_smn_read32(uint_t dfno, uint32_t reg, uint32_t *valp)
+amdzen_c_smn_read32(uint_t dfno, const smn_reg_t reg, uint32_t *valp)
{
amdzen_df_t *df;
amdzen_t *azn = amdzen_data;
@@ -344,7 +345,7 @@ amdzen_c_smn_read32(uint_t dfno, uint32_t reg, uint32_t *valp)
}
int
-amdzen_c_smn_write32(uint_t dfno, uint32_t reg, uint32_t val)
+amdzen_c_smn_write32(uint_t dfno, const smn_reg_t reg, const uint32_t val)
{
amdzen_df_t *df;
amdzen_t *azn = amdzen_data;
diff --git a/usr/src/uts/intel/io/amdzen/amdzen_client.h b/usr/src/uts/intel/io/amdzen/amdzen_client.h
index 4d937a1321..fc82c1039e 100644
--- a/usr/src/uts/intel/io/amdzen/amdzen_client.h
+++ b/usr/src/uts/intel/io/amdzen/amdzen_client.h
@@ -22,6 +22,7 @@
#include <sys/types.h>
#include <sys/amdzen/df.h>
+#include <sys/amdzen/smn.h>
#ifdef __cplusplus
extern "C" {
@@ -51,8 +52,8 @@ extern int amdzen_c_df_fabric_decomp(df_fabric_decomp_t *);
/*
* SMN and DF access routines.
*/
-extern int amdzen_c_smn_read32(uint_t, uint32_t, uint32_t *);
-extern int amdzen_c_smn_write32(uint_t, uint32_t, uint32_t);
+extern int amdzen_c_smn_read32(uint_t, const smn_reg_t, uint32_t *);
+extern int amdzen_c_smn_write32(uint_t, const smn_reg_t, const uint32_t);
extern int amdzen_c_df_read32(uint_t, uint8_t, const df_reg_def_t, uint32_t *);
extern int amdzen_c_df_read64(uint_t, uint8_t, const df_reg_def_t, uint64_t *);
diff --git a/usr/src/uts/intel/io/amdzen/smntemp.c b/usr/src/uts/intel/io/amdzen/smntemp.c
index aa595f5ce5..94b7aa8b83 100644
--- a/usr/src/uts/intel/io/amdzen/smntemp.c
+++ b/usr/src/uts/intel/io/amdzen/smntemp.c
@@ -11,7 +11,7 @@
/*
* Copyright 2019, Joyent, Inc.
- * Copyright 2020 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
/*
@@ -31,6 +31,7 @@
#include <sys/cpuvar.h>
#include <sys/sensors.h>
#include <sys/sysmacros.h>
+#include <sys/amdzen/smn.h>
#include <amdzen_client.h>
/*
@@ -39,7 +40,7 @@
* accessed through the northbridge. They are not addresses in PCI configuration
* space.
*/
-#define SMN_SMU_THERMAL_CURTEMP 0x00059800
+#define SMN_SMU_THERMAL_CURTEMP SMN_MAKE_REG(0x00059800)
#define SMN_SMU_THERMAL_CURTEMP_TEMPERATURE(x) ((x) >> 21)
#define SMN_SMU_THERMAL_CURTEMP_RANGE_SEL (1 << 19)
diff --git a/usr/src/uts/intel/io/amdzen/usmn.c b/usr/src/uts/intel/io/amdzen/usmn.c
index d5050def92..789e15830e 100644
--- a/usr/src/uts/intel/io/amdzen/usmn.c
+++ b/usr/src/uts/intel/io/amdzen/usmn.c
@@ -10,7 +10,7 @@
*/
/*
- * Copyright 2021 Oxide Computer Company
+ * Copyright 2022 Oxide Computer Company
*/
/*
@@ -101,7 +101,8 @@ usmn_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
return (EINVAL);
}
- ret = amdzen_c_smn_read32(dfno, usr.usr_addr, &usr.usr_data);
+ ret = amdzen_c_smn_read32(dfno, SMN_MAKE_REG(usr.usr_addr),
+ &usr.usr_data);
if (ret != 0) {
return (ret);
}
@@ -112,7 +113,8 @@ usmn_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
return (EINVAL);
}
- ret = amdzen_c_smn_write32(dfno, usr.usr_addr, usr.usr_data);
+ ret = amdzen_c_smn_write32(dfno, SMN_MAKE_REG(usr.usr_addr),
+ usr.usr_data);
if (ret != 0) {
return (ret);
}
diff --git a/usr/src/uts/intel/io/amdzen/zen_umc.c b/usr/src/uts/intel/io/amdzen/zen_umc.c
index bf5914fae6..ed10a1e50b 100644
--- a/usr/src/uts/intel/io/amdzen/zen_umc.c
+++ b/usr/src/uts/intel/io/amdzen/zen_umc.c
@@ -1856,7 +1856,8 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
umc_cs_t *cs0, *cs1;
const uint32_t id = chan->chan_logid;
int ret;
- uint32_t val, reg;
+ uint32_t val;
+ smn_reg_t reg;
ASSERT3U(dimmno, <, ZEN_UMC_MAX_DIMMS);
dimm = &chan->chan_dimms[dimmno];
@@ -1872,11 +1873,11 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
*/
for (uint_t i = 0; i < ZEN_UMC_MAX_CS_PER_DIMM; i++) {
uint64_t addr;
- const uint32_t reginst = i + dimmno * 2;
+ const uint16_t reginst = i + dimmno * 2;
reg = UMC_BASE(id, reginst);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read base "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -1887,7 +1888,8 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_BASE_SEC(id, reginst);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read "
- "secondary base register %x: %d", reg, ret);
+ "secondary base register %x: %d", SMN_REG_ADDR(reg),
+ ret);
return (B_FALSE);
}
@@ -1899,7 +1901,7 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_MASK_DDR4(id, dimmno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read mask register "
- "%x: %d", reg, ret);
+ "%x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -1915,7 +1917,7 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_MASK_SEC_DDR4(id, dimmno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read secondary mask "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
cs0->ucs_sec_mask = (uint64_t)UMC_MASK_GET_ADDR(val) <<
@@ -1926,7 +1928,7 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_ADDRCFG_DDR4(id, dimmno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read address config "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -1962,7 +1964,7 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_ADDRSEL_DDR4(id, dimmno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read bank address "
- "select register %x: %d", reg, ret);
+ "select register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
cs0->ucs_row_hi_bit = UMC_ADDRSEL_DDR4_GET_ROW_HI(val) +
@@ -1987,7 +1989,7 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_COLSEL_LO_DDR4(id, dimmno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read column address "
- "select low register %x: %d", reg, ret);
+ "select low register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
for (uint_t i = 0; i < ZEN_UMC_MAX_COLSEL_PER_REG; i++) {
@@ -1998,7 +2000,7 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_COLSEL_HI_DDR4(id, dimmno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read column address "
- "select high register %x: %d", reg, ret);
+ "select high register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
for (uint_t i = 0; i < ZEN_UMC_MAX_COLSEL_PER_REG; i++) {
@@ -2017,7 +2019,7 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_RMSEL_DDR4(id, dimmno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read rank address "
- "select register %x: %d", reg, ret);
+ "select register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
cs0->ucs_inv_msbs = UMC_RMSEL_DDR4_GET_INV_MSBE(val);
@@ -2033,7 +2035,7 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_RMSEL_SEC_DDR4(id, dimmno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read secondary rank "
- "address select register %x: %d", reg, ret);
+ "address select register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
cs0->ucs_inv_msbs_sec = UMC_RMSEL_DDR4_GET_INV_MSBE(val);
@@ -2050,7 +2052,7 @@ zen_umc_fill_chan_dimm_ddr4(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_DIMMCFG_DDR4(id, dimmno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read DIMM "
- "configuration register %x: %d", reg, ret);
+ "configuration register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
dimm->ud_dimmcfg_raw = val;
@@ -2100,7 +2102,8 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
{
int ret;
umc_cs_t *cs;
- uint32_t reg, val;
+ uint32_t val;
+ smn_reg_t reg;
const uint32_t id = chan->chan_logid;
const uint32_t regno = dimmno * 2 + rankno;
@@ -2111,7 +2114,7 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_BASE(id, regno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read base "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
cs->ucs_base.udb_base = (uint64_t)UMC_BASE_GET_ADDR(val) <<
@@ -2124,7 +2127,8 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) !=
0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read "
- "extended base register %x: %d", reg, ret);
+ "extended base register %x: %d", SMN_REG_ADDR(reg),
+ ret);
return (B_FALSE);
}
@@ -2136,7 +2140,7 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_BASE_SEC(id, regno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read secondary base "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
cs->ucs_sec.udb_base = (uint64_t)UMC_BASE_GET_ADDR(val) <<
@@ -2149,8 +2153,8 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) !=
0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read "
- "extended secondary base register %x: %d", reg,
- ret);
+ "extended secondary base register %x: %d",
+ SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -2162,7 +2166,7 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_MASK_DDR5(id, regno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read mask "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
cs->ucs_base_mask = (uint64_t)UMC_MASK_GET_ADDR(val) <<
@@ -2175,7 +2179,8 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) !=
0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read "
- "extended mask register %x: %d", reg, ret);
+ "extended mask register %x: %d", SMN_REG_ADDR(reg),
+ ret);
return (B_FALSE);
}
@@ -2188,7 +2193,7 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_MASK_SEC_DDR5(id, regno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read secondary mask "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
cs->ucs_sec_mask = (uint64_t)UMC_MASK_GET_ADDR(val) <<
@@ -2201,7 +2206,8 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) !=
0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read "
- "extended mask register %x: %d", reg, ret);
+ "extended mask register %x: %d", SMN_REG_ADDR(reg),
+ ret);
return (B_FALSE);
}
@@ -2213,7 +2219,7 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_ADDRCFG_DDR5(id, regno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read address config "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
if ((umc->umc_fdata->zufd_flags & ZEN_UMC_FAM_F_CS_XOR) != 0) {
@@ -2234,7 +2240,7 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_ADDRSEL_DDR5(id, regno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read address select "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
cs->ucs_row_hi_bit = 0;
@@ -2254,7 +2260,7 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_COLSEL_LO_DDR5(id, regno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read column address "
- "select low register %x: %d", reg, ret);
+ "select low register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
for (uint_t i = 0; i < ZEN_UMC_MAX_COLSEL_PER_REG; i++) {
@@ -2265,7 +2271,7 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_COLSEL_HI_DDR5(id, regno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read column address "
- "select high register %x: %d", reg, ret);
+ "select high register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
for (uint_t i = 0; i < ZEN_UMC_MAX_COLSEL_PER_REG; i++) {
@@ -2282,7 +2288,7 @@ zen_umc_fill_chan_rank_ddr5(zen_umc_t *umc, zen_umc_df_t *df,
reg = UMC_RMSEL_DDR5(id, regno);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read rank multiply "
- "select register %x: %d", reg, ret);
+ "select register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -2358,7 +2364,7 @@ zen_umc_fill_chan_hash(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan,
boolean_t ddr4)
{
int ret;
- uint32_t reg;
+ smn_reg_t reg;
uint32_t val;
const umc_chan_hash_flags_t flags = umc->umc_fdata->zufd_chan_hash;
@@ -2379,7 +2385,8 @@ zen_umc_fill_chan_hash(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg,
&val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read "
- "bank hash register %x: %d", reg, ret);
+ "bank hash register %x: %d",
+ SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -2403,7 +2410,8 @@ zen_umc_fill_chan_hash(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg,
&val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read "
- "rm hash register %x: %d", reg, ret);
+ "rm hash register %x: %d",
+ SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -2420,7 +2428,8 @@ zen_umc_fill_chan_hash(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg,
&val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read "
- "rm hash ext register %x: %d", reg, ret);
+ "rm hash ext register %x: %d",
+ SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -2441,7 +2450,7 @@ zen_umc_fill_chan_hash(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read pc hash "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -2457,7 +2466,7 @@ zen_umc_fill_chan_hash(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read pc hash "
- "2 register %x: %d", reg, ret);
+ "2 register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -2478,7 +2487,7 @@ zen_umc_fill_chan_hash(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg,
&val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read "
- "cs hash register %x", reg);
+ "cs hash register %x", SMN_REG_ADDR(reg));
return (B_FALSE);
}
@@ -2495,7 +2504,8 @@ zen_umc_fill_chan_hash(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan,
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg,
&val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read "
- "cs hash ext register %x", reg);
+ "cs hash ext register %x",
+ SMN_REG_ADDR(reg));
return (B_FALSE);
}
@@ -2514,7 +2524,8 @@ zen_umc_fill_chan_hash(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan,
static boolean_t
zen_umc_fill_chan(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan)
{
- uint32_t reg, val;
+ uint32_t val;
+ smn_reg_t reg;
const uint32_t id = chan->chan_logid;
int ret;
boolean_t ddr4;
@@ -2538,7 +2549,7 @@ zen_umc_fill_chan(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan)
reg = UMC_UMCCFG(id);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read UMC "
- "configuration register %x: %d", reg, ret);
+ "configuration register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
@@ -2561,7 +2572,7 @@ zen_umc_fill_chan(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan)
reg = UMC_DATACTL(id);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read data control "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
chan->chan_datactl_raw = val;
@@ -2582,7 +2593,7 @@ zen_umc_fill_chan(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan)
reg = UMC_ECCCTL(id);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read ECC control "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
chan->chan_eccctl_raw = val;
@@ -2594,7 +2605,7 @@ zen_umc_fill_chan(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan)
reg = UMC_UMCCAP(id);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read UMC cap"
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
chan->chan_umccap_raw = val;
@@ -2602,7 +2613,7 @@ zen_umc_fill_chan(zen_umc_t *umc, zen_umc_df_t *df, zen_umc_chan_t *chan)
reg = UMC_UMCCAP_HI(id);
if ((ret = amdzen_c_smn_read32(df->zud_dfno, reg, &val)) != 0) {
dev_err(umc->umc_dip, CE_WARN, "failed to read UMC cap high "
- "register %x: %d", reg, ret);
+ "register %x: %d", SMN_REG_ADDR(reg), ret);
return (B_FALSE);
}
chan->chan_umccap_hi_raw = val;
diff --git a/usr/src/uts/intel/sys/amdzen/smn.h b/usr/src/uts/intel/sys/amdzen/smn.h
new file mode 100644
index 0000000000..3f8250a158
--- /dev/null
+++ b/usr/src/uts/intel/sys/amdzen/smn.h
@@ -0,0 +1,461 @@
+/*
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ */
+
+/*
+ * Copyright 2022 Oxide Computer Co.
+ */
+
+#ifndef _SYS_AMDZEN_SMN_H
+#define _SYS_AMDZEN_SMN_H
+
+#include <sys/debug.h>
+#include <sys/types.h>
+
+/*
+ * Generic definitions for the system management network (SMN) in Milan and many
+ * other AMD Zen processors. These are shared between the amdzen nexus and its
+ * client drivers and kernel code that may require SMN access to resources.
+ *
+ * ------------------------
+ * Endpoints and Addressing
+ * ------------------------
+ *
+ * SMN addresses are 36 bits long but in practice we can use only 32. Bits
+ * [35:32] identify a destination node, but all consumers instead direct SMN
+ * transactions to a specific node by selecting the address/data register pair
+ * in the NBIO PCI config space corresponding to the destination. Additional
+ * information about nodes and the organisation of devices in the Zen
+ * architecture may be found in the block comments in amdzen.c and cpuid.c.
+ *
+ * The SMN provides access to instances of various functional units present on
+ * or accessed via each node. Some functional units have only a single instance
+ * per node while others may have many. Each functional unit instance has one
+ * or more apertures in which it decodes addresses. The aperture portion of the
+ * address consists of bits [31:20] and the remainder of the address is used to
+ * specify a register instance within that functional unit. To complicate
+ * matters, some functional units have multiple smaller sub-units that decode
+ * smaller regions within its parent's aperture; in some cases, the bits in a
+ * mask describing the sub-unit's registers may not be contiguous. To keep
+ * software relatively simple, we generally treat sub-units and parent units the
+ * same and try to choose collections of registers whose addresses can all be
+ * computed in the same manner to form what we will describe as a unit.
+ *
+ * Each functional unit should typically have its own header containing register
+ * definitions, accessors, and address calculation routines; some functional
+ * units are small and straightforward while others may have numerous complex
+ * sub-units, registers with many instances whose locations are computed in
+ * unusual and nonstandard ways, and other features that need to be declared for
+ * consumers. Those functional units that are present across many processors
+ * and have similar or identical contents across them should live in this
+ * directory; umc.h is such an example. Others may be specific to a particular
+ * processor family (see cpuid.c) or other collection and may require their own
+ * subdirectories, symbol prefixes, and so on. Unlike the DF, the existence,
+ * location, and format of registers accessible over SMN are not versioned nor
+ * are they generally self-discoverable. Each functional unit may be present or
+ * absent, in varying numbers and with varying functionality, across the entire
+ * Zen product range. Therefore, at this time most per-unit headers are
+ * intended for use only by code that will execute on a specific processor
+ * family. Unifying them over time is considered desirable to the extent the
+ * hardware allows it.
+ *
+ * -----
+ * Types
+ * -----
+ *
+ * Practically every last one of us has screwed up the order of arguments to
+ * functions like amdzen_smn_write32() when they take an address and a value of
+ * the same type. Repeatedly. Often. To safety this particularly annoying
+ * footgun, we pass SMN register addresses around in a dedicated struct type
+ * smn_reg_t, intended to be instantiated only by the amdzen_xx_smn_reg() and
+ * analogous kernel functions and the macros that expand to them or, for the
+ * YOLO crew, SMN_MAKE_REG(). Since the struct type and uint32_t are not
+ * compatible, the compiler will always squawk if the register and value
+ * arguments are reversed, leaving us far fewer baffling failures to debug at
+ * runtime. Typical callers don't require any awareness of this at all, but
+ * those that want to pass the address around to e.g. log warnings can obtain
+ * the uint32_t address via SMN_REG_ADDR().
+ *
+ * Register definitions within functional units are provided by objects of type
+ * `const smn_reg_def_t`, the usage of which is described in detail in the next
+ * section. For now these are produced on demand by macros; see additional
+ * notes on conventions below. In time, this mechanism may be extended to
+ * incorporate version information in a manner similar to that used in df.h. An
+ * automated mechanism for creating a single collection of register and field
+ * definitions for C, in CTF, and/or for other language consumers as well as
+ * automated register value decoding remains an open area for future work.
+ *
+ * -----------------------
+ * Instances and Iterators
+ * -----------------------
+ *
+ * Not only do some functional units have many instances, so too do many
+ * registers. AMD documentation describes registers in terms of a series of
+ * iterators over various functional units, subunits, and other entities and
+ * attributes that each multiply the number of register instances. A concrete
+ * example from the publicly-available Naples PPR (publication 54945 rev. 1.14)
+ * may make this simpler to understand. Unfortunately, SMN is not described by
+ * this document, but the register instance syntax used is the same and is
+ * described in additional detail in sections 1.3.3-4. For our example, let us
+ * consider the same MSR that AMD uses in their own example,
+ * Core::X86::MSR::TSC. We are given that this register has the following
+ * instances: lthree[1:0]_core[3:0]_thread[1:0]. We therefore have three
+ * iterators: one for 'lthree's, one for 'core's for each 'lthree', and one for
+ * 'thread's for each 'core'. We can also see that there are 16 total
+ * instances; in fact, there are actually 16 per core-complex die (CCD), which
+ * documents for more recent processors would expose as a fourth iterator. To
+ * keep things relatively simple, we will assume that there are only 16 per
+ * processor. If it were possible to access all of these instances via MMIO,
+ * SMN, or some other flat address space (it isn't, as far as we can tell), a
+ * function for computing the address of each instance would require three
+ * parameters. Let us suppose that this register really were accessible via
+ * SMN; in that case, we would also be provided with a list of instance aliases
+ * such as
+ *
+ * _thread[1:0]_core[7:0]_lthree[1:0]_alias_SMN: THREADREGS[1:0]x0000_0010;
+ * THREADREGS[1:0]=COREREGS[7:0]x0000_[4,0]000;
+ * COREREGS[7:0]=L3REGS[1:0]x000[7:0]_5000; L3REGS[1:0]=57[A,6]0_0000
+ *
+ * To compute the address of an instance of this hypothetical register, we would
+ * begin by determining that its top-level functional unit is L3REGS with a base
+ * aperture at 0x5760_0000. There are two instances of this functional unit (0
+ * and 1) and each subsequent instance is offset 0x40_0000 from the previous.
+ * This allows us to compute the base address of each L3REGS block; a similar
+ * process is then used to compute the base address of each COREREGS block, and
+ * finally the address of each THREADREGS block that contains the register
+ * instance. In practice, we might choose instead to consider the COREREGS as
+ * our functional unit, with instances at 0x5760_5000, 0x5761_5000, 0x57A0_5000,
+ * and 0x57A1_5000; whether it is useful to do this depends on whether we need
+ * to consider other registers in the L3REGS unit that may not have per-core
+ * blocks or instances but would otherwise be interleaved with these. This ends
+ * up being something of a judgment call. Let's suppose we want to consider the
+ * entire L3REGS functional unit and write a function to compute the address of
+ * any register (including our hypothetical TSC) in the subordinate THREADREGS
+ * blocks. We'll start by adding the new unit to the smn_unit_t enumeration;
+ * let's call it SMN_UNIT_L3REGS_COREREGS since that's the sub-unit level at
+ * which we can uniformly compute register instance addresses. We have already
+ * determined our base aperture and we know that we have 3 iterators and
+ * therefore three parameters; all SMN address calculators return an smn_reg_t
+ * and must accept an smn_reg_def_t. Therefore our function's signature is:
+ *
+ * smn_reg_t amdzen_smn_l3regs_coreregs_reg(uint8_t l3no,
+ * const smn_reg_def_t def, uint16_t coreinst, uint16_t threadinst);
+ *
+ * We have chosen to use a base aperture of 0x5760_0000 and unit offset
+ * 0x40_0000, so we can begin by computing a COREREGS aperture:
+ *
+ * const uint32_t aperture_base = 0x57600000;
+ * const uint32_t aperture_off = l3no * 0x400000;
+ * const uint32_t coreregs_aperture_base = 0x5000;
+ * const uint32_t coreregs_aperture_off = coreinst * 0x10000;
+ *
+ * We can now consider the smn_reg_def_t our function will be given, which
+ * describes THREADREGS::TSC. Within the COREREGS functional sub-unit, each
+ * thread register has 2 instances present at a stride of 0x4000 bytes (from our
+ * hypothetical register definition), so the register would be defined as
+ * follows:
+ *
+ * #define D_L3REGS_COREREGS_THREAD_TSC (const smn_reg_def_t){ \
+ * .srd_unit = SMN_UNIT_L3REGS_COREREGS, \
+ * .srd_reg = 0x10, \
+ * .srd_nents = 2, \
+ * .srd_stride = 0x4000 \
+ * }
+ *
+ * Note that describing the number of entries and their stride in the register
+ * definition allows us to collapse the last functional sub-unit in our
+ * calculation process: we need not compute the base aperture address of the
+ * THREADREGS sub-unit. Instead, we can follow our previous code with:
+ *
+ * const uint32_t aperture = aperture_base +
+ * coreregs_aperture_base + coreregs_aperture_off;
+ * const uint32_t reg = def.srd_reg + threadinst * def.srd_stride;
+ *
+ * Finally, we convert the aperture address and register offset into the
+ * appropriate type and return it:
+ *
+ * return (SMN_MAKE_REG(aperture + reg));
+ *
+ * As you can see, other registers in THREADREGS would be defined with the same
+ * number of entries and stride but a different offset (srd_reg member), while
+ * other registers in the COREREGS block would have a different offset and
+ * stride. For example, if a block of per-core (not per-thread) registers were
+ * located at COREREGS[7:0]x0000_1000, a register called "COREREGS::FrobberCntl"
+ * in that block with a single instance at offset 0x48 might be defined as
+ *
+ * #define D_L3REGS_COREREGS_FROB_CTL (const smn_reg_def_t){ \
+ * .srd_unit = SMN_UNIT_L3REGS_COREREGS, \
+ * .srd_reg = 0x1048, \
+ * .srd_nents = 1 \
+ * }
+ *
+ * You can satisfy yourself that the same calculation function we wrote above
+ * will correctly compute the address of the sole instance (0) of this register.
+ * To further simplify register definitions and callers, the actual address
+ * calculation functions are written to treat srd_nents == 0 to mean a register
+ * with a single instance, and to treat srd_stride == 0 as if it were 4 (the
+ * space occupied by registers accessed by SMN is -- so far as we can tell,
+ * practically always -- 4 bytes in size, even if the register itself is
+ * smaller). Additionally, a large number of assertions should be present in
+ * such functions to guard against foreign unit register definitions,
+ * out-of-bounds unit and register instance parameters, address overflow, and
+ * register instance offsets that overflow improperly into an aperture base
+ * address. All of these conditions indicate either an incorrect register
+ * definition or a bug in the caller. See the template macro at the bottom of
+ * this file and umc.h for additional examples of calculating and checking
+ * register addresses.
+ *
+ * With address computation out of the way, we can then provide an accessor for
+ * each instance of this register:
+ *
+ * #define L3REGS_COREREGS_THREAD_TSC(l3, core, thread) \
+ * amdzen_l3regs_coreregs_reg(l3, D_L3REGS_COREREGS_THREAD_TSC, \
+ * core, thread)
+ *
+ * Our other per-core register's accessor would look like:
+ *
+ * #define L3REGS_COREREGS_FROB_CTL(l3, core) \
+ * amdzen_l3regs_coreregs_reg(l3, D_L3REGS_COREREGS_FROB_CTL, core, 0)
+ *
+ * The next section describes these conventions in greater detail.
+ *
+ * -----------
+ * Conventions
+ * -----------
+ *
+ * First, let's consider the names of the register definition and the
+ * convenience macro supplied to obtain an instance of that register: we've
+ * prefixed the global definition of the registers with D_ and the convenience
+ * macros to return a specific instance are simply named for the register
+ * itself. Additionally, the two macros expand to objects of incompatible
+ * types, so that using the wrong one will always be detected at compile time.
+ * Why do we expose both of these? The instance macro is useful for callers who
+ * know at compile-time the name of the register of which they want instances;
+ * this makes it unnecessary to remember the names of functions used to compute
+ * register instance addresses. The definition itself is useful to callers that
+ * accept const smn_reg_def_t arguments referring to registers of which the
+ * immediate caller does not know the names at compile time.
+ *
+ * You may wonder why we don't declare named constants for the definitions.
+ * There are two ways we could do that and both are unfortunate: one would be to
+ * declare them static in the header, the other to separate declarations in the
+ * header from initialisation in a separate source file. Measurements revealed
+ * that the former causes a very substantial increase in data size, which will
+ * be multiplied by the number of registers defined and the number of source
+ * files including the header. As convenient as it is to have these symbolic
+ * constants available to debuggers and other tools at runtime, they're just too
+ * big. However, it is possible to generate code to be compiled into loadable
+ * modules that would contain a single copy of the constants for this purpose as
+ * well as for providing CTF to foreign-language binding generators. The other
+ * option considered here, putting the constants in separate source files, makes
+ * maintenance significantly more challenging and makes it likely not only that
+ * new registers may not be added properly but also that definitions, macros, or
+ * both may be incorrect. Neither of these options is terrible but for now
+ * we've optimised for simplicity of maintenance and minimal data size at the
+ * immediate but not necessarily permanent expense of some debugging
+ * convenience.
+ *
+ * We wish to standardise as much as possible on conventions across all
+ * Zen-related functional units and blocks (including those accessed by SMN,
+ * through the DF directly, and by other means). In general, some register and
+ * field names are shortened from their official names for clarity and brevity;
+ * the official names are always given in the comment above the definition.
+ * AMD's functional units come from many internal teams and presumably several
+ * outside vendors as well; as a result, there is no single convention to be
+ * found throughout the PPRs and other documentation. For example, different
+ * units may have registers containing "CTL", "CNTL", "CTRL", "CNTRL", and
+ * "CONTROL", as well as "FOO_CNTL", "FooCntl", and "Foo_Cntl". Reflecting
+ * longstanding illumos conventions, we collapse all such register names
+ * regardless of case as follows:
+ *
+ * CTL/CTRL/CNTL/CNTRL/CONTROL => CTL
+ * CFG/CONF/CONFIG/CONFIGURATION => CFG
+ * EN/ENAB/ENABLE/ENABLED => EN
+ * DIS/DISAB/DISABLE/DISABLED => DIS
+ *
+ * Note that if collapsing these would result in ambiguity, more of the official
+ * names will be preserved. In addition to collapsing register and field names
+ * in this case-insensitive manner, we also follow standard code style practice
+ * and name macros and constants in SCREAMING_SNAKE_CASE regardless of AMD's
+ * official name. It is similarly reasonable to truncate or abbreviate other
+ * common terms in a consistent manner where doing so preserves uniqueness and
+ * at least some semantic value; without doing so, some official register names
+ * will be excessively unwieldy and may not even fit into 80 columns. Please
+ * maintain these practices and strive for consistency with existing examples
+ * when abbreviation is required.
+ *
+ * As we have done elsewhere throughout the amdzen body of work, register fields
+ * should always be given in order starting with the most significant bits and
+ * working down toward 0; this matches AMD's documentation and makes it easier
+ * for reviewers and other readers to follow. The routines in bitext.h should
+ * be used to extract and set bitfields unless there is a compelling reason to
+ * do otherwise (e.g., assembly consumers). Accessors should be named
+ * UNIT_REG_GET_FIELD and UNIT_REG_SET_FIELD respectively, unless the register
+ * has a single field that has no meaningful name (i.e., the field's name is the
+ * same as the register's or it's otherwise obvious from the context what its
+ * purpose is), in which case UNIT_REG_GET and UNIT_REG_SET are appropriate.
+ * Additional getters and setters that select a particular bit from a register
+ * or field consisting entirely of individual bits describing or controlling the
+ * state of some entity may also be useful. As with register names, be as brief
+ * as possible without sacrificing too much information.
+ *
+ * Constant values associated with a field should be declared immediately
+ * following that field. If a constant or collection of constants is used in
+ * multiple fields of the same register, the definitions should follow the last
+ * such field; similarly, constants used in multiple registers should follow the
+ * last such register, and a comment explaining the scope of their validity is
+ * recommended. Such constants should be named for the common elements of the
+ * fields or registers in which they are valid.
+ *
+ * As noted above, SMN register definitions should omit the srd_nents and
+ * srd_stride members when there is a single instance of the register within the
+ * unit. The srd_stride member should also be elided when the register
+ * instances are contiguous. All address calculation routines should be written
+ * to support these conventions. Each register should have an accessor macro or
+ * function, and should accept instance numbers in order from superior to
+ * inferior (e.g., from the largest functional unit to the smallest, ending with
+ * the register instance itself). This convention is similar to that used in
+ * generic PCIe code in which a register is specified by bus, device, and
+ * function numbers in that order. Register accessor macros or inline functions
+ * should not expose inapplicable taxons to callers; in our example above,
+ * COREREGS_FROB_CTL has an instance for each core but is not associated with a
+ * thread; therefore its accessor should not accept a thread instance argument
+ * even though the address calculation function it uses does.
+ *
+ * Most of these conventions are not specific to registers accessed via SMN;
+ * note also that some registers may be accessed in multiple ways (e.g., SMN and
+ * MMIO, or SMN and the MSR instructions). While the code here is generally
+ * unaware of such aliased access methods, following these conventions will
+ * simplify naming and usage if such a register needs to be accessed in multiple
+ * ways. Sensible additions to macro and symbol names such as the access method
+ * to be used will generally be sufficient to disambiguate while allowing reuse
+ * of associated field accessors, constants, and in some cases even register
+ * offset, instance count, and stride.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SMN_APERTURE_MASK 0xfff00000
+
+/*
+ * An instance of an SMN-accessible register.
+ */
+typedef struct smn_reg {
+ uint32_t sr_addr;
+} smn_reg_t;
+
+/*CSTYLED*/
+#define SMN_MAKE_REG(x) ((const smn_reg_t){ .sr_addr = (x) })
+#define SMN_REG_ADDR(x) ((x).sr_addr)
+
+/*
+ * This exists so that address calculation functions can check that the register
+ * definitions they're passed are something they understand how to use. While
+ * many address calculation functions are similar, some functional units define
+ * registers with multiple iterators, have differently-sized apertures, or both;
+ * it's important that we reject foreign register definitions in these
+ * functions. In principle this could be done at compile time, but the
+ * preprocessor gymnastics required to do so are excessively vile and we are
+ * really already hanging it pretty far over the edge in terms of what the C
+ * preprocessor can do for us.
+ */
+typedef enum smn_unit {
+ SMN_UNIT_UNKNOWN,
+ SMN_UNIT_IOAPIC,
+ SMN_UNIT_IOHC,
+ SMN_UNIT_IOHCDEV_PCIE,
+ SMN_UNIT_IOHCDEV_NBIF,
+ SMN_UNIT_IOHCDEV_SB,
+ SMN_UNIT_IOAGR,
+ SMN_UNIT_SDPMUX,
+ SMN_UNIT_UMC,
+ SMN_UNIT_PCIE_CORE,
+ SMN_UNIT_PCIE_PORT,
+ SMN_UNIT_PCIE_RSMU,
+ SMN_UNIT_SCFCTP,
+ SMN_UNIT_SMUPWR,
+ SMN_UNIT_IOMMUL1,
+ SMN_UNIT_IOMMUL2,
+ SMN_UNIT_NBIF,
+ SMN_UNIT_NBIF_ALT,
+ SMN_UNIT_NBIF_FUNC
+} smn_unit_t;
+
+/*
+ * srd_unit and srd_reg are required; they describe the functional unit and the
+ * register's address within that unit's aperture (which may be the SDP-defined
+ * aperture described above or a smaller one if a unit has been broken down
+ * logically into smaller units). srd_nents is optional; if not set, all
+ * existing consumers treat a value of 0 as equivalent to 1: the register has
+ * but a single instance in each unit. srd_stride is ignored if srd_nents is 0
+ * or 1 and optional otherwise; it describes the number of bytes to be added to
+ * the previous instance's address to obtain that of the next instance. If left
+ * at 0 it is assumed to be 4 bytes.
+ *
+ * There are units in which registers have more complicated collections of
+ * instances that cannot be represented perfectly by this simple descriptor;
+ * they require custom address calculation macros and functions that may take
+ * additional arguments, and they may not be able to check their arguments or
+ * the computed addresses as carefully as would be ideal.
+ */
+typedef struct smn_reg_def {
+ smn_unit_t srd_unit;
+ uint32_t srd_reg;
+ uint32_t srd_stride;
+ uint16_t srd_nents;
+} smn_reg_def_t;
+
+/*
+ * This macro may be used by per-functional-unit code to construct an address
+ * calculation function. It is usable by some, BUT NOT ALL, functional units;
+ * see the block comment above for an example that cannot be accommodated. Here
+ * we assume that there are at most 2 iterators in any register's definition.
+ * Use this when possible, as it provides a large number of useful checks on
+ * DEBUG bits. Similar checks should be incorporated into implementations for
+ * nonstandard functional units to the extent possible.
+ */
+
+#define AMDZEN_MAKE_SMN_REG_FN(_fn, _unit, _base, _mask, _nunits, _unitshift) \
+CTASSERT(((_base) & ~(_mask)) == 0); \
+static inline smn_reg_t \
+_fn(const uint8_t unitno, const smn_reg_def_t def, const uint16_t reginst) \
+{ \
+ const uint32_t unit32 = (const uint32_t)unitno; \
+ const uint32_t reginst32 = (const uint32_t)reginst; \
+ const uint32_t stride = (def.srd_stride == 0) ? 4 : def.srd_stride; \
+ const uint32_t nents = (def.srd_nents == 0) ? 1 : \
+ (const uint32_t)def.srd_nents; \
+ \
+ ASSERT3S(def.srd_unit, ==, SMN_UNIT_ ## _unit); \
+ ASSERT3U(unit32, <, (_nunits)); \
+ ASSERT3U(nents, >, reginst32); \
+ ASSERT0(def.srd_reg & (_mask)); \
+ \
+ const uint32_t aperture_base = (_base); \
+ \
+ const uint32_t aperture_off = (unit32 << (_unitshift)); \
+ ASSERT3U(aperture_off, <=, UINT32_MAX - aperture_base); \
+ \
+ const uint32_t aperture = aperture_base + aperture_off; \
+ ASSERT0(aperture & ~(_mask)); \
+ \
+ const uint32_t reg = def.srd_reg + reginst32 * stride; \
+ ASSERT0(reg & (_mask)); \
+ \
+ return (SMN_MAKE_REG(aperture + reg)); \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_AMDZEN_SMN_H */
diff --git a/usr/src/uts/intel/sys/amdzen/umc.h b/usr/src/uts/intel/sys/amdzen/umc.h
index 78644442d4..a06c2021eb 100644
--- a/usr/src/uts/intel/sys/amdzen/umc.h
+++ b/usr/src/uts/intel/sys/amdzen/umc.h
@@ -17,6 +17,7 @@
#define _SYS_UMC_H
#include <sys/bitext.h>
+#include <sys/amdzen/smn.h>
/*
* Various register definitions for accessing the AMD Unified Memory Controller
@@ -75,18 +76,38 @@ extern "C" {
* UMC Channel registers. These are in SMN Space. DDR4 and DDR5 based UMCs share
* the same base address, somewhat surprisingly. This constructs the appropriate
* offset and ensures that a caller doesn't exceed the number of known instances
- * of the register.
+ * of the register. See smn.h for additional details on SMN addressing.
*/
-static inline uint32_t
-amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
- uint32_t reginst)
+
+static inline smn_reg_t
+amdzen_umc_smn_reg(const uint8_t umcno, const smn_reg_def_t def,
+ const uint16_t reginst)
{
- ASSERT3U(umcno, <, 12);
- ASSERT3U(nents, >, reginst);
+ const uint32_t APERTURE_BASE = 0x50000;
+ const uint32_t APERTURE_MASK = 0xffffe000;
+
+ const uint32_t umc32 = (const uint32_t)umcno;
+ const uint32_t reginst32 = (const uint32_t)reginst;
+
+ const uint32_t stride = (def.srd_stride == 0) ? 4 : def.srd_stride;
+ const uint32_t nents = (def.srd_nents == 0) ? 1 :
+ (const uint32_t)def.srd_nents;
+
+ ASSERT3S(def.srd_unit, ==, SMN_UNIT_UMC);
+ ASSERT0(def.srd_reg & APERTURE_MASK);
+ ASSERT3U(umc32, <, 12);
+ ASSERT3U(nents, >, reginst32);
+
+ const uint32_t aperture_off = umc32 << 20;
+ ASSERT3U(aperture_off, <=, UINT32_MAX - APERTURE_BASE);
+
+ const uint32_t aperture = APERTURE_BASE + aperture_off;
+ ASSERT0(aperture & ~APERTURE_MASK);
- uint32_t base = 0x50000;
- uint32_t reg = base_reg + reginst * 4;
- return ((umcno << 20) + base + reg);
+ const uint32_t reg = def.srd_reg + reginst32 * stride;
+ ASSERT0(reg & APERTURE_MASK);
+
+ return (SMN_MAKE_REG(aperture + reg));
}
/*
@@ -94,8 +115,20 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* to match a chip select. Instances 0/1 always refer to DIMM 0, while
* instances 2/3 always refer to DIMM 1.
*/
-#define UMC_BASE(u, i) amdzen_umc_smn_addr(u, 0x00, 4, i)
-#define UMC_BASE_SEC(u, i) amdzen_umc_smn_addr(u, 0x10, 4, i)
+/*CSTYLED*/
+#define D_UMC_BASE (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x00, \
+ .srd_nents = 4 \
+}
+/*CSTYLED*/
+#define D_UMC_BASE_SEC (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x10, \
+ .srd_nents = 4 \
+}
+#define UMC_BASE(u, i) amdzen_umc_smn_reg(u, D_UMC_BASE, i)
+#define UMC_BASE_SEC(u, i) amdzen_umc_smn_reg(u, D_UMC_BASE_SEC, i)
#define UMC_BASE_GET_ADDR(r) bitx32(r, 31, 1)
#define UMC_BASE_ADDR_SHIFT 9
#define UMC_BASE_GET_EN(r) bitx32(r, 0, 0)
@@ -105,8 +138,21 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* registers that allow more address bits. Note, only present in some DDR5
* capable SoCs.
*/
-#define UMC_BASE_EXT_DDR5(u, i) amdzen_umc_smn_addr(u, 0xb00, 4, i)
-#define UMC_BASE_EXT_SEC_DDR5(u, i) amdzen_umc_smn_addr(u, 0xb10, 4, i)
+/*CSTYLED*/
+#define D_UMC_BASE_EXT_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xb00, \
+ .srd_nents = 4 \
+}
+/*CSTYLED*/
+#define D_UMC_BASE_EXT_SEC_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xb10, \
+ .srd_nents = 4 \
+}
+#define UMC_BASE_EXT_DDR5(u, i) amdzen_umc_smn_reg(u, D_UMC_BASE_EXT_DDR5, i)
+#define UMC_BASE_EXT_SEC_DDR5(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_BASE_EXT_SEC_DDR5, i)
#define UMC_BASE_EXT_GET_ADDR(r) bitx32(r, 7, 0)
#define UMC_BASE_EXT_ADDR_SHIFT 40
@@ -116,18 +162,55 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* the incoming address to see it matches the base. Tweaking what is used for
* match is often part of the interleaving strategy.
*/
-#define UMC_MASK_DDR4(u, i) amdzen_umc_smn_addr(u, 0x20, 2, i)
-#define UMC_MASK_SEC_DDR4(u, i) amdzen_umc_smn_addr(u, 0x28, 2, i)
-#define UMC_MASK_DDR5(u, i) amdzen_umc_smn_addr(u, 0x20, 4, i)
-#define UMC_MASK_SEC_DDR5(u, i) amdzen_umc_smn_addr(u, 0x30, 4, i)
+/*CSTYLED*/
+#define D_UMC_MASK_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x20, \
+ .srd_nents = 2 \
+}
+/*CSTYLED*/
+#define D_UMC_MASK_SEC_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x28, \
+ .srd_nents = 2 \
+}
+/*CSTYLED*/
+#define D_UMC_MASK_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x20, \
+ .srd_nents = 4 \
+}
+/*CSTYLED*/
+#define D_UMC_MASK_SEC_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x30, \
+ .srd_nents = 4 \
+}
+#define UMC_MASK_DDR4(u, i) amdzen_umc_smn_reg(u, D_UMC_MASK_DDR4, i)
+#define UMC_MASK_SEC_DDR4(u, i) amdzen_umc_smn_reg(u, D_UMC_MASK_SEC_DDR4, i)
+#define UMC_MASK_DDR5(u, i) amdzen_umc_smn_reg(u, D_UMC_MASK_DDR5, i)
+#define UMC_MASK_SEC_DDR5(u, i) amdzen_umc_smn_reg(u, D_UMC_MASK_SEC_DDR5, i)
#define UMC_MASK_GET_ADDR(r) bitx32(r, 31, 1)
#define UMC_MASK_ADDR_SHIFT 9
/*
* UMC::AddrMaskExt, UMC::AddrMaskSecExt -- Extended mask addresses.
*/
-#define UMC_MASK_EXT_DDR5(u, i) amdzen_umc_smn_addr(u, 0xb20, 4, i)
-#define UMC_MASK_EXT_SEC_DDR5(u, i) amdzen_umc_smn_addr(u, 0xb30, 4, i)
+/*CSTYLED*/
+#define D_UMC_MASK_EXT_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xb20, \
+ .srd_nents = 4 \
+}
+/*CSTYLED*/
+#define D_UMC_MASK_EXT_SEC_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xb30, \
+ .srd_nents = 4 \
+}
+#define UMC_MASK_EXT_DDR5(u, i) amdzen_umc_smn_reg(u, D_UMC_MASK_EXT_DDR5, i)
+#define UMC_MASK_EXT_SEC_DDR5(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_MASK_EXT_SEC_DDR5, i)
#define UMC_MASK_EXT_GET_ADDR(r) bitx32(r, 7, 0)
#define UMC_MASK_EXT_ADDR_SHIFT 40
@@ -140,8 +223,20 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* banks/group you must subtract the number of bank group bits from the total
* number of bank bits.
*/
-#define UMC_ADDRCFG_DDR4(u, i) amdzen_umc_smn_addr(u, 0x30, 2, i)
-#define UMC_ADDRCFG_DDR5(u, i) amdzen_umc_smn_addr(u, 0x40, 4, i)
+/*CSTYLED*/
+#define D_UMC_ADDRCFG_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x30, \
+ .srd_nents = 2 \
+}
+/*CSTYLED*/
+#define D_UMC_ADDRCFG_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x40, \
+ .srd_nents = 4 \
+}
+#define UMC_ADDRCFG_DDR4(u, i) amdzen_umc_smn_reg(u, D_UMC_ADDRCFG_DDR4, i)
+#define UMC_ADDRCFG_DDR5(u, i) amdzen_umc_smn_reg(u, D_UMC_ADDRCFG_DDR5, i)
#define UMC_ADDRCFG_GET_NBANK_BITS(r) bitx32(r, 21, 20)
#define UMC_ADDRCFG_NBANK_BITS_BASE 3
#define UMC_ADDRCFG_GET_NCOL_BITS(r) bitx32(r, 19, 16)
@@ -161,8 +256,20 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* which bits in the normalized address are used to construct the bank number,
* row bits are contiguous from the starting number.
*/
-#define UMC_ADDRSEL_DDR4(u, i) amdzen_umc_smn_addr(u, 0x40, 2, i)
-#define UMC_ADDRSEL_DDR5(u, i) amdzen_umc_smn_addr(u, 0x50, 4, i)
+/*CSTYLED*/
+#define D_UMC_ADDRSEL_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x40, \
+ .srd_nents = 2 \
+}
+/*CSTYLED*/
+#define D_UMC_ADDRSEL_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x50, \
+ .srd_nents = 4 \
+}
+#define UMC_ADDRSEL_DDR4(u, i) amdzen_umc_smn_reg(u, D_UMC_ADDRSEL_DDR4, i)
+#define UMC_ADDRSEL_DDR5(u, i) amdzen_umc_smn_reg(u, D_UMC_ADDRSEL_DDR5, i)
#define UMC_ADDRSEL_GET_ROW_LO(r) bitx32(r, 27, 24)
#define UMC_ADDRSEL_ROW_LO_BASE 12
#define UMC_ADDRSEL_GET_BANK4(r) bitx32(r, 19, 16)
@@ -181,20 +288,43 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* the case of DDR4, it's 0x50, 0x54 for DIMM 0 lo, hi. Then 0x58, 0x5c for
* DIMM1. DDR5 based entries do something similar; however, instead of being
* per-DIMM, there is one of these for each CS.
- *
- * This leads to a somewhat odder construction for the maximum number of
- * instances. Because amdzen_umc_smn_addr() assumes each register instance is 4
- * bytes apart, we instead take the actual register instance and multiply it by
- * 2. This means that in the DDR4 case we will always access what
- * amdzen_umc_smn_addr() considers instance 0 and 2. In the DDR5 case this is 0,
- * 2, 4, and 6. This means our maximum instance for both cases has to be one
- * higher than this, 3 and 7 respectively. While technically you could use 4 and
- * 8, this is a tighter bind.
*/
-#define UMC_COLSEL_LO_DDR4(u, i) amdzen_umc_smn_addr(u, 0x50, 3, i * 2)
-#define UMC_COLSEL_HI_DDR4(u, i) amdzen_umc_smn_addr(u, 0x54, 3, i * 2)
-#define UMC_COLSEL_LO_DDR5(u, i) amdzen_umc_smn_addr(u, 0x60, 7, i * 2)
-#define UMC_COLSEL_HI_DDR5(u, i) amdzen_umc_smn_addr(u, 0x64, 7, i * 2)
+/*CSTYLED*/
+#define D_UMC_COLSEL_LO_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x50, \
+ .srd_nents = 2, \
+ .srd_stride = 8 \
+}
+/*CSTYLED*/
+#define D_UMC_COLSEL_HI_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x54, \
+ .srd_nents = 2, \
+ .srd_stride = 8 \
+}
+/*CSTYLED*/
+#define D_UMC_COLSEL_LO_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x60, \
+ .srd_nents = 4, \
+ .srd_stride = 8 \
+}
+/*CSTYLED*/
+#define D_UMC_COLSEL_HI_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x64, \
+ .srd_nents = 4, \
+ .srd_stride = 8 \
+}
+#define UMC_COLSEL_LO_DDR4(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_COLSEL_LO_DDR4, i)
+#define UMC_COLSEL_HI_DDR4(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_COLSEL_HI_DDR4, i)
+#define UMC_COLSEL_LO_DDR5(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_COLSEL_LO_DDR5, i)
+#define UMC_COLSEL_HI_DDR5(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_COLSEL_HI_DDR5, i)
#define UMC_COLSEL_REMAP_GET_COL(r, x) bitx32(r, (3 + (4 * (x))), (4 * ((x))))
#define UMC_COLSEL_LO_BASE 2
@@ -210,8 +340,21 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* In general, APUs have some of the MSBS (most significant bit swap) related
* fields; however, they do not have rank multiplication bits.
*/
-#define UMC_RMSEL_DDR4(u, i) amdzen_umc_smn_addr(u, 0x70, 2, i)
-#define UMC_RMSEL_SEC_DDR4(u, i) amdzen_umc_smn_addr(u, 0x78, 2, i)
+/*CSTYLED*/
+#define D_UMC_RMSEL_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x70, \
+ .srd_nents = 2 \
+}
+/*CSTYLED*/
+#define D_UMC_RMSEL_SEC_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x78, \
+ .srd_nents = 2 \
+}
+#define UMC_RMSEL_DDR4(u, i) amdzen_umc_smn_reg(u, D_UMC_RMSEL_DDR4, i)
+#define UMC_RMSEL_SEC_DDR4(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_RMSEL_SEC_DDR4, i)
#define UMC_RMSEL_DDR4_GET_INV_MSBO(r) bitx32(r, 19, 18)
#define UMC_RMSEL_DDR4_GET_INV_MSBE(r) bitx32(r, 17, 16)
#define UMC_RMSEL_DDR4_GET_RM2(r) bitx32(r, 11, 8)
@@ -219,7 +362,13 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
#define UMC_RMSEL_DDR4_GET_RM0(r) bitx32(r, 3, 0)
#define UMC_RMSEL_BASE 12
-#define UMC_RMSEL_DDR5(u, i) amdzen_umc_smn_addr(u, 0x80, 4, i)
+/*CSTYLED*/
+#define D_UMC_RMSEL_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x80, \
+ .srd_nents = 4 \
+}
+#define UMC_RMSEL_DDR5(u, i) amdzen_umc_smn_reg(u, D_UMC_RMSEL_DDR5, i)
#define UMC_RMSEL_DDR5_GET_INV_MSBS_SEC(r) bitx32(r, 31, 30)
#define UMC_RMSEL_DDR5_GET_INV_MSBS(r) bitx32(r, 29, 28)
#define UMC_RMSEL_DDR5_GET_SUBCHAN(r) bitx32(r, 19, 16)
@@ -234,8 +383,20 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* UMC::CH::DimmCfg -- This describes several properties of the DIMM that is
* installed, such as its overall width or type.
*/
-#define UMC_DIMMCFG_DDR4(u, i) amdzen_umc_smn_addr(u, 0x80, 2, i)
-#define UMC_DIMMCFG_DDR5(u, i) amdzen_umc_smn_addr(u, 0x90, 2, i)
+/*CSTYLED*/
+#define D_UMC_DIMMCFG_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x80, \
+ .srd_nents = 2 \
+}
+/*CSTYLED*/
+#define D_UMC_DIMMCFG_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x90, \
+ .srd_nents = 2 \
+}
+#define UMC_DIMMCFG_DDR4(u, i) amdzen_umc_smn_reg(u, D_UMC_DIMMCFG_DDR4, i)
+#define UMC_DIMMCFG_DDR5(u, i) amdzen_umc_smn_reg(u, D_UMC_DIMMCFG_DDR5, i)
#define UMC_DIMMCFG_GET_PKG_RALIGN(r) bitx32(r, 10, 10)
#define UMC_DIMMCFG_GET_REFRESH_DIS(r) bitx32(r, 9, 9)
#define UMC_DIMMCFG_GET_DQ_SWAP_DIS(r) bitx32(r, 8, 8)
@@ -258,8 +419,22 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* UMC::CH::AddrHashBank -- These registers contain various instructions about
* how to hash an address across a bank to influence which bank is used.
*/
-#define UMC_BANK_HASH_DDR4(u, i) amdzen_umc_smn_addr(u, 0xc8, 5, i)
-#define UMC_BANK_HASH_DDR5(u, i) amdzen_umc_smn_addr(u, 0x98, 5, i)
+/*CSTYLED*/
+#define D_UMC_BANK_HASH_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xc8, \
+ .srd_nents = 5 \
+}
+/*CSTYLED*/
+#define D_UMC_BANK_HASH_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x98, \
+ .srd_nents = 5 \
+}
+#define UMC_BANK_HASH_DDR4(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_BANK_HASH_DDR4, i)
+#define UMC_BANK_HASH_DDR5(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_BANK_HASH_DDR5, i)
#define UMC_BANK_HASH_GET_ROW(r) bitx32(r, 31, 14)
#define UMC_BANK_HASH_GET_COL(r) bitx32(r, 13, 1)
#define UMC_BANK_HASH_GET_EN(r) bitx32(r, 0, 0)
@@ -269,8 +444,22 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* address when trying to do rank hashing. Note, instance 3 is is reserved in
* DDR5 modes.
*/
-#define UMC_RANK_HASH_DDR4(u, i) amdzen_umc_smn_addr(u, 0xdc, 3, i)
-#define UMC_RANK_HASH_DDR5(u, i) amdzen_umc_smn_addr(u, 0xb0, 4, i)
+/*CSTYLED*/
+#define D_UMC_RANK_HASH_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xdc, \
+ .srd_nents = 3 \
+}
+/*CSTYLED*/
+#define D_UMC_RANK_HASH_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xb0, \
+ .srd_nents = 4 \
+}
+#define UMC_RANK_HASH_DDR4(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_RANK_HASH_DDR4, i)
+#define UMC_RANK_HASH_DDR5(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_RANK_HASH_DDR5, i)
#define UMC_RANK_HASH_GET_ADDR(r) bitx32(r, 31, 1)
#define UMC_RANK_HASH_SHIFT 9
#define UMC_RANK_HASH_GET_EN(r) bitx32(r, 0, 0)
@@ -278,7 +467,14 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
/*
* UMC::AddrHashRMExt -- Extended rank hash addresses.
*/
-#define UMC_RANK_HASH_EXT_DDR5(u, i) amdzen_umc_smn_addr(u, 0xbb0, 4, i)
+/*CSTYLED*/
+#define D_UMC_RANK_HASH_EXT_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xbb0, \
+ .srd_nents = 4 \
+}
+#define UMC_RANK_HASH_EXT_DDR5(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_RANK_HASH_EXT_DDR5, i)
#define UMC_RANK_HASH_EXT_GET_ADDR(r) bitx32(r, 7, 0)
#define UMC_RANK_HASH_EXT_ADDR_SHIFT 40
@@ -288,10 +484,20 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* upper two rank hash registers defined above because on the systems where this
* occurs for DDR4, they only have up to one rank hash.
*/
+/*CSTYLED*/
+#define D_UMC_PC_HASH_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xc0 \
+}
+/*CSTYLED*/
+#define D_UMC_PC_HASH2_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xc4 \
+}
#define UMC_PC_HASH_DDR4(u) UMC_RANK_HASH_DDR4(u, 1)
#define UMC_PC_HASH2_DDR4(u) UMC_RANK_HASH_DDR4(u, 2)
-#define UMC_PC_HASH_DDR5(u) amdzen_umc_smn_addr(u, 0xc0, 1, 0)
-#define UMC_PC_HASH2_DDR5(u) amdzen_umc_smn_addr(u, 0xc4, 1, 0)
+#define UMC_PC_HASH_DDR5(u) amdzen_umc_smn_reg(u, D_UMC_PC_HASH_DDR5, 0)
+#define UMC_PC_HASH2_DDR5(u) amdzen_umc_smn_reg(u, D_UMC_PC_HASH2_DDR5, 0)
#define UMC_PC_HASH_GET_ROW(r) bitx32(r, 31, 14)
#define UMC_PC_HASH_GET_COL(r) bitx32(r, 13, 1)
#define UMC_PC_HASH_GET_EN(r) bitx32(r, 0, 0)
@@ -301,8 +507,20 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* UMC::CH::AddrHashCS -- Hashing: chip-select edition. Note, these can
* ultimately cause you to change which DIMM is being actually accessed.
*/
-#define UMC_CS_HASH_DDR4(u, i) amdzen_umc_smn_addr(u, 0xe8, 2, i)
-#define UMC_CS_HASH_DDR5(u, i) amdzen_umc_smn_addr(u, 0xc8, 2, i)
+/*CSTYLED*/
+#define D_UMC_CS_HASH_DDR4 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xe8, \
+ .srd_nents = 2 \
+}
+/*CSTYLED*/
+#define D_UMC_CS_HASH_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xc8, \
+ .srd_nents = 2 \
+}
+#define UMC_CS_HASH_DDR4(u, i) amdzen_umc_smn_reg(u, D_UMC_CS_HASH_DDR4, i)
+#define UMC_CS_HASH_DDR5(u, i) amdzen_umc_smn_reg(u, D_UMC_CS_HASH_DDR5, i)
#define UMC_CS_HASH_GET_ADDR(r) bitx32(r, 31, 1)
#define UMC_CS_HASH_SHIFT 9
#define UMC_CS_HASH_GET_EN(r) bitx32(r, 0, 0)
@@ -310,7 +528,14 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
/*
* UMC::AddrHashExtCS -- Extended chip-select hash addresses.
*/
-#define UMC_CS_HASH_EXT_DDR5(u, i) amdzen_umc_smn_addr(u, 0xbc8, 2, i)
+/*CSTYLED*/
+#define D_UMC_CS_HASH_EXT_DDR5 (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xbc8, \
+ .srd_nents = 2 \
+}
+#define UMC_CS_HASH_EXT_DDR5(u, i) \
+ amdzen_umc_smn_reg(u, D_UMC_CS_HASH_EXT_DDR5, i)
#define UMC_CS_HASH_EXT_GET_ADDR(r) bitx32(r, 7, 0)
#define UMC_CS_HASH_EXT_ADDR_SHIFT 40
@@ -319,7 +544,12 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* For our purposes we mostly care about seeing if ECC is enabled and a DIMM
* type.
*/
-#define UMC_UMCCFG(u) amdzen_umc_smn_addr(u, 0x100, 1, 0)
+/*CSTYLED*/
+#define D_UMC_UMCCFG (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x100 \
+}
+#define UMC_UMCCFG(u) amdzen_umc_smn_reg(u, D_UMC_UMCCFG, 0)
#define UMC_UMCCFG_GET_READY(r) bitx32(r, 31, 31)
#define UMC_UMCCFG_GET_ECC_EN(r) bitx32(r, 12, 12)
#define UMC_UMCCFG_GET_BURST_CTL(r) bitx32(r, 11, 10)
@@ -338,7 +568,12 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* scrambling is enabled. Note, this register really changes a bunch from family
* to family.
*/
-#define UMC_DATACTL(u) amdzen_umc_smn_addr(u, 0x144, 1, 0)
+/*CSTYLED*/
+#define D_UMC_DATACTL (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x144 \
+}
+#define UMC_DATACTL(u) amdzen_umc_smn_reg(u, D_UMC_DATACTL, 0)
#define UMC_DATACTL_GET_ENCR_EN(r) bitx32(r, 8, 8)
#define UMC_DATACTL_GET_SCRAM_EN(r) bitx32(r, 0, 0)
@@ -353,7 +588,12 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
/*
* UMC::CH:EccCtrl -- Various settings around how ECC operates.
*/
-#define UMC_ECCCTL(u) amdzen_umc_smn_addr(u, 0x14c, 1, 0)
+/*CSTYLED*/
+#define D_UMC_ECCCTL (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0x14c \
+}
+#define UMC_ECCCTL(u) amdzen_umc_smn_reg(u, D_UMC_ECCCTL, 0)
#define UMC_ECCCTL_GET_RD_EN(r) bitx32(x, 10, 10)
#define UMC_ECCCTL_GET_X16(r) bitx32(x, 9, 9)
#define UMC_ECCCTL_GET_UC_FATAL(r) bitx32(x, 8, 8)
@@ -368,9 +608,9 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* systems, this is not always present on every SoC and seems to depend on
* something else inside the chip.
*/
-#define UMC_ECCCTL_DDR_GET_PI(r) bitx32(r, 13, 13)
-#define UMC_ECCCTL_DDR_GET_PF_DIS(r) bitx32(r, 12, 12)
-#define UMC_ECCCTL_DDR_GET_SDP_OVR(r) bitx32(x, 11, 11)
+#define UMC_ECCCTL_DDR_GET_PI(r) bitx32(r, 13, 13)
+#define UMC_ECCCTL_DDR_GET_PF_DIS(r) bitx32(r, 12, 12)
+#define UMC_ECCCTL_DDR_GET_SDP_OVR(r) bitx32(r, 11, 11)
#define UMC_ECCCTL_DDR_GET_REPLAY_EN(r) bitx32(x, 1, 1)
#define UMC_ECCCTL_DDR5_GET_PIN_RED(r) bitx32(r, 14, 14)
@@ -380,8 +620,18 @@ amdzen_umc_smn_addr(uint8_t umcno, uint32_t base_reg, uint32_t nents,
* feature disables. We mostly just record these for future us for debugging
* purposes. They aren't used as part of memory decoding.
*/
-#define UMC_UMCCAP(u) amdzen_umc_smn_addr(u, 0xdf0, 1, 0)
-#define UMC_UMCCAP_HI(u) amdzen_umc_smn_addr(u, 0xdf4, 1, 0)
+/*CSTYLED*/
+#define D_UMC_UMCCAP (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xdf0 \
+}
+/*CSTYLED*/
+#define D_UMC_UMCCAP_HI (const smn_reg_def_t){ \
+ .srd_unit = SMN_UNIT_UMC, \
+ .srd_reg = 0xdf4 \
+}
+#define UMC_UMCCAP(u) amdzen_umc_smn_reg(u, D_UMC_UMCCAP, 0)
+#define UMC_UMCCAP_HI(u) amdzen_umc_smn_reg(u, D_UMC_UMCCAP_HI, 0)
#ifdef __cplusplus
}