/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2022 Oxide Computer Company
 */

#ifndef _SYS_UMC_H
#define	_SYS_UMC_H

#include <sys/bitext.h>
#include <sys/amdzen/smn.h>

/*
 * Various register definitions for accessing the AMD Unified Memory Controller
 * (UMC) over SMN (the system management network). Note that the SMN exists
 * independently in each die and must be accessed through the appropriate
 * IOHC.
 *
 * There are effectively four different revisions of the UMC that we know about
 * and support querying:
 *
 *   o DDR4 capable APUs
 *   o DDR4 capable CPUs
 *   o DDR5 capable APUs
 *   o DDR5 capable CPUs
 *
 * In general for a given revision and generation of a controller (DDR4 vs.
 * DDR5), all of the address layouts are the same whether it is for an APU or a
 * CPU. The main difference is generally in the number of features. For example,
 * most APUs may not support the same rank multiplication bits and related
 * capabilities. However, unlike the DF, where everything changes, the main
 * difference within a generation is just which bits are implemented. This
 * makes it much
 * easier to define UMC information.
 *
 * Between DDR4 and DDR5 based devices, the register locations have shifted;
 * however, generally speaking, the registers themselves are actually the same.
 * Registers here, similar to the DF, have a common form:
 *
 * UMC_<reg name>_<vers>
 *
 * Here, <reg name> would be something like 'BASE', for the UMC
 * UMC::CH::BaseAddr register. <vers> is one of DDR4 or DDR5. When the same
 * register is supported at the same address between versions, then <vers> is
 * elided.
 *
 * For fields inside of these registers, everything follows the same pattern in
 * <sys/amdzen/df.h> which is:
 *
 * UMC_<reg name>_<vers>_GET_<field>
 *
 * Note, <vers> will be elided if the register is the same between the DDR4 and
 * DDR5 versions.
 *
 * Finally, a cautionary note. While the DF provided a way for us to determine
 * what version something is, we have not determined a way to programmatically
 * determine what something supports outside of making notes based on the
 * family, model, and stepping CPUID information. Unfortunately, you must look
 * towards the documentation and find what you need in the PPR (processor
 * programming reference).
 */
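
/*
 * As a concrete example of the naming pattern above, drawn from the
 * definitions later in this file: UMC::CH::AddrCfg lives at a different offset
 * in DDR4 and DDR5 parts, so it is exposed as both UMC_ADDRCFG_DDR4 and
 * UMC_ADDRCFG_DDR5. Fields that are laid out identically in both use shared
 * accessors such as UMC_ADDRCFG_GET_NBANK_BITS(), while fields that differ
 * carry the version, e.g. UMC_ADDRCFG_DDR4_GET_NRM_BITS() vs.
 * UMC_ADDRCFG_DDR5_GET_NRM_BITS().
 */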

#ifdef __cplusplus
extern "C" {
#endif

/*
 * UMC Channel registers. These are in SMN Space. DDR4 and DDR5 based UMCs share
 * the same base address, somewhat surprisingly. This constructs the appropriate
 * offset and ensures that a caller doesn't exceed the number of known instances
 * of the register.  See smn.h for additional details on SMN addressing.  All
 * UMC registers are 32 bits wide; we check for violations.
 */

static inline smn_reg_t
amdzen_umc_smn_reg(const uint8_t umcno, const smn_reg_def_t def,
    const uint16_t reginst)
{
	const uint32_t APERTURE_BASE = 0x50000;
	const uint32_t APERTURE_MASK = 0xffffe000;

	const uint32_t umc32 = (const uint32_t)umcno;
	const uint32_t reginst32 = (const uint32_t)reginst;

	const uint32_t stride = (def.srd_stride == 0) ? 4 : def.srd_stride;
	const uint32_t nents = (def.srd_nents == 0) ? 1 :
	    (const uint32_t)def.srd_nents;

	ASSERT0(def.srd_size);
	ASSERT3S(def.srd_unit, ==, SMN_UNIT_UMC);
	ASSERT0(def.srd_reg & APERTURE_MASK);
	ASSERT3U(umc32, <, 12);
	ASSERT3U(nents, >, reginst32);

	const uint32_t aperture_off = umc32 << 20;
	ASSERT3U(aperture_off, <=, UINT32_MAX - APERTURE_BASE);

	const uint32_t aperture = APERTURE_BASE + aperture_off;
	ASSERT0(aperture & ~APERTURE_MASK);

	const uint32_t reg = def.srd_reg + reginst32 * stride;
	ASSERT0(reg & APERTURE_MASK);

	return (SMN_MAKE_REG(aperture + reg));
}
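
/*
 * To make the address arithmetic above concrete (an illustrative example, not
 * text from the PPR): for UMC instance 1, instance 2 of D_UMC_MASK_DDR5
 * (defined below: offset 0x20, default 4-byte stride, 4 instances) resolves to
 * SMN address 0x50000 + (1 << 20) + 0x20 + (2 * 4) = 0x150028.
 */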

/*
 * UMC::CH::BaseAddr, UMC::CH::BaseAddrSec -- determines the base address used
 * to match a chip select. Instances 0/1 always refer to DIMM 0, while
 * instances 2/3 always refer to DIMM 1.
 */
/*CSTYLED*/
#define	D_UMC_BASE	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x00,	\
	.srd_nents = 4	\
}
/*CSTYLED*/
#define	D_UMC_BASE_SEC	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x10,	\
	.srd_nents = 4	\
}
#define	UMC_BASE(u, i)		amdzen_umc_smn_reg(u, D_UMC_BASE, i)
#define	UMC_BASE_SEC(u, i)	amdzen_umc_smn_reg(u, D_UMC_BASE_SEC, i)
#define	UMC_BASE_GET_ADDR(r)	bitx32(r, 31, 1)
#define	UMC_BASE_ADDR_SHIFT	9
#define	UMC_BASE_GET_EN(r)	bitx32(r, 0, 0)
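
/*
 * The following is a minimal illustrative sketch of how the fields above
 * combine; the helper name is ours and does not come from the PPR. The
 * register stores the chip-select base as normalized address bits starting at
 * UMC_BASE_ADDR_SHIFT, gated by the enable bit.
 */
static inline uint64_t
umc_example_base_addr(uint32_t val)
{
	/* An unenabled base does not decode anything. */
	if (UMC_BASE_GET_EN(val) == 0)
		return (0);
	return ((uint64_t)UMC_BASE_GET_ADDR(val) << UMC_BASE_ADDR_SHIFT);
}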

/*
 * UMC::BaseAddrExt, UMC::BaseAddrSecExt -- The first of several extensions to
 * registers that allow more address bits. Note, only present in some DDR5
 * capable SoCs.
 */
/*CSTYLED*/
#define	D_UMC_BASE_EXT_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xb00,	\
	.srd_nents = 4	\
}
/*CSTYLED*/
#define	D_UMC_BASE_EXT_SEC_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xb10,	\
	.srd_nents = 4	\
}
#define	UMC_BASE_EXT_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_BASE_EXT_DDR5, i)
#define	UMC_BASE_EXT_SEC_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_BASE_EXT_SEC_DDR5, i)
#define	UMC_BASE_EXT_GET_ADDR(r)	bitx32(r, 7, 0)
#define	UMC_BASE_EXT_ADDR_SHIFT		40
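
/*
 * Continuing the sketch above (again illustrative rather than authoritative),
 * on SoCs that implement this extension the extended bits presumably sit above
 * the ordinary base bits, e.g.:
 *
 *   base  = (uint64_t)UMC_BASE_GET_ADDR(basev) << UMC_BASE_ADDR_SHIFT;
 *   base |= (uint64_t)UMC_BASE_EXT_GET_ADDR(extv) << UMC_BASE_EXT_ADDR_SHIFT;
 */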


/*
 * UMC::CH::AddrMask, UMC::CH::AddrMaskSec -- This register is used to compare
 * the incoming address to see if it matches the base. Tweaking what is used
 * for the match is often part of the interleaving strategy.
 */
/*CSTYLED*/
#define	D_UMC_MASK_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x20,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_MASK_SEC_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x28,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_MASK_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x20,	\
	.srd_nents = 4	\
}
/*CSTYLED*/
#define	D_UMC_MASK_SEC_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x30,	\
	.srd_nents = 4	\
}
#define	UMC_MASK_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_MASK_DDR4, i)
#define	UMC_MASK_SEC_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_MASK_SEC_DDR4, i)
#define	UMC_MASK_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_MASK_DDR5, i)
#define	UMC_MASK_SEC_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_MASK_SEC_DDR5, i)
#define	UMC_MASK_GET_ADDR(r)	bitx32(r, 31, 1)
#define	UMC_MASK_ADDR_SHIFT	9
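
/*
 * A simplified sketch of the conventional base/mask comparison that these
 * registers feed into; the helper is ours and ignores interleaving and
 * hashing, which the real decode must account for. 'base' and 'mask' are the
 * already-shifted 64-bit values built from the registers above.
 */
static inline int
umc_example_cs_match(uint64_t base, uint64_t mask, uint64_t addr)
{
	/* A chip-select hits when the non-masked address bits equal the base. */
	return ((addr & ~mask) == (base & ~mask));
}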

/*
 * UMC::AddrMaskExt, UMC::AddrMaskSecExt -- Extended mask addresses.
 */
/*CSTYLED*/
#define	D_UMC_MASK_EXT_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xb20,	\
	.srd_nents = 4	\
}
/*CSTYLED*/
#define	D_UMC_MASK_EXT_SEC_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xb30,	\
	.srd_nents = 4	\
}
#define	UMC_MASK_EXT_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_MASK_EXT_DDR5, i)
#define	UMC_MASK_EXT_SEC_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_MASK_EXT_SEC_DDR5, i)
#define	UMC_MASK_EXT_GET_ADDR(r)	bitx32(r, 7, 0)
#define	UMC_MASK_EXT_ADDR_SHIFT		40

/*
 * UMC::CH::AddrCfg -- This register contains a number of bits that describe how
 * the address is actually used, one per DIMM. Note, not all members are valid
 * for all classes of DIMMs. It's worth calling out that the total number of
 * banks value here describes the total number of banks on the entire chip,
 * i.e. it is bank groups * banks/group. Therefore, to determine the number of
 * banks/group you must subtract the number of bank group bits from the total
 * number of bank bits.
 */
/*CSTYLED*/
#define	D_UMC_ADDRCFG_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x30,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_ADDRCFG_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x40,	\
	.srd_nents = 4	\
}
#define	UMC_ADDRCFG_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_ADDRCFG_DDR4, i)
#define	UMC_ADDRCFG_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_ADDRCFG_DDR5, i)
#define	UMC_ADDRCFG_GET_NBANK_BITS(r)		bitx32(r, 21, 20)
#define	UMC_ADDRCFG_NBANK_BITS_BASE		3
#define	UMC_ADDRCFG_GET_NCOL_BITS(r)		bitx32(r, 19, 16)
#define	UMC_ADDRCFG_NCOL_BITS_BASE		5
#define	UMC_ADDRCFG_GET_NROW_BITS_LO(r)		bitx32(r, 11, 8)
#define	UMC_ADDRCFG_NROW_BITS_LO_BASE		10
#define	UMC_ADDRCFG_GET_NBANKGRP_BITS(r)	bitx32(r, 3, 2)

#define	UMC_ADDRCFG_DDR4_GET_NROW_BITS_HI(r)	bitx32(r, 15, 12)
#define	UMC_ADDRCFG_DDR4_GET_NRM_BITS(r)	bitx32(r, 5, 4)
#define	UMC_ADDRCFG_DDR5_GET_CSXOR(r)		bitx32(r, 31, 30)
#define	UMC_ADDRCFG_DDR5_GET_NRM_BITS(r)	bitx32(r, 6, 4)
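
/*
 * A small illustrative helper (the name is ours) showing the relationship the
 * comment above describes, assuming, as the _BASE constants suggest, that each
 * field encodes a count relative to its base: the banks per bank group follow
 * from subtracting the bank group bits from the total bank bits.
 */
static inline uint32_t
umc_example_banks_per_group(uint32_t addrcfg)
{
	const uint32_t nbank_bits = UMC_ADDRCFG_GET_NBANK_BITS(addrcfg) +
	    UMC_ADDRCFG_NBANK_BITS_BASE;
	const uint32_t nbankgrp_bits = UMC_ADDRCFG_GET_NBANKGRP_BITS(addrcfg);

	/* 2^(total bank bits - bank group bits) banks within each group. */
	return (1U << (nbank_bits - nbankgrp_bits));
}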

/*
 * UMC::CH::AddrSel -- This register is used to program how the actual bits in
 * the normalized address map to the row and bank. While the bank number can be
 * constructed from arbitrarily selected bits in the normalized address, the
 * row bits are contiguous starting from the programmed bit number.
 */
/*CSTYLED*/
#define	D_UMC_ADDRSEL_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x40,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_ADDRSEL_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x50,	\
	.srd_nents = 4	\
}
#define	UMC_ADDRSEL_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_ADDRSEL_DDR4, i)
#define	UMC_ADDRSEL_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_ADDRSEL_DDR5, i)
#define	UMC_ADDRSEL_GET_ROW_LO(r)	bitx32(r, 27, 24)
#define	UMC_ADDRSEL_ROW_LO_BASE		12
#define	UMC_ADDRSEL_GET_BANK4(r)	bitx32(r, 19, 16)
#define	UMC_ADDRSEL_GET_BANK3(r)	bitx32(r, 15, 12)
#define	UMC_ADDRSEL_GET_BANK2(r)	bitx32(r, 11, 8)
#define	UMC_ADDRSEL_GET_BANK1(r)	bitx32(r, 7, 4)
#define	UMC_ADDRSEL_GET_BANK0(r)	bitx32(r, 3, 0)
#define	UMC_ADDRSEL_BANK_BASE		5

#define	UMC_ADDRSEL_DDR4_GET_ROW_HI(r)	bitx32(r, 31, 28)
#define	UMC_ADDRSEL_DDR4_ROW_HI_BASE	24
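
/*
 * An illustrative, deliberately simplified sketch (invented helper; bank
 * hashing, which can further modify the result, is ignored) of how the bank
 * number might be assembled: each bank select field names the normalized
 * address bit, relative to UMC_ADDRSEL_BANK_BASE, that supplies one bit of the
 * bank number. nbank_bits is the total bank bit count from UMC::CH::AddrCfg.
 */
static inline uint32_t
umc_example_bank_number(uint32_t addrsel, uint64_t norm_addr,
    uint32_t nbank_bits)
{
	const uint32_t sel[5] = {
		UMC_ADDRSEL_GET_BANK0(addrsel),
		UMC_ADDRSEL_GET_BANK1(addrsel),
		UMC_ADDRSEL_GET_BANK2(addrsel),
		UMC_ADDRSEL_GET_BANK3(addrsel),
		UMC_ADDRSEL_GET_BANK4(addrsel)
	};
	uint32_t bank = 0;

	for (uint32_t i = 0; i < nbank_bits && i < 5; i++) {
		const uint32_t bitno = sel[i] + UMC_ADDRSEL_BANK_BASE;
		bank |= (uint32_t)((norm_addr >> bitno) & 1) << i;
	}

	return (bank);
}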

/*
 * UMC::CH::ColSelLo, UMC::CH::ColSelHi -- This register selects which address
 * bits map to the various column select bits. These registers interleave, so
 * in the case of DDR4 it's 0x50, 0x54 for DIMM 0 lo, hi, then 0x58, 0x5c for
 * DIMM 1. DDR5 based entries do something similar; however, instead of being
 * per-DIMM, there is one of these for each CS.
 */
/*CSTYLED*/
#define	D_UMC_COLSEL_LO_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x50,	\
	.srd_nents = 2,	\
	.srd_stride = 8	\
}
/*CSTYLED*/
#define	D_UMC_COLSEL_HI_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x54,	\
	.srd_nents = 2,	\
	.srd_stride = 8	\
}
/*CSTYLED*/
#define	D_UMC_COLSEL_LO_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x60,	\
	.srd_nents = 4,	\
	.srd_stride = 8	\
}
/*CSTYLED*/
#define	D_UMC_COLSEL_HI_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x64,	\
	.srd_nents = 4,	\
	.srd_stride = 8	\
}
#define	UMC_COLSEL_LO_DDR4(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_COLSEL_LO_DDR4, i)
#define	UMC_COLSEL_HI_DDR4(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_COLSEL_HI_DDR4, i)
#define	UMC_COLSEL_LO_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_COLSEL_LO_DDR5, i)
#define	UMC_COLSEL_HI_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_COLSEL_HI_DDR5, i)

#define	UMC_COLSEL_REMAP_GET_COL(r, x)	bitx32(r, (3 + (4 * (x))), (4 * ((x))))
#define	UMC_COLSEL_LO_BASE		2
#define	UMC_COLSEL_HI_BASE		8
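
/*
 * For illustration (this is our reading of the fields above, not PPR text):
 * each 4-bit nibble selects the normalized address bit used for one column
 * bit, offset by the register's base. A value of 3 in nibble 0 of ColSelLo
 * would therefore place column bit 0 at normalized address bit
 * 3 + UMC_COLSEL_LO_BASE = 5.
 */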

/*
 * UMC::CH::RmSel -- This register contains the bits that control how the rank
 * is determined. Which of its fields are valid varies a lot between different
 * parts. The DDR4 and DDR5 versions are different enough that we use totally
 * disjoint definitions. It's also worth noting that DDR5 doesn't have a
 * secondary version of this as it is included in the main register.
 *
 * In general, APUs have some of the MSBS (most significant bit swap) related
 * fields; however, they do not have rank multiplication bits.
 */
/*CSTYLED*/
#define	D_UMC_RMSEL_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x70,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_RMSEL_SEC_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x78,	\
	.srd_nents = 2	\
}
#define	UMC_RMSEL_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_RMSEL_DDR4, i)
#define	UMC_RMSEL_SEC_DDR4(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_RMSEL_SEC_DDR4, i)
#define	UMC_RMSEL_DDR4_GET_INV_MSBO(r)	bitx32(r, 19, 18)
#define	UMC_RMSEL_DDR4_GET_INV_MSBE(r)	bitx32(r, 17, 16)
#define	UMC_RMSEL_DDR4_GET_RM2(r)	bitx32(r, 11, 8)
#define	UMC_RMSEL_DDR4_GET_RM1(r)	bitx32(r, 7, 4)
#define	UMC_RMSEL_DDR4_GET_RM0(r)	bitx32(r, 3, 0)
#define	UMC_RMSEL_BASE			12

/*CSTYLED*/
#define	D_UMC_RMSEL_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x80,	\
	.srd_nents = 4	\
}
#define	UMC_RMSEL_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_RMSEL_DDR5, i)
#define	UMC_RMSEL_DDR5_GET_INV_MSBS_SEC(r)	bitx32(r, 31, 30)
#define	UMC_RMSEL_DDR5_GET_INV_MSBS(r)		bitx32(r, 29, 28)
#define	UMC_RMSEL_DDR5_GET_SUBCHAN(r)	bitx32(r, 19, 16)
#define	UMC_RMSEL_DDR5_SUBCHAN_BASE	5
#define	UMC_RMSEL_DDR5_GET_RM3(r)	bitx32(r, 15, 12)
#define	UMC_RMSEL_DDR5_GET_RM2(r)	bitx32(r, 11, 8)
#define	UMC_RMSEL_DDR5_GET_RM1(r)	bitx32(r, 7, 4)
#define	UMC_RMSEL_DDR5_GET_RM0(r)	bitx32(r, 3, 0)


/*
 * UMC::CH::DimmCfg -- This describes several properties of the DIMM that is
 * installed, such as its overall width or type.
 */
/*CSTYLED*/
#define	D_UMC_DIMMCFG_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x80,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_DIMMCFG_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x90,	\
	.srd_nents = 2	\
}
#define	UMC_DIMMCFG_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_DIMMCFG_DDR4, i)
#define	UMC_DIMMCFG_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_DIMMCFG_DDR5, i)
#define	UMC_DIMMCFG_GET_PKG_RALIGN(r)	bitx32(r, 10, 10)
#define	UMC_DIMMCFG_GET_REFRESH_DIS(r)	bitx32(r, 9, 9)
#define	UMC_DIMMCFG_GET_DQ_SWAP_DIS(r)	bitx32(r, 8, 8)
#define	UMC_DIMMCFG_GET_X16(r)		bitx32(r, 7, 7)
#define	UMC_DIMMCFG_GET_X4(r)		bitx32(r, 6, 6)
#define	UMC_DIMMCFG_GET_LRDIMM(r)	bitx32(r, 5, 5)
#define	UMC_DIMMCFG_GET_RDIMM(r)	bitx32(r, 4, 4)
#define	UMC_DIMMCFG_GET_CISCS(r)	bitx32(r, 3, 3)
#define	UMC_DIMMCFG_GET_3DS(r)		bitx32(r, 2, 2)

#define	UMC_DIMMCFG_DDR4_GET_NVDIMMP(r)	bitx32(r, 12, 12)
#define	UMC_DIMMCFG_DDR4_GET_DDR4e(r)	bitx32(r, 11, 11)
#define	UMC_DIMMCFG_DDR5_GET_RALIGN(r)	bitx32(r, 13, 12)
#define	UMC_DIMMCFG_DDR5_GET_ASYM(r)	bitx32(r, 11, 11)

#define	UMC_DIMMCFG_DDR4_GET_OUTPUT_INV(r)	bitx32(r, 1, 1)
#define	UMC_DIMMCFG_DDR4_GET_MRS_MIRROR(r)	bitx32(r, 0, 0)
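
/*
 * A small sketch (invented helper) of how one might derive the DRAM device
 * data width from the configuration bits above; a device that is neither x4
 * nor x16 is assumed to be x8.
 */
static inline uint32_t
umc_example_dram_width(uint32_t dimmcfg)
{
	if (UMC_DIMMCFG_GET_X4(dimmcfg) != 0)
		return (4);
	if (UMC_DIMMCFG_GET_X16(dimmcfg) != 0)
		return (16);
	return (8);
}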

/*
 * UMC::CH::AddrHashBank -- These registers describe how portions of the
 * address are hashed to influence which bank is used.
 */
/*CSTYLED*/
#define	D_UMC_BANK_HASH_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xc8,	\
	.srd_nents = 5	\
}
/*CSTYLED*/
#define	D_UMC_BANK_HASH_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x98,	\
	.srd_nents = 5	\
}
#define	UMC_BANK_HASH_DDR4(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_BANK_HASH_DDR4, i)
#define	UMC_BANK_HASH_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_BANK_HASH_DDR5, i)
#define	UMC_BANK_HASH_GET_ROW(r)	bitx32(r, 31, 14)
#define	UMC_BANK_HASH_GET_COL(r)	bitx32(r, 13, 1)
#define	UMC_BANK_HASH_GET_EN(r)		bitx32(r, 0, 0)
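
/*
 * An illustrative sketch (invented helper) of the XOR-style hash these fields
 * suggest: when enabled, the parity of the selected row and column bits is
 * folded into a single bank bit. The authoritative algorithm is in the PPR.
 */
static inline uint32_t
umc_example_hash_bank_bit(uint32_t hashreg, uint32_t row, uint32_t col,
    uint32_t bank_bit)
{
	uint32_t sel, par = 0;

	if (UMC_BANK_HASH_GET_EN(hashreg) == 0)
		return (bank_bit);

	sel = (row & UMC_BANK_HASH_GET_ROW(hashreg)) ^
	    (col & UMC_BANK_HASH_GET_COL(hashreg));

	/* XOR-reduce the selected bits down to a single parity bit. */
	while (sel != 0) {
		par ^= sel & 1;
		sel >>= 1;
	}

	return (bank_bit ^ par);
}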

/*
 * UMC::CH::AddrHashRM -- This hash register describes how to transform a UMC
 * address when trying to do rank hashing. Note, instance 3 is reserved in
 * DDR5 modes.
 */
/*CSTYLED*/
#define	D_UMC_RANK_HASH_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xdc,	\
	.srd_nents = 3	\
}
/*CSTYLED*/
#define	D_UMC_RANK_HASH_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xb0,	\
	.srd_nents = 4	\
}
#define	UMC_RANK_HASH_DDR4(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_RANK_HASH_DDR4, i)
#define	UMC_RANK_HASH_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_RANK_HASH_DDR5, i)
#define	UMC_RANK_HASH_GET_ADDR(r)	bitx32(r, 31, 1)
#define	UMC_RANK_HASH_SHIFT		9
#define	UMC_RANK_HASH_GET_EN(r)		bitx32(r, 0, 0)
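
/*
 * Our summary, not PPR text: the rank hash (and the chip-select hash below)
 * follows the same XOR-reduction idea as the bank hash above, except that the
 * mask selects normalized address bits directly. Conceptually, when enabled,
 * the parity of (norm_addr & ((uint64_t)UMC_RANK_HASH_GET_ADDR(r) <<
 * UMC_RANK_HASH_SHIFT)) is XORed into the rank selection bit.
 */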

/*
 * UMC::AddrHashRMExt -- Extended rank hash addresses.
 */
/*CSTYLED*/
#define	D_UMC_RANK_HASH_EXT_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xbb0,	\
	.srd_nents = 4	\
}
#define	UMC_RANK_HASH_EXT_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_RANK_HASH_EXT_DDR5, i)
#define	UMC_RANK_HASH_EXT_GET_ADDR(r)	bitx32(r, 7, 0)
#define	UMC_RANK_HASH_EXT_ADDR_SHIFT	40

/*
 * UMC::CH::AddrHashPC, UMC::CH::AddrHashPC2 -- These registers describe a hash
 * to use for the DDR5 sub-channel. Note, in the DDR4 case this is actually the
 * upper two rank hash registers defined above, because the DDR4-based systems
 * where this occurs only have up to one rank hash.
 */
/*CSTYLED*/
#define	D_UMC_PC_HASH_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xc0	\
}
/*CSTYLED*/
#define	D_UMC_PC_HASH2_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xc4	\
}
#define	UMC_PC_HASH_DDR4(u)	UMC_RANK_HASH_DDR4(u, 1)
#define	UMC_PC_HASH2_DDR4(u)	UMC_RANK_HASH_DDR4(u, 2)
#define	UMC_PC_HASH_DDR5(u)	amdzen_umc_smn_reg(u, D_UMC_PC_HASH_DDR5, 0)
#define	UMC_PC_HASH2_DDR5(u)	amdzen_umc_smn_reg(u, D_UMC_PC_HASH2_DDR5, 0)
#define	UMC_PC_HASH_GET_ROW(r)		bitx32(r, 31, 14)
#define	UMC_PC_HASH_GET_COL(r)		bitx32(r, 13, 1)
#define	UMC_PC_HASH_GET_EN(r)		bitx32(r, 0, 0)
#define	UMC_PC_HASH2_GET_BANK(r)	bitx32(r, 4, 0)

/*
 * UMC::CH::AddrHashCS -- Hashing: chip-select edition. Note, these can
 * ultimately change which DIMM is actually being accessed.
 */
/*CSTYLED*/
#define	D_UMC_CS_HASH_DDR4	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xe8,	\
	.srd_nents = 2	\
}
/*CSTYLED*/
#define	D_UMC_CS_HASH_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xc8,	\
	.srd_nents = 2	\
}
#define	UMC_CS_HASH_DDR4(u, i)	amdzen_umc_smn_reg(u, D_UMC_CS_HASH_DDR4, i)
#define	UMC_CS_HASH_DDR5(u, i)	amdzen_umc_smn_reg(u, D_UMC_CS_HASH_DDR5, i)
#define	UMC_CS_HASH_GET_ADDR(r)		bitx32(r, 31, 1)
#define	UMC_CS_HASH_SHIFT		9
#define	UMC_CS_HASH_GET_EN(r)		bitx32(r, 0, 0)

/*
 * UMC::AddrHashExtCS -- Extended chip-select hash addresses.
 */
/*CSTYLED*/
#define	D_UMC_CS_HASH_EXT_DDR5	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xbc8,	\
	.srd_nents = 2	\
}
#define	UMC_CS_HASH_EXT_DDR5(u, i)	\
    amdzen_umc_smn_reg(u, D_UMC_CS_HASH_EXT_DDR5, i)
#define	UMC_CS_HASH_EXT_GET_ADDR(r)	bitx32(r, 7, 0)
#define	UMC_CS_HASH_EXT_ADDR_SHIFT	40

/*
 * UMC::CH::UmcConfig -- This register controls various features of the device.
 * For our purposes we mostly care about seeing whether ECC is enabled and the
 * DIMM type.
 */
/*CSTYLED*/
#define	D_UMC_UMCCFG	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x100	\
}
#define	UMC_UMCCFG(u)	amdzen_umc_smn_reg(u, D_UMC_UMCCFG, 0)
#define	UMC_UMCCFG_GET_READY(r)		bitx32(r, 31, 31)
#define	UMC_UMCCFG_GET_ECC_EN(r)	bitx32(r, 12, 12)
#define	UMC_UMCCFG_GET_BURST_CTL(r)	bitx32(r, 11, 10)
#define	UMC_UMCCFG_GET_BURST_LEN(r)	bitx32(r, 9, 8)
#define	UMC_UMCCFG_GET_DDR_TYPE(r)	bitx32(r, 2, 0)
#define	UMC_UMCCFG_DDR4_T_DDR4		0
#define	UMC_UMCCFG_DDR4_T_LPDDR4	5

#define	UMC_UMCCFG_DDR5_T_DDR4		0
#define	UMC_UMCCFG_DDR5_T_DDR5		1
#define	UMC_UMCCFG_DDR5_T_LPDDR4	5
#define	UMC_UMCCFG_DDR5_T_LPDDR5	6
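
/*
 * A brief usage sketch (invented helper): determining whether a channel is
 * running DDR5 with ECC enabled based on UMC::CH::UmcConfig.
 */
static inline int
umc_example_is_ddr5_ecc(uint32_t umccfg)
{
	return (UMC_UMCCFG_GET_DDR_TYPE(umccfg) == UMC_UMCCFG_DDR5_T_DDR5 &&
	    UMC_UMCCFG_GET_ECC_EN(umccfg) != 0);
}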

/*
 * UMC::CH::DataCtrl -- Various settings around whether data encryption or
 * scrambling is enabled. Note, this register really changes a bunch from family
 * to family.
 */
/*CSTYLED*/
#define	D_UMC_DATACTL	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x144	\
}
#define	UMC_DATACTL(u)		amdzen_umc_smn_reg(u, D_UMC_DATACTL, 0)
#define	UMC_DATACTL_GET_ENCR_EN(r)	bitx32(r, 8, 8)
#define	UMC_DATACTL_GET_SCRAM_EN(r)	bitx32(r, 0, 0)

#define	UMC_DATACTL_DDR4_GET_TWEAK(r)		bitx32(r, 19, 16)
#define	UMC_DATACTL_DDR4_GET_VMG2M(r)		bitx32(r, 12, 12)
#define	UMC_DATACTL_DDR4_GET_FORCE_ENCR(r)	bitx32(r, 11, 11)

#define	UMC_DATACTL_DDR5_GET_TWEAK(r)	bitx32(r, 16, 16)
#define	UMC_DATACTL_DDR5_GET_XTS(r)	bitx32(r, 14, 14)
#define	UMC_DATACTL_DDR5_GET_AES256(r)	bitx32(r, 13, 13)

/*
 * UMC::CH::EccCtrl -- Various settings around how ECC operates.
 */
/*CSTYLED*/
#define	D_UMC_ECCCTL	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0x14c	\
}
#define	UMC_ECCCTL(u)	amdzen_umc_smn_reg(u, D_UMC_ECCCTL, 0)
#define	UMC_ECCCTL_GET_RD_EN(r)		bitx32(r, 10, 10)
#define	UMC_ECCCTL_GET_X16(r)		bitx32(r, 9, 9)
#define	UMC_ECCCTL_GET_UC_FATAL(r)	bitx32(r, 8, 8)
#define	UMC_ECCCTL_GET_SYM_SIZE(r)	bitx32(r, 7, 7)
#define	UMC_ECCCTL_GET_BIT_IL(r)	bitx32(r, 6, 6)
#define	UMC_ECCCTL_GET_HIST_EN(r)	bitx32(r, 5, 5)
#define	UMC_ECCCTL_GET_SW_SYM_EN(r)	bitx32(r, 4, 4)
#define	UMC_ECCCTL_GET_WR_EN(r)		bitx32(r, 0, 0)

/*
 * Note, while this group appears generic and is the same in both DDR4/DDR5
 * systems, it is not present on every SoC and seems to depend on
 * something else inside the chip.
 */
#define	UMC_ECCCTL_DDR_GET_PI(r)	bitx32(r, 13, 13)
#define	UMC_ECCCTL_DDR_GET_PF_DIS(r)	bitx32(r, 12, 12)
#define	UMC_ECCCTL_DDR_GET_SDP_OVR(r)	bitx32(r, 11, 11)
#define	UMC_ECCCTL_DDR_GET_REPLAY_EN(r)	bitx32(r, 1, 1)

#define	UMC_ECCCTL_DDR5_GET_PIN_RED(r)	bitx32(r, 14, 14)

/*
 * UMC::CH::UmcCap, UMC::CH::UmcCapHi -- Various capability registers and
 * feature disables. We mostly just record these to aid future debugging; they
 * aren't used as part of memory decoding.
 */
/*CSTYLED*/
#define	D_UMC_UMCCAP	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xdf0	\
}
/*CSTYLED*/
#define	D_UMC_UMCCAP_HI	(const smn_reg_def_t){	\
	.srd_unit = SMN_UNIT_UMC,	\
	.srd_reg = 0xdf4	\
}
#define	UMC_UMCCAP(u)		amdzen_umc_smn_reg(u, D_UMC_UMCCAP, 0)
#define	UMC_UMCCAP_HI(u)	amdzen_umc_smn_reg(u, D_UMC_UMCCAP_HI, 0)

#ifdef __cplusplus
}
#endif

#endif /* _SYS_UMC_H */