Diffstat (limited to 'usr/src/uts/sun4u/io/px')
-rw-r--r--  usr/src/uts/sun4u/io/px/px_asm.h        |   43
-rw-r--r--  usr/src/uts/sun4u/io/px/px_asm.s        |  169
-rw-r--r--  usr/src/uts/sun4u/io/px/px_csr.h        |  113
-rw-r--r--  usr/src/uts/sun4u/io/px/px_hlib.c       | 2705
-rw-r--r--  usr/src/uts/sun4u/io/px/px_lib4u.c      | 1566
-rw-r--r--  usr/src/uts/sun4u/io/px/px_lib4u.h      |  337
-rw-r--r--  usr/src/uts/sun4u/io/px/px_regs.h       | 2637
-rw-r--r--  usr/src/uts/sun4u/io/px/px_tools.c      | 1038
-rw-r--r--  usr/src/uts/sun4u/io/px/px_tools_var.h  |   55
9 files changed, 8663 insertions, 0 deletions
diff --git a/usr/src/uts/sun4u/io/px/px_asm.h b/usr/src/uts/sun4u/io/px/px_asm.h
new file mode 100644
index 0000000000..3098136c5e
--- /dev/null
+++ b/usr/src/uts/sun4u/io/px/px_asm.h
@@ -0,0 +1,43 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PX_ASM_H
+#define _SYS_PX_ASM_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int px_phys_peek(size_t size, uint64_t paddr, uint64_t *value, int type);
+extern int px_phys_poke(size_t size, uint64_t paddr, uint64_t *value, int type);
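+
+/*
+ * Illustrative usage sketch (not part of this change): px_phys_peek() and
+ * px_phys_poke() expect the caller to have installed on_trap() protection,
+ * as described in px_asm.s.  Assuming the standard <sys/ontrap.h> interface,
+ * a caller looks roughly like:
+ *
+ *	on_trap_data_t otd;
+ *	uint64_t v;
+ *
+ *	if (!on_trap(&otd, OT_DATA_ACCESS))
+ *		(void) px_phys_peek(sizeof (v), paddr, &v, 1);
+ *	no_trap();
+ *
+ * where a non-zero last argument selects the big endian ASI.
+ */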
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PX_ASM_H */
diff --git a/usr/src/uts/sun4u/io/px/px_asm.s b/usr/src/uts/sun4u/io/px/px_asm.s
new file mode 100644
index 0000000000..9bea60be94
--- /dev/null
+++ b/usr/src/uts/sun4u/io/px/px_asm.s
@@ -0,0 +1,169 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Assembly language support for px driver
+ */
+
+#include <sys/asm_linkage.h>
+#include <sys/machthread.h>
+#include <sys/privregs.h>
+
+/*LINTLIBRARY*/
+
+#if defined(lint)
+
+/*ARGSUSED*/
+int
+px_phys_peek(size_t size, uint64_t paddr, uint64_t *value, int type)
+{ return (0); }
+
+/*ARGSUSED*/
+int
+px_phys_poke(size_t size, uint64_t paddr, uint64_t *value, int type)
+{ return (0); }
+
+#else /* lint */
+
+! px_phys_peek: Do physical address read.
+!
+! %o0 is size in bytes - Must be 8, 4, 2 or 1. Invalid sizes default to 1.
+! %o1 is address to read
+! %o2 is address to save value into
+! %o3 is 0 for little endian, non-zero for big endian
+!
+! To be called from an on_trap environment.
+! Interrupts will be disabled for the duration of the read, to prevent
+! an interrupt from raising the trap level to 1 and then a possible
+! data access exception being delivered while the trap level > 0.
+!
+! Assumes alignment is correct.
+
+ ENTRY(px_phys_peek)
+
+ rdpr %pstate, %o4 ! Disable interrupts if not already
+ andcc %o4, PSTATE_IE, %g2 ! Save original state first
+ bz .peek_ints_disabled
+ nop
+ wrpr %o4, PSTATE_IE, %pstate
+.peek_ints_disabled:
+
+ tst %o3 ! Set up %asi with modifier for
+ movz %xcc, ASI_IOL, %g1 ! Big/little endian physical space
+ movnz %xcc, ASI_IO, %g1
+ mov %g1, %asi
+
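+	! Size dispatch: the compare for the next size sits in the delay
+	! slot of each branch, so exactly one sized load/store pair
+	! executes for the requested access size.
+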
+ cmp %o0, 8 ! 64-bit?
+ bne .peek_int
+ cmp %o0, 4 ! 32-bit?
+ ldxa [%o1]%asi, %g1
+ ba .peekdone
+ stx %g1, [%o2]
+
+.peek_int:
+ bne .peek_half
+ cmp %o0, 2 ! 16-bit?
+ lduwa [%o1]%asi, %g1
+ ba .peekdone
+ stuw %g1, [%o2]
+
+.peek_half:
+ bne .peek_byte
+ nop
+ lduha [%o1]%asi, %g1
+ ba .peekdone
+ stuh %g1, [%o2]
+
+.peek_byte:
+ lduba [%o1]%asi, %g1 ! 8-bit!
+ stub %g1, [%o2]
+
+.peekdone:
+ membar #Sync ! Make sure the loads take
+ tst %g2 ! No need to reenable interrupts
+ bz .peek_ints_done ! if not enabled at entry
+ rdpr %pstate, %o4
+ wrpr %o4, PSTATE_IE, %pstate
+.peek_ints_done:
+ mov %g0, %o0
+ retl
+ nop
+ SET_SIZE(px_phys_peek)
+
+
+! px_phys_poke: Do physical address write.
+!
+! %o0 is size in bytes - Must be 8, 4, 2 or 1. Invalid sizes default to 1.
+! %o1 is address to write to
+! %o2 is address to read from
+! %o3 is 0 for little endian, non-zero for big endian
+!
+! Always returns success (0) in %o0
+!
+! Assumes alignment is correct and that on_trap handling has been installed
+
+ ENTRY(px_phys_poke)
+
+ tst %o3
+ bz .poke_asi_set
+ mov ASI_IOL, %asi
+ mov ASI_IO, %asi
+.poke_asi_set:
+
+ cmp %o0, 8 ! 64 bit?
+ bne .poke_int
+ cmp %o0, 4 ! 32-bit?
+ ldx [%o2], %g1
+ ba .pokedone
+ stxa %g1, [%o1]%asi
+
+.poke_int:
+ bne .poke_half
+ cmp %o0, 2 ! 16-bit?
+ lduw [%o2], %g1
+ ba .pokedone
+ stuwa %g1, [%o1]%asi
+
+.poke_half:
+ bne .poke_byte
+ nop
+ lduh [%o2], %g1
+ ba .pokedone
+ stuha %g1, [%o1]%asi
+
+.poke_byte:
+ ldub [%o2], %g1 ! 8-bit!
+ stuba %g1, [%o1]%asi
+
+.pokedone:
+ membar #Sync
+ retl
+ mov %g0, %o0
+ SET_SIZE(px_phys_poke)
+
+#endif
diff --git a/usr/src/uts/sun4u/io/px/px_csr.h b/usr/src/uts/sun4u/io/px/px_csr.h
new file mode 100644
index 0000000000..3d8821af7e
--- /dev/null
+++ b/usr/src/uts/sun4u/io/px/px_csr.h
@@ -0,0 +1,113 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PX_CSR_H
+#define _SYS_PX_CSR_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* To read and write whole register */
+#define CSR_XR(base, off) \
+ (*(volatile uint64_t *)((base) + ((off))))
+
+#define CSRA_XR(base, off, index) \
+ (*(volatile uint64_t *)((base) + ((off) + ((index) * 8))))
+
+#define CSR_XS(base, off, val) \
+ ((*(volatile uint64_t *)((base) + ((off)))) = (val))
+
+#define CSRA_XS(base, off, index, val) \
+ ((*(volatile uint64_t *)((base) + ((off) + ((index) * 8)))) = (val))
+
+/* To read, set and clear specific fields within a register */
+#define CSR_FR(base, off, bit) \
+ (((*(volatile uint64_t *) ((base) + ((off)))) >> \
+ (off ## _ ## bit)) & (off ## _ ## bit ## _MASK))
+
+#define CSRA_FR(base, off, index, bit) \
+ (((*(volatile uint64_t *) ((base) + ((off) + ((index) * 8)))) >> \
+ (off ## _ ## bit)) & (off ## _ ## bit ## _MASK))
+
+#define CSR_FS(base, off, bit, val) \
+ ((*(volatile uint64_t *) ((base) + ((off)))) = \
+ (((*(volatile uint64_t *) ((base) + ((off)))) & \
+ ~(((uint64_t)(off ## _ ## bit ## _MASK)) << \
+ (off ## _ ## bit))) | (((uint64_t)(val)) << (off ## _ ## bit))))
+
+#define CSRA_FS(base, off, index, bit, val) \
+ ((*(volatile uint64_t *) ((base) + ((off) + ((index) * 8)))) = \
+ (((*(volatile uint64_t *) ((base) + ((off) + ((index) * 8)))) & \
+ ~(((uint64_t)(off ## _ ## bit ## _MASK)) << \
+ (off ## _ ## bit))) | (((uint64_t)(val)) << (off ## _ ## bit))))
+
+#define CSR_FC(base, off, bit) \
+ ((*(volatile uint64_t *) ((base) + ((off)))) = \
+ ((*(volatile uint64_t *)((base) + ((off)))) & \
+ ~(((uint64_t)(off ## _ ## bit ## _MASK)) << (off ## _ ## bit))))
+
+#define CSRA_FC(base, off, index, bit) \
+ ((*(volatile uint64_t *) ((base) + ((off) + ((index) * 8)))) = \
+ ((*(volatile uint64_t *)((base) + ((off) + ((index) * 8)))) & \
+ ~(((uint64_t)(off ## _ ## bit ## _MASK)) << (off ## _ ## bit))))
+
+/* To read, set and clear specific bit within a register */
+#define CSR_BR(base, off, bit) \
+ (((*(volatile uint64_t *)((base) + ((off)))) >> \
+ (off ## _ ## bit)) & 0x1)
+
+#define CSRA_BR(base, off, index, bit) \
+ (((*(volatile uint64_t *)((base) + ((off) + ((index) * 8)))) >> \
+ (off ## _ ## bit)) & 0x1)
+
+#define CSR_BS(base, off, bit) \
+ ((*(volatile uint64_t *)((base) + ((off)))) = \
+ ((*(volatile uint64_t *)((base) + ((off)))) | \
+ (1ULL<<(off ## _ ## bit))))
+
+#define CSRA_BS(base, off, index, bit) \
+ ((*(volatile uint64_t *)((base) + ((off) + ((index) * 8)))) = \
+ ((*(volatile uint64_t *)((base) + ((off) + ((index) * 8)))) | \
+ (1ULL<<(off ## _ ## bit))))
+
+#define CSR_BC(base, off, bit) \
+ ((*(volatile uint64_t *)((base) + ((off)))) = \
+ ((*(volatile uint64_t *)((base) + ((off)))) & \
+ ~(1ULL<<(off ## _ ## bit))))
+
+#define CSRA_BC(base, off, index, bit) \
+ ((*(volatile uint64_t *)((base) + ((off) + ((index) * 8)))) = \
+ ((*(volatile uint64_t *)((base) + ((off) + ((index) * 8)))) & \
+ ~(1ULL<<(off ## _ ## bit))))
+
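+/*
+ * Usage sketch (illustrative only): the macros token-paste the register
+ * name and the field name, so callers pass the bare tokens and the shift
+ * and mask constants defined in px_regs.h are picked up automatically.
+ * For example:
+ *
+ *	width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
+ *		reads the field using TLU_LINK_STATUS_WIDTH and
+ *		TLU_LINK_STATUS_WIDTH_MASK.
+ *
+ *	CSR_BS(csr_base, TLU_CONTROL, NPWR_EN);
+ *		sets the single bit at position TLU_CONTROL_NPWR_EN.
+ */
+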
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PX_CSR_H */
diff --git a/usr/src/uts/sun4u/io/px/px_hlib.c b/usr/src/uts/sun4u/io/px/px_hlib.c
new file mode 100644
index 0000000000..52fa649029
--- /dev/null
+++ b/usr/src/uts/sun4u/io/px/px_hlib.c
@@ -0,0 +1,2705 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/cmn_err.h>
+#include <sys/vmsystm.h>
+#include <sys/vmem.h>
+#include <sys/machsystm.h> /* lddphys() */
+#include <sys/iommutsb.h>
+#include <sys/pci.h>
+#include <pcie_pwr.h>
+#include <px_obj.h>
+#include "px_regs.h"
+#include "px_csr.h"
+#include "px_lib4u.h"
+
+/*
+ * Registers that need to be saved and restored during suspend/resume.
+ */
+
+/*
+ * Registers in the PEC Module.
+ * LPU_RESET should be set to 0ull during resume
+ */
+static uint64_t pec_config_state_regs[] = {
+ PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
+ ILU_ERROR_LOG_ENABLE,
+ ILU_INTERRUPT_ENABLE,
+ TLU_CONTROL,
+ TLU_OTHER_EVENT_LOG_ENABLE,
+ TLU_OTHER_EVENT_INTERRUPT_ENABLE,
+ TLU_DEVICE_CONTROL,
+ TLU_LINK_CONTROL,
+ TLU_UNCORRECTABLE_ERROR_LOG_ENABLE,
+ TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE,
+ TLU_CORRECTABLE_ERROR_LOG_ENABLE,
+ TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE,
+ LPU_DEBUG_CONFIG,
+ LPU_INTERRUPT_MASK,
+ LPU_LINK_LAYER_CONFIG,
+ LPU_FLOW_CONTROL_UPDATE_CONTROL,
+ LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD,
+ LPU_TXLINK_REPLAY_TIMER_THRESHOLD,
+ LPU_REPLAY_BUFFER_MAX_ADDRESS,
+ LPU_TXLINK_RETRY_FIFO_POINTER,
+ LPU_PHY_INTERRUPT_MASK,
+ LPU_RECEIVE_PHY_INTERRUPT_MASK,
+ LPU_TRANSMIT_PHY_INTERRUPT_MASK,
+ LPU_LTSSM_CONFIG2,
+ LPU_LTSSM_CONFIG3,
+ LPU_LTSSM_CONFIG4,
+ LPU_LTSSM_CONFIG5,
+ LPU_LTSSM_INTERRUPT_MASK,
+ LPU_GIGABLAZE_GLUE_INTERRUPT_MASK,
+ DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
+ DMC_DEBUG_SELECT_FOR_PORT_A,
+ DMC_DEBUG_SELECT_FOR_PORT_B
+};
+#define PEC_SIZE (sizeof (pec_config_state_regs))
+#define PEC_KEYS (PEC_SIZE / sizeof (uint64_t))
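+
+/*
+ * Illustrative sketch only (not part of this change): the register tables
+ * in this file (pec_config_state_regs above, and the MMU, IB and CB lists
+ * below) are intended to be walked with CSR_XR/CSR_XS when saving and
+ * restoring state across suspend/resume, roughly:
+ *
+ *	for (i = 0; i < PEC_KEYS; i++)
+ *		saved[i] = CSR_XR(csr_base, pec_config_state_regs[i]);
+ *
+ * with the restore pass writing the values back in the same order ("saved"
+ * is a placeholder name; LPU_RESET is forced to 0ull, per the note above).
+ */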
+
+/*
+ * Registers for the MMU module.
+ * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
+ */
+static uint64_t mmu_config_state_regs[] = {
+ MMU_TSB_CONTROL,
+ MMU_CONTROL_AND_STATUS,
+ MMU_INTERRUPT_ENABLE
+};
+#define MMU_SIZE (sizeof (mmu_config_state_regs))
+#define MMU_KEYS (MMU_SIZE / sizeof (uint64_t))
+
+/*
+ * Registers for the IB Module
+ */
+static uint64_t ib_config_state_regs[] = {
+ IMU_ERROR_LOG_ENABLE,
+ IMU_INTERRUPT_ENABLE
+};
+#define IB_SIZE (sizeof (ib_config_state_regs))
+#define IB_KEYS (IB_SIZE / sizeof (uint64_t))
+#define IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))
+
+/*
+ * Registers for the CB module.
+ * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
+ */
+static uint64_t cb_config_state_regs[] = {
+ JBUS_PARITY_CONTROL,
+ JBC_FATAL_RESET_ENABLE,
+ JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
+ JBC_ERROR_LOG_ENABLE,
+ JBC_INTERRUPT_ENABLE
+};
+#define CB_SIZE (sizeof (cb_config_state_regs))
+#define CB_KEYS (CB_SIZE / sizeof (uint64_t))
+
+static uint64_t msiq_config_other_regs[] = {
+ ERR_COR_MAPPING,
+ ERR_NONFATAL_MAPPING,
+ ERR_FATAL_MAPPING,
+ PM_PME_MAPPING,
+ PME_TO_ACK_MAPPING,
+ MSI_32_BIT_ADDRESS,
+ MSI_64_BIT_ADDRESS
+};
+#define MSIQ_OTHER_SIZE (sizeof (msiq_config_other_regs))
+#define MSIQ_OTHER_KEYS (MSIQ_OTHER_SIZE / sizeof (uint64_t))
+
+#define MSIQ_STATE_SIZE (EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
+#define MSIQ_MAPPING_SIZE (MSI_MAPPING_ENTRIES * sizeof (uint64_t))
+
+static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
+static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);
+
+/* ARGSUSED */
+void
+hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
+{
+ uint64_t val;
+
+ /* Check if we need to enable inverted parity */
+ val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
+ CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
+ DBG(DBG_CB, NULL, "hvio_cb_init, JBUS_PARITY_CONTROL: 0x%llx\n",
+ CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));
+
+ val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN)|
+ (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
+ (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
+ (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
+ (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
+ (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
+ (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
+ (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
+ CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
+ DBG(DBG_CB, NULL, "hvio_cb_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
+ CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));
+
+ /*
+ * Enable merge, jbc and dmc interrupts.
+ */
+ CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
+ DBG(DBG_CB, NULL,
+ "hvio_cb_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
+ CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
+
+ /*
+ * Enable all error log bits.
+ */
+ CSR_XS(xbc_csr_base, JBC_ERROR_LOG_ENABLE, -1ull);
+ DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
+ CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));
+
+ /*
+ * Enable all interrupts.
+ */
+ CSR_XS(xbc_csr_base, JBC_INTERRUPT_ENABLE, -1ull);
+ DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
+ CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));
+
+ /*
+ * Emit warning for pending errors and flush the logged error
+ * status register.
+ */
+ val = CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR);
+
+ CSR_XS(xbc_csr_base, JBC_ERROR_STATUS_CLEAR, -1ull);
+ DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
+ CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
+}
+
+/* ARGSUSED */
+void
+hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
+{
+ uint64_t val;
+
+ /*
+ * CSR_V IMU_ERROR_LOG_ENABLE Expect Kernel 0x3FF
+ */
+ val = -1ull;
+ CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, val);
+ DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));
+
+ /*
+ * CSR_V IMU_INTERRUPT_ENABLE Expect Kernel 0x3FF000003FF
+ */
+ val = -1ull;
+ CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, val);
+ DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));
+
+ /*
+ * CSR_V IMU_INTERRUPT_STATUS Expect HW 0x0
+ */
+ DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, IMU_INTERRUPT_STATUS));
+
+ /*
+ * CSR_V IMU_ERROR_STATUS_CLEAR Expect HW 0x0
+ */
+ DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
+ CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
+}
+
+/* ARGSUSED */
+static void
+ilu_init(caddr_t csr_base, pxu_t *pxu_p)
+{
+ uint64_t val;
+
+ /*
+ * CSR_V ILU_ERROR_LOG_ENABLE Expect OBP 0x10
+ */
+
+ val = 0ull;
+ val = (1ull << ILU_ERROR_LOG_ENABLE_IHB_PE);
+
+ CSR_XS(csr_base, ILU_ERROR_LOG_ENABLE, val);
+ DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));
+
+ /*
+ * CSR_V ILU_INTERRUPT_ENABLE Expect OBP 0x1000000010
+ */
+
+ val = (1ull << ILU_INTERRUPT_ENABLE_IHB_PE_S) |
+ (1ull << ILU_INTERRUPT_ENABLE_IHB_PE_P);
+
+ CSR_XS(csr_base, ILU_INTERRUPT_ENABLE, val);
+ DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));
+
+ /*
+ * CSR_V ILU_INTERRUPT_STATUS Expect HW 0x1000000010
+ */
+ DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, ILU_INTERRUPT_STATUS));
+
+ /*
+ * CSR_V ILU_ERROR_STATUS_CLEAR Expect HW 0x0
+ */
+ DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
+ CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
+}
+
+static void
+tlu_init(caddr_t csr_base, pxu_t *pxu_p)
+{
+ uint64_t val;
+
+ /*
+ * CSR_V TLU_CONTROL Expect OBP ???
+ */
+
+ /*
+ * L0s entry default timer value - 7.0 us
+ * Completion timeout select default value - 67.1 ms and
+ * OBP will set this value.
+ *
+ * Configuration - Bit 0 should always be 0 for upstream port.
+ * Bit 1 is clock - how is this related to the clock bit in TLU
+ * Link Control register? Both are hardware dependent and likely
+ * set by OBP.
+ *
+	 * Disable non-posted write ordering by setting the
+	 * NPWR_EN bit to force serialization of writes.
+ */
+ val = CSR_XR(csr_base, TLU_CONTROL);
+
+ if (pxu_p->chip_id == FIRE_VER_10) {
+ val |= (TLU_CONTROL_L0S_TIM_DEFAULT <<
+ FIRE10_TLU_CONTROL_L0S_TIM) |
+ (1ull << FIRE10_TLU_CONTROL_NPWR_EN) |
+ TLU_CONTROL_CONFIG_DEFAULT;
+ } else {
+ /* Default case is FIRE2.0 */
+ val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
+ (1ull << TLU_CONTROL_NPWR_EN) | TLU_CONTROL_CONFIG_DEFAULT;
+ }
+
+ CSR_XS(csr_base, TLU_CONTROL, val);
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
+ CSR_XR(csr_base, TLU_CONTROL));
+
+ /*
+ * CSR_V TLU_STATUS Expect HW 0x4
+ */
+
+ /*
+	 * Only bits [7:0] are currently defined. Bits [2:0]
+ * are the state, which should likely be in state active,
+ * 100b. Bit three is 'recovery', which is not understood.
+ * All other bits are reserved.
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, TLU_STATUS));
+
+ /*
+ * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
+ CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));
+
+ /*
+ * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
+ */
+
+ /*
+ * Ingress credits initial register. Bits [39:32] should be
+ * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
+ * be 0xC0. These are the reset values, and should be set by
+ * HW.
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
+ CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));
+
+ /*
+ * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
+ */
+
+ /*
+ * Diagnostic register - always zero unless we are debugging.
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
+ CSR_XR(csr_base, TLU_DIAGNOSTIC));
+
+ /*
+ * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
+ CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));
+
+ /*
+ * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
+ CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));
+
+ /*
+ * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
+ CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));
+
+ /*
+ * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expected HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
+ CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));
+
+ /*
+ * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expected HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
+ CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));
+
+ /*
+ * CSR_V TLU_OTHER_EVENT_LOG_ENABLE Expected HW 0x7FF0F
+ */
+
+ /*
+	 * First of a 'guilty five'. The problem now is that the order
+	 * seems to be different - some are log enable first then
+	 * interrupt enable, others have them reversed. For
+ * now I'll do them independently before creating a common
+ * framework for them all.
+ */
+
+ val = -1ull;
+ CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, val);
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));
+
+ /*
+ * CSR_V TLU_OTHER_EVENT_INTERRUPT_ENABLE OBP 0x7FF0F0007FF0F
+ */
+
+ /*
+	 * Second of five. Bits [55:32] are the secondary other event
+	 * interrupt enables, bits [23:0] are the primary other event
+	 * interrupt enables.
+ */
+
+ val = -1ull;
+ CSR_XS(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE, val);
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));
+
+ /*
+ * CSR_V TLU_OTHER_EVENT_INTERRUPT_STATUS Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));
+
+ /*
+ * CSR_V TLU_OTHER_EVENT_STATUS_CLEAR Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
+ CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));
+
+ /*
+ * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
+ CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));
+
+ /*
+ * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
+ CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));
+
+ /*
+ * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
+ CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));
+
+ /*
+ * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
+ CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));
+
+ /*
+ * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
+ CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));
+
+ /*
+ * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
+ CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));
+
+ /*
+ * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
+ CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));
+
+ /*
+ * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
+ CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));
+
+ /*
+ * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
+ */
+
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
+ CSR_XR(csr_base, TLU_DEBUG_SELECT_A));
+
+ /*
+ * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
+ CSR_XR(csr_base, TLU_DEBUG_SELECT_B));
+
+ /*
+ * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
+ CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));
+
+ /*
+ * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
+ */
+
+ /*
+	 * Bits [14:12] are the Max Read Request Size, which is always 64
+	 * bytes (000b). Bits [7:5] are the Max Payload Size, which
+	 * starts at 128 bytes (000b). This may be revisited if
+ * init_child finds greater values.
+ */
+ val = 0x0ull;
+ CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
+ CSR_XR(csr_base, TLU_DEVICE_CONTROL));
+
+ /*
+ * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, TLU_DEVICE_STATUS));
+
+ /*
+ * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
+ CSR_XR(csr_base, TLU_LINK_CAPABILITIES));
+
+ /*
+ * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
+ */
+
+ /*
+ * The CLOCK bit should be set by OBP if the hardware dictates,
+ * and if it is set then ASPM should be used since then L0s exit
+ * latency should be lower than L1 exit latency.
+ *
+ * Note that we will not enable power management during bringup
+	 * since it has not been tested and is creating some problems in
+ * simulation.
+ */
+ val = (1ull << TLU_LINK_CONTROL_CLOCK);
+
+ CSR_XS(csr_base, TLU_LINK_CONTROL, val);
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
+ CSR_XR(csr_base, TLU_LINK_CONTROL));
+
+ /*
+ * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
+ */
+
+ /*
+ * Not sure if HW or OBP will be setting this read only
+ * register. Bit 12 is Clock, and it should always be 1
+ * signifying that the component uses the same physical
+ * clock as the platform. Bits [9:4] are for the width,
+ * with the expected value above signifying a x1 width.
+ * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
+ * the only speed as yet supported by the PCI-E spec.
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, TLU_LINK_STATUS));
+
+ /*
+ * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
+ */
+
+ /*
+ * Power Limits for the slots. Will be platform
+ * dependent, and OBP will need to set after consulting
+ * with the HW guys.
+ *
+ * Bits [16:15] are power limit scale, which most likely
+ * will be 0b signifying 1x. Bits [14:7] are the Set
+ * Power Limit Value, which is a number which is multiplied
+ * by the power limit scale to get the actual power limit.
+ */
+ DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
+ CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));
+
+ /*
+ * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
+ */
+
+ /*
+ * First of a 'guilty five'. See note for Other Event Log.
+ */
+ val = -1ull;
+ CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, val);
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));
+
+ /*
+ * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE
+ * Expect Kernel 0x17F0110017F011
+ */
+
+ /*
+ * Second of a 'guilty five'. Needs the value in both bits [52:32]
+ * and bits [20:0] for primary and secondary error interrupts.
+ */
+ val = -1ull;
+ CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE, val);
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));
+
+ /*
+ * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));
+
+ /*
+ * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
+ CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));
+
+ /*
+ * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
+ CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));
+
+ /*
+ * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
+ CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));
+
+ /*
+ * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
+ CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));
+
+ /*
+ * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
+ CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));
+
+ /*
+ * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
+ */
+
+ /*
+	 * Another of the 'guilty five'.
+ */
+
+ val = -1ull;
+ CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, val);
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));
+
+ /*
+ * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
+ */
+
+ /*
+ * Bits [44:32] for secondary error, bits [12:0] for primary errors.
+ */
+ val = -1ull;
+ CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE, val);
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));
+
+ /*
+ * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));
+
+ /*
+ * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
+ */
+ DBG(DBG_TLU, NULL,
+ "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
+ CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
+}
+
+static void
+lpu_init(caddr_t csr_base, pxu_t *pxu_p)
+{
+ /* Variables used to set the ACKNAK Latency Timer and Replay Timer */
+ int link_width, max_payload;
+
+ uint64_t val;
+
+ /*
+ * ACKNAK Latency Threshold Table.
+	 * See Fire PRM 1.0 section 1.2.11.1, table 1-17.
+ */
+ int fire10_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE]
+ [LINK_WIDTH_ARR_SIZE] = {
+ {0xED, 0x76, 0x70, 0x58},
+ {0x1A0, 0x76, 0x6B, 0x61},
+ {0x22F, 0x9A, 0x6A, 0x6A},
+ {0x42F, 0x11A, 0x96, 0x96},
+ {0x82F, 0x21A, 0x116, 0x116},
+ {0x102F, 0x41A, 0x216, 0x216}
+ };
+
+ /*
+ * TxLink Replay Timer Latency Table
+	 * See Fire PRM 1.0 section 1.2.11.2, table 1-18.
+ */
+ int fire10_replay_timer_table[LINK_MAX_PKT_ARR_SIZE]
+ [LINK_WIDTH_ARR_SIZE] = {
+ {0x2C7, 0x108, 0xF6, 0xBD},
+ {0x4E0, 0x162, 0x141, 0xF1},
+ {0x68D, 0x1CE, 0x102, 0x102},
+ {0xC8D, 0x34E, 0x1C2, 0x1C2},
+ {0x188D, 0x64E, 0x342, 0x342},
+ {0x308D, 0xC4E, 0x642, 0x642}
+ };
+
+ /*
+ * ACKNAK Latency Threshold Table.
+ * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
+ */
+ int acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
+ {0xED, 0x49, 0x43, 0x30},
+ {0x1A0, 0x76, 0x6B, 0x48},
+ {0x22F, 0x9A, 0x56, 0x56},
+ {0x42F, 0x11A, 0x96, 0x96},
+ {0x82F, 0x21A, 0x116, 0x116},
+ {0x102F, 0x41A, 0x216, 0x216}
+ };
+
+ /*
+ * TxLink Replay Timer Latency Table
+	 * See Fire PRM 2.0 section 1.2.12.3, table 1-18.
+ */
+ int replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
+ {0x379, 0x112, 0xFC, 0xB4},
+ {0x618, 0x1BA, 0x192, 0x10E},
+ {0x831, 0x242, 0x143, 0x143},
+ {0xFB1, 0x422, 0x233, 0x233},
+ {0x1EB0, 0x7E1, 0x412, 0x412},
+ {0x3CB0, 0xF61, 0x7D2, 0x7D2}
+ };
+ /*
+	 * Get the Link Width. See the table above the LINK_WIDTH_ARR_SIZE
+	 * #define. Only Link Widths of x1, x4, and x8 are supported.
+	 * If any other width is reported, the default of x1 is used.
+ */
+ link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
+ DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);
+
+ /*
+ * Convert link_width to match timer array configuration.
+ */
+ switch (link_width) {
+ case 1:
+ link_width = 0;
+ break;
+ case 4:
+ link_width = 1;
+ break;
+ case 8:
+ link_width = 2;
+ break;
+ case 16:
+ link_width = 3;
+ break;
+ default:
+ link_width = 0;
+ }
+
+ /*
+ * Get the Max Payload Size.
+ * See table above LINK_MAX_PKT_ARR_SIZE #define
+ */
+ if (pxu_p->chip_id == FIRE_VER_10) {
+ max_payload = CSR_FR(csr_base,
+ FIRE10_LPU_LINK_LAYER_CONFIG, MAX_PAYLOAD);
+ } else {
+ /* Default case is FIRE2.0 */
+ max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
+ TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);
+ }
+
+ DBG(DBG_LPU, NULL, "lpu_init - May Payload: %d\n",
+ (0x80 << max_payload));
+
+ /* Make sure the packet size is not greater than 4096 */
+ max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
+ (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
+
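+	/*
+	 * Worked example (informational): the encoded max_payload value
+	 * indexes the timer tables above, and the packet size it denotes
+	 * is (0x80 << max_payload) bytes: 0 -> 128B, 1 -> 256B, 2 -> 512B,
+	 * 3 -> 1024B, 4 -> 2048B, 5 -> 4096B.  The clamp to
+	 * LINK_MAX_PKT_ARR_SIZE - 1 therefore caps the lookup at the
+	 * 4096 byte row.
+	 */
+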
+ /*
+ * CSR_V LPU_ID Expect HW 0x0
+ */
+
+ /*
+ * This register has link id, phy id and gigablaze id.
+ * Should be set by HW.
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
+ CSR_XR(csr_base, LPU_ID));
+
+ /*
+ * CSR_V LPU_RESET Expect Kernel 0x0
+ */
+
+ /*
+ * No reason to have any reset bits high until an error is
+ * detected on the link.
+ */
+ val = 0ull;
+ CSR_XS(csr_base, LPU_RESET, val);
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RESET));
+
+ /*
+ * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
+ */
+
+ /*
+	 * Bits [15:8] are Debug B, and bits [7:0] are Debug A.
+ * They are read-only. What do the 8 bits mean, and
+ * how do they get set if they are read only?
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_DEBUG_STATUS));
+
+ /*
+ * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
+ CSR_XR(csr_base, LPU_DEBUG_CONFIG));
+
+ /*
+ * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_CONTROL));
+
+ /*
+ * CSR_V LPU_LINK_STATUS Expect HW 0x101
+ */
+
+ /*
+ * This register has bits [9:4] for link width, and the
+	 * default of 0x10 means a width of x16. The problem is
+ * this width is not supported according to the TLU
+ * link status register.
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_STATUS));
+
+ /*
+ * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_INTERRUPT_STATUS));
+
+ /*
+ * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
+ */
+ val = 0ull;
+ CSR_XS(csr_base, LPU_INTERRUPT_MASK, val);
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
+ CSR_XR(csr_base, LPU_INTERRUPT_MASK));
+
+ /*
+ * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));
+
+ /*
+ * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));
+
+ /*
+ * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));
+
+ /*
+ * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));
+
+ /*
+ * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));
+
+ /*
+ * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));
+
+ /*
+ * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
+ */
+
+ /*
+ * This is another place where Max Payload can be set,
+ * this time for the link layer. It will be set to
+ * 128B, which is the default, but this will need to
+ * be revisited.
+ */
+ val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
+ CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));
+
+ /*
+ * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
+ */
+
+ /*
+ * Another R/W status register. Bit 3, DL up Status, will
+ * be set high. The link state machine status bits [2:0]
+ * are set to 0x1, but the status bits are not defined in the
+	 * PRM. What does 0x1 mean, what other values are possible
+	 * and what are their meanings?
+ *
+ * This register has been giving us problems in simulation.
+ * It has been mentioned that software should not program
+ * any registers with WE bits except during debug. So
+ * this register will no longer be programmed.
+ */
+
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));
+
+ /*
+ * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));
+
+ /*
+ * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));
+
+ /*
+ * CSR_V LPU_LINK_LAYER_INTERRUPT_MASK Expect OBP 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));
+
+ /*
+ * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
+ */
+
+ /*
+ * The PRM says that only the first two bits will be set
+ * high by default, which will enable flow control for
+	 * posted and non-posted updates, but NOT completion
+ * updates.
+ */
+ val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
+ (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
+ CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
+ CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));
+
+ /*
+ * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
+ * Expect OBP 0x1D4C
+ */
+
+ /*
+ * This should be set by OBP. We'll check to make sure.
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - "
+ "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
+ CSR_XR(csr_base,
+ LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));
+
+ /*
+ * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
+ */
+
+ /*
+ * This register has Flow Control Update Timer values for
+ * non-posted and posted requests, bits [30:16] and bits
+ * [14:0], respectively. These are read-only to SW so
+ * either HW or OBP needs to set them.
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - "
+ "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
+ CSR_XR(csr_base,
+ LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));
+
+ /*
+ * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
+ */
+
+ /*
+ * Same as timer0 register above, except for bits [14:0]
+ * have the timer values for completetions. Read-only to
+ * SW; OBP or HW need to set it.
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - "
+ "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
+ CSR_XR(csr_base,
+ LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));
+
+ /*
+ * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
+ */
+ if (pxu_p->chip_id == FIRE_VER_10) {
+ val = fire10_acknak_timer_table[max_payload][link_width];
+ } else {
+ /* Default case is FIRE2.0 */
+ val = acknak_timer_table[max_payload][link_width];
+ }
+
+ CSR_XS(csr_base,
+ LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);
+
+ DBG(DBG_LPU, NULL, "lpu_init - "
+ "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));
+
+ /*
+ * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));
+
+ /*
+ * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
+ */
+ if (pxu_p->chip_id == FIRE_VER_10) {
+ val = fire10_replay_timer_table[max_payload][link_width];
+ } else {
+ /* Default case is FIRE2.0 */
+ val = replay_timer_table[max_payload][link_width];
+ }
+
+ CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);
+
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));
+
+ /*
+ * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));
+
+ /*
+ * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));
+
+ /*
+ * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));
+
+ /*
+ * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
+ */
+ val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
+ LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
+ (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
+ LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));
+
+ CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));
+
+ /*
+ * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));
+
+ /*
+ * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));
+
+ /*
+ * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));
+
+ /*
+ * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));
+
+ /*
+ * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
+ */
+
+ /*
+ * Test only register. Will not be programmed.
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));
+
+ /*
+ * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
+ */
+
+ /*
+ * Test only register. Will not be programmed.
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));
+
+ /*
+ * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));
+
+ /*
+ * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));
+
+ /*
+ * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
+ */
+
+ /*
+ * Test only register. Will not be programmed.
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));
+
+ /*
+ * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));
+
+ /*
+ * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));
+
+ /*
+ * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));
+
+ /*
+ * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));
+
+ /*
+ * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));
+
+ /*
+ * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
+ */
+
+ /*
+ * Test only register. Will not be programmed.
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));
+
+ /*
+ * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
+ */
+
+ /*
+ * Test only register. Will not be programmed.
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));
+
+ /*
+ * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
+ */
+
+ /*
+ * Test only register.
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));
+
+ /*
+ * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - "
+ "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));
+
+ /*
+ * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
+ */
+
+ /*
+ * test only register.
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));
+
+ /*
+ * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
+ */
+
+ /*
+ * test only register.
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));
+
+ /*
+ * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
+ CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));
+
+ /*
+ * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));
+
+ /*
+ * CSR_V LPU_PHY_LAYER_INTERRUPT_AND_STATUS Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));
+
+ /*
+ * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
+ CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));
+
+ /*
+ * CSR_V LPU_PHY_INTERRUPT_MASK Expect HW 0x0
+ */
+
+ val = 0ull;
+ CSR_XS(csr_base, LPU_PHY_INTERRUPT_MASK, val);
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
+ CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));
+
+ /*
+ * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
+ */
+
+ /*
+ * This also needs some explanation. What is the best value
+ * for the water mark? Test mode enables which test mode?
+ * Programming model needed for the Receiver Reset Lane N
+ * bits.
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));
+
+ /*
+ * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));
+
+ /*
+ * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));
+
+ /*
+ * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));
+
+ /*
+ * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));
+
+ /*
+ * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));
+
+ /*
+ * CSR_V LPU_RECEIVE_PHY_INTERRUPT_MASK Expect OBP 0x0
+ */
+ val = 0ull;
+ CSR_XS(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK, val);
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
+ CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));
+
+ /*
+ * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));
+
+ /*
+ * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));
+
+ /*
+ * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));
+
+ /*
+ * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
+ CSR_XR(csr_base,
+ LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));
+
+ /*
+ * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_MASK Expect HW 0x0
+ */
+ val = 0ull;
+ CSR_XS(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK, val);
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));
+
+ /*
+ * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
+ CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));
+
+ /*
+ * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
+ */
+
+ /*
+ * The new PRM has values for LTSSM 8 ns timeout value and
+ * LTSSM 20 ns timeout value. But what do these values mean?
+ * Most of the other bits are questions as well.
+ *
+ * As such we will use the reset value.
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_CONFIG1));
+
+ /*
+ * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
+ */
+
+ /*
+	 * Again, what does the '12 ms timeout value' mean?
+ */
+ val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
+ LPU_LTSSM_CONFIG2_LTSSM_12_TO);
+ CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_CONFIG2));
+
+ /*
+ * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
+ */
+ val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
+ LPU_LTSSM_CONFIG3_LTSSM_2_TO);
+ CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_CONFIG3));
+
+ /*
+ * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
+ *
+ * XXX fix LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT &
+ * LPU_LTSSM_CONFIG4_N_FTS_DEFAULT in px_pec.h
+ */
+ val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
+ LPU_LTSSM_CONFIG4_DATA_RATE) |
+ (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
+ LPU_LTSSM_CONFIG4_N_FTS));
+ CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_CONFIG4));
+
+ /*
+ * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
+ */
+ val = 0ull;
+ CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_CONFIG5));
+
+ /*
+ * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
+ */
+
+ /*
+ * LTSSM Status registers are test only.
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_STATUS1));
+
+ /*
+ * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_STATUS2));
+
+ /*
+ * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));
+
+ /*
+ * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));
+
+ /*
+ * CSR_V LPU_LTSSM_INTERRUPT_MASK Expect HW 0x0
+ */
+ val = 0ull;
+ CSR_XS(csr_base, LPU_LTSSM_INTERRUPT_MASK, val);
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));
+
+ /*
+ * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
+ CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
+ CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
+ CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
+ CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS Expect OBP 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - "
+ "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
+ CSR_XR(csr_base,
+ LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_MASK Expect OBP 0x0
+ */
+
+ /*
+ * Reset value masks all interrupts. This will be changed
+ * to enable all interrupts.
+ */
+ val = 0x0ull;
+ CSR_XS(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK, val);
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
+ CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
+ CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
+ */
+ DBG(DBG_LPU, NULL,
+ "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
+ CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));
+
+ /*
+ * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
+ */
+ DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
+ CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
+}
+
+/* ARGSUSED */
+static void
+dmc_init(caddr_t csr_base, pxu_t *pxu_p)
+{
+ uint64_t val;
+
+/*
+ * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect OBP 0x8000000000000003
+ */
+
+ val = -1ull;
+ CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
+ DBG(DBG_DMC, NULL,
+ "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
+
+ /*
+ * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
+ */
+ DBG(DBG_DMC, NULL,
+ "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));
+
+ /*
+ * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
+ */
+ val = 0x0ull;
+ CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
+ DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
+ CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));
+
+ /*
+ * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
+ */
+ val = 0x0ull;
+ CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
+ DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
+ CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
+}
+
+void
+hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
+{
+ uint64_t val;
+
+ ilu_init(csr_base, pxu_p);
+ tlu_init(csr_base, pxu_p);
+ lpu_init(csr_base, pxu_p);
+ dmc_init(csr_base, pxu_p);
+
+/*
+ * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect Kernel 0x800000000000000F
+ */
+
+ val = -1ull;
+ CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
+ DBG(DBG_PEC, NULL,
+ "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
+ CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
+
+ /*
+ * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
+ */
+ DBG(DBG_PEC, NULL,
+ "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
+ CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
+}
+
+void
+hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
+{
+ uint64_t val, i, tsb_ctrl, obp_tsb_pa, *base_tte_addr;
+ uint_t obp_tsb_entries, obp_tsb_size;
+
+ bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);
+
+ /*
+ * Preserve OBP's TSB
+ */
+ val = CSR_XR(csr_base, MMU_TSB_CONTROL);
+
+ tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);
+
+ obp_tsb_pa = tsb_ctrl & 0x7FFFFFFE000;
+ obp_tsb_size = tsb_ctrl & 0xF;
+
+ obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);
+
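+	/*
+	 * Copy OBP's valid TTEs into the tail of the new, larger TSB.
+	 * Since the DVMA window is anchored at its high end, this keeps
+	 * the DVMA mappings OBP has already established intact.
+	 */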
+ base_tte_addr = pxu_p->tsb_vaddr +
+ ((pxu_p->tsb_size >> 3) - obp_tsb_entries);
+
+ for (i = 0; i < obp_tsb_entries; i++) {
+ uint64_t tte = lddphys(obp_tsb_pa + i * 8);
+
+ if (!MMU_TTE_VALID(tte))
+ continue;
+
+ base_tte_addr[i] = tte;
+ }
+
+ /*
+ * Invalidate the TLB through the diagnostic register.
+ */
+
+ CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);
+
+ /*
+ * Configure the Fire MMU TSB Control Register. Determine
+ * the encoding for either 8KB pages (0) or 64KB pages (1).
+ *
+ * Write the most significant 30 bits of the TSB physical address
+ * and the encoded TSB table size.
+ */
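+	/*
+	 * i becomes the largest encoding (8 down to 0) for which an
+	 * (0x2000 << i) byte table still fits within pxu_p->tsb_size.
+	 */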
+ for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--);
+
+ val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
+ ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);
+
+ CSR_XS(csr_base, MMU_TSB_CONTROL, val);
+
+ /*
+ * Enable the MMU, set the "TSB Cache Snoop Enable",
+ * the "Cache Mode", the "Bypass Enable" and
+ * the "Translation Enable" bits.
+ */
+ val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
+ val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
+ | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
+ | (1ull << MMU_CONTROL_AND_STATUS_BE)
+ | (1ull << MMU_CONTROL_AND_STATUS_TE));
+
+ CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);
+
+ /*
+ * Read the register here to ensure that the previous writes to
+ * the Fire MMU registers have been flushed. (Technically, this
+ * is not entirely necessary here as we will likely do later reads
+ * during Fire initialization, but it is a small price to pay for
+ * more modular code.)
+ */
+ (void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
+
+ /*
+ * Enable all primary and secondary interrupts.
+ */
+ val = -1ull;
+ CSR_XS(csr_base, MMU_INTERRUPT_ENABLE, val);
+}
+
+/*
+ * Generic IOMMU Services
+ */
+
+/* ARGSUSED */
+uint64_t
+hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
+ pages_t pages, io_attributes_t io_attributes,
+ void *addr, size_t pfn_index, int flag)
+{
+ tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
+ uint64_t attr = MMU_TTE_V;
+ int i;
+
+ if (io_attributes & PCI_MAP_ATTR_WRITE)
+ attr |= MMU_TTE_W;
+
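+	/*
+	 * MMU_MAP_MP: page frames come from the DMA request's pfn list;
+	 * otherwise 'addr' is a kernel virtual address and each pfn is
+	 * looked up through the kernel HAT.
+	 */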
+ if (flag == MMU_MAP_MP) {
+ ddi_dma_impl_t *mp = (ddi_dma_impl_t *)addr;
+
+ for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
+ px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);
+
+ pxu_p->tsb_vaddr[tsb_index] =
+ MMU_PTOB(pfn) | attr;
+ }
+ } else {
+ caddr_t a = (caddr_t)addr;
+
+ for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
+ px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
+
+ pxu_p->tsb_vaddr[tsb_index] =
+ MMU_PTOB(pfn) | attr;
+ }
+ }
+
+ return (H_EOK);
+}
+
+/* ARGSUSED */
+uint64_t
+hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
+ pages_t pages)
+{
+ tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
+ int i;
+
+ for (i = 0; i < pages; i++, tsb_index++) {
+ pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;
+ }
+
+ return (H_EOK);
+}
+
+/* ARGSUSED */
+uint64_t
+hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
+ io_attributes_t *attributes_p, r_addr_t *r_addr_p)
+{
+ tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
+ uint64_t *tte_addr;
+ uint64_t ret = H_EOK;
+
+ tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;
+
+ if (*tte_addr & MMU_TTE_V) {
+ *r_addr_p = MMU_TTETOPA(*tte_addr);
+ *attributes_p = (*tte_addr & MMU_TTE_W) ?
+ PCI_MAP_ATTR_WRITE:PCI_MAP_ATTR_READ;
+ } else {
+ *r_addr_p = 0;
+ *attributes_p = 0;
+ ret = H_ENOMAP;
+ }
+
+ return (ret);
+}
+
+/* ARGSUSED */
+uint64_t
+hvio_iommu_getbypass(devhandle_t dev_hdl, r_addr_t ra,
+ io_attributes_t io_attributes, io_addr_t *io_addr_p)
+{
+ uint64_t pfn = MMU_BTOP(ra);
+
+ *io_addr_p = MMU_BYPASS_BASE | ra |
+ (pf_is_memory(pfn) ? 0 : MMU_BYPASS_NONCACHE);
+
+ return (H_EOK);
+}
+
+/*
+ * Generic IO Interrupt Services
+ */
+
+/*
+ * Converts a device specific interrupt number given by the
+ * arguments devhandle and devino into a system specific ino.
+ */
+/* ARGSUSED */
+uint64_t
+hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
+ sysino_t *sysino)
+{
+	if (devino >= INTERRUPT_MAPPING_ENTRIES) {
+ DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
+ return (H_ENOINTR);
+ }
+
+ *sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);
+
+ return (H_EOK);
+}
+
+/*
+ * Returns state in intr_valid_state if the interrupt defined by sysino
+ * is valid (enabled) or not-valid (disabled).
+ */
+uint64_t
+hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
+ intr_valid_state_t *intr_valid_state)
+{
+ if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
+ SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
+ *intr_valid_state = INTR_VALID;
+ } else {
+ *intr_valid_state = INTR_NOTVALID;
+ }
+
+ return (H_EOK);
+}
+
+/*
+ * Sets the 'valid' state of the interrupt defined by
+ * the argument sysino to the state defined by the
+ * argument intr_valid_state.
+ */
+uint64_t
+hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
+ intr_valid_state_t intr_valid_state)
+{
+ switch (intr_valid_state) {
+ case INTR_VALID:
+ CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
+ SYSINO_TO_DEVINO(sysino), ENTRIES_V);
+ break;
+ case INTR_NOTVALID:
+ CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
+ SYSINO_TO_DEVINO(sysino), ENTRIES_V);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (H_EOK);
+}
+
+/*
+ * Returns the current state of the interrupt given by the sysino
+ * argument.
+ */
+uint64_t
+hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
+ intr_state_t *intr_state)
+{
+ intr_state_t state;
+
+ state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
+ SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);
+
+ switch (state) {
+ case INTERRUPT_IDLE_STATE:
+ *intr_state = INTR_IDLE_STATE;
+ break;
+ case INTERRUPT_RECEIVED_STATE:
+ *intr_state = INTR_RECEIVED_STATE;
+ break;
+ case INTERRUPT_PENDING_STATE:
+ *intr_state = INTR_DELIVERED_STATE;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+	return (H_EOK);
+}
+
+/*
+ * Sets the current state of the interrupt given by the sysino
+ * argument to the value given in the argument intr_state.
+ *
+ * Note: Setting the state to INTR_IDLE clears any pending
+ * interrupt for sysino.
+ */
+uint64_t
+hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
+ intr_state_t intr_state)
+{
+ intr_state_t state;
+
+ switch (intr_state) {
+ case INTR_IDLE_STATE:
+ state = INTERRUPT_IDLE_STATE;
+ break;
+ case INTR_DELIVERED_STATE:
+ state = INTERRUPT_PENDING_STATE;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
+ SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);
+
+ return (H_EOK);
+}
+
+/*
+ * Returns the cpuid that is the current target of the
+ * interrupt given by the sysino argument.
+ *
+ * The cpuid value returned is undefined if the target
+ * has not been set via intr_settarget.
+ */
+uint64_t
+hvio_intr_gettarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t *cpuid)
+{
+ *cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
+ SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
+
+ return (H_EOK);
+}
+
+/*
+ * Set the target cpu for the interrupt defined by the argument
+ * sysino to the target cpu value defined by the argument cpuid.
+ */
+uint64_t
+hvio_intr_settarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t cpuid)
+{
+	uint64_t val, intr_controller;
+ uint32_t ino = SYSINO_TO_DEVINO(sysino);
+
+ /*
+	 * For now, we assign the interrupt controller in a round-robin
+	 * fashion. Later, we may need to come up with a more efficient
+	 * assignment algorithm.
+ */
+ intr_controller = 0x1ull << (cpuid % 4);
+
+ val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
+ INTERRUPT_MAPPING_ENTRIES_T_JPID) |
+ ((intr_controller & INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
+ << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
+
+ /* For EQ interrupts, set DATA MONDO bit */
+ if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) &&
+ (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT)))
+ val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);
+
+ CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);
+
+ return (H_EOK);
+}
+
+/*
+ * MSIQ Functions:
+ */
+uint64_t
+hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
+{
+ CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
+ (uint64_t)pxu_p->msiq_mapped_p);
+ DBG(DBG_IB, NULL,
+ "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
+ CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));
+
+ CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
+ (uint64_t)ID_TO_IGN(pxu_p->portid) << INO_BITS);
+ DBG(DBG_IB, NULL, "hvio_msiq_init: "
+ "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
+ CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));
+
+ return (H_EOK);
+}
+
+uint64_t
+hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
+ pci_msiq_valid_state_t *msiq_valid_state)
+{
+ uint32_t eq_state;
+ uint64_t ret = H_EOK;
+
+ eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
+ msiq_id, ENTRIES_STATE);
+
+ switch (eq_state) {
+ case EQ_IDLE_STATE:
+ *msiq_valid_state = PCI_MSIQ_INVALID;
+ break;
+ case EQ_ACTIVE_STATE:
+ case EQ_ERROR_STATE:
+ *msiq_valid_state = PCI_MSIQ_VALID;
+ break;
+ default:
+ ret = H_EIO;
+ break;
+ }
+
+ return (ret);
+}
+
+uint64_t
+hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
+ pci_msiq_valid_state_t msiq_valid_state)
+{
+ uint64_t ret = H_EOK;
+
+ switch (msiq_valid_state) {
+ case PCI_MSIQ_INVALID:
+ CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
+ msiq_id, ENTRIES_DIS);
+ break;
+ case PCI_MSIQ_VALID:
+ CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
+ msiq_id, ENTRIES_EN);
+ break;
+ default:
+ ret = H_EINVAL;
+ break;
+ }
+
+ return (ret);
+}
+
+uint64_t
+hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
+ pci_msiq_state_t *msiq_state)
+{
+ uint32_t eq_state;
+ uint64_t ret = H_EOK;
+
+ eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
+ msiq_id, ENTRIES_STATE);
+
+ switch (eq_state) {
+ case EQ_IDLE_STATE:
+ case EQ_ACTIVE_STATE:
+ *msiq_state = PCI_MSIQ_STATE_IDLE;
+ break;
+ case EQ_ERROR_STATE:
+ *msiq_state = PCI_MSIQ_STATE_ERROR;
+ break;
+ default:
+ ret = H_EIO;
+ }
+
+ return (ret);
+}
+
+uint64_t
+hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
+ pci_msiq_state_t msiq_state)
+{
+ uint32_t eq_state;
+ uint64_t ret = H_EOK;
+
+ eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
+ msiq_id, ENTRIES_STATE);
+
+ switch (eq_state) {
+ case EQ_IDLE_STATE:
+ if (msiq_state == PCI_MSIQ_STATE_ERROR)
+ ret = H_EIO;
+ break;
+ case EQ_ACTIVE_STATE:
+ if (msiq_state == PCI_MSIQ_STATE_ERROR)
+ CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
+ msiq_id, ENTRIES_ENOVERR);
+ else
+ ret = H_EIO;
+ break;
+ case EQ_ERROR_STATE:
+ if (msiq_state == PCI_MSIQ_STATE_IDLE)
+ CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
+ msiq_id, ENTRIES_E2I);
+ else
+ ret = H_EIO;
+ break;
+ default:
+ ret = H_EIO;
+ }
+
+ return (ret);
+}
+
+uint64_t
+hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
+ msiqhead_t *msiq_head)
+{
+ *msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
+ msiq_id, ENTRIES_HEAD);
+
+ return (H_EOK);
+}
+
+uint64_t
+hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
+ msiqhead_t msiq_head)
+{
+ CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
+ ENTRIES_HEAD, msiq_head);
+
+ return (H_EOK);
+}
+
+uint64_t
+hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
+ msiqtail_t *msiq_tail)
+{
+ *msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
+ msiq_id, ENTRIES_TAIL);
+
+ return (H_EOK);
+}
+
+/*
+ * MSI Functions:
+ */
+uint64_t
+hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
+{
+ /* PCI MEM 32 resources to perform 32 bit MSI transactions */
+ CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
+ ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
+	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
+ CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));
+
+ /* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
+ CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
+ ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
+	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
+ CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));
+
+ return (H_EOK);
+}
+
+uint64_t
+hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
+ msiqid_t *msiq_id)
+{
+ *msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
+ msi_num, ENTRIES_EQNUM);
+
+ return (H_EOK);
+}
+
+uint64_t
+hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
+ msiqid_t msiq_id)
+{
+ CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
+ ENTRIES_EQNUM, msiq_id);
+
+ return (H_EOK);
+}
+
+uint64_t
+hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
+ pci_msi_valid_state_t *msi_valid_state)
+{
+ *msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
+ msi_num, ENTRIES_V);
+
+ return (H_EOK);
+}
+
+uint64_t
+hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
+ pci_msi_valid_state_t msi_valid_state)
+{
+ uint64_t ret = H_EOK;
+
+ switch (msi_valid_state) {
+ case PCI_MSI_VALID:
+ CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
+ ENTRIES_V);
+ break;
+ case PCI_MSI_INVALID:
+ CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
+ ENTRIES_V);
+ break;
+ default:
+ ret = H_EINVAL;
+ }
+
+ return (ret);
+}
+
+uint64_t
+hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
+ pci_msi_state_t *msi_state)
+{
+ *msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
+ msi_num, ENTRIES_EQWR_N);
+
+ return (H_EOK);
+}
+
+uint64_t
+hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
+ pci_msi_state_t msi_state)
+{
+ uint64_t ret = H_EOK;
+
+ switch (msi_state) {
+ case PCI_MSI_STATE_IDLE:
+ CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
+ ENTRIES_EQWR_N);
+ break;
+ case PCI_MSI_STATE_DELIVERED:
+ default:
+ ret = H_EINVAL;
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * MSG Functions:
+ */
+uint64_t
+hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
+ msiqid_t *msiq_id)
+{
+ uint64_t ret = H_EOK;
+
+ switch (msg_type) {
+ case PCIE_PME_MSG:
+ *msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
+ break;
+ case PCIE_PME_ACK_MSG:
+ *msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
+ EQNUM);
+ break;
+ case PCIE_CORR_MSG:
+ *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
+ break;
+ case PCIE_NONFATAL_MSG:
+ *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
+ EQNUM);
+ break;
+ case PCIE_FATAL_MSG:
+ *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
+ break;
+ default:
+ ret = H_EINVAL;
+ break;
+ }
+
+ return (ret);
+}
+
+uint64_t
+hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
+ msiqid_t msiq_id)
+{
+ uint64_t ret = H_EOK;
+
+ switch (msg_type) {
+ case PCIE_PME_MSG:
+ CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
+ break;
+ case PCIE_PME_ACK_MSG:
+ CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
+ break;
+ case PCIE_CORR_MSG:
+ CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
+ break;
+ case PCIE_NONFATAL_MSG:
+ CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
+ break;
+ case PCIE_FATAL_MSG:
+ CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
+ break;
+ default:
+ ret = H_EINVAL;
+ break;
+ }
+
+ return (ret);
+}
+
+uint64_t
+hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
+ pcie_msg_valid_state_t *msg_valid_state)
+{
+ uint64_t ret = H_EOK;
+
+ switch (msg_type) {
+ case PCIE_PME_MSG:
+ *msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
+ break;
+ case PCIE_PME_ACK_MSG:
+ *msg_valid_state = CSR_BR((caddr_t)dev_hdl,
+ PME_TO_ACK_MAPPING, V);
+ break;
+ case PCIE_CORR_MSG:
+ *msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
+ break;
+ case PCIE_NONFATAL_MSG:
+ *msg_valid_state = CSR_BR((caddr_t)dev_hdl,
+ ERR_NONFATAL_MAPPING, V);
+ break;
+ case PCIE_FATAL_MSG:
+ *msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
+ V);
+ break;
+ default:
+ ret = H_EINVAL;
+ break;
+ }
+
+ return (ret);
+}
+
+uint64_t
+hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
+ pcie_msg_valid_state_t msg_valid_state)
+{
+ uint64_t ret = H_EOK;
+
+ switch (msg_valid_state) {
+ case PCIE_MSG_VALID:
+ switch (msg_type) {
+ case PCIE_PME_MSG:
+ CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
+ break;
+ case PCIE_PME_ACK_MSG:
+ CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
+ break;
+ case PCIE_CORR_MSG:
+ CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
+ break;
+ case PCIE_NONFATAL_MSG:
+ CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
+ break;
+ case PCIE_FATAL_MSG:
+ CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
+ break;
+ default:
+ ret = H_EINVAL;
+ break;
+ }
+
+ break;
+ case PCIE_MSG_INVALID:
+ switch (msg_type) {
+ case PCIE_PME_MSG:
+ CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
+ break;
+ case PCIE_PME_ACK_MSG:
+ CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
+ break;
+ case PCIE_CORR_MSG:
+ CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
+ break;
+ case PCIE_NONFATAL_MSG:
+ CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
+ break;
+ case PCIE_FATAL_MSG:
+ CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
+ break;
+ default:
+ ret = H_EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = H_EINVAL;
+ }
+
+ return (ret);
+}
+
+/*
+ * Suspend/Resume Functions:
+ * (pec, mmu, ib)
+ * cb
+ * Registers saved have all been touched in the XXX_init functions.
+ */
+uint64_t
+hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
+{
+ uint64_t *config_state;
+ int total_size;
+ int i;
+
+ if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
+ return (H_EIO);
+
+ total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
+ config_state = kmem_zalloc(total_size, KM_NOSLEEP);
+
+ if (config_state == NULL) {
+ return (H_EIO);
+ }
+
+ /*
+ * Soft state for suspend/resume from pxu_t
+ * uint64_t *pec_config_state;
+ * uint64_t *mmu_config_state;
+ * uint64_t *ib_intr_map;
+ * uint64_t *ib_config_state;
+ * uint64_t *xcb_config_state;
+ */
+
+ /* Save the PEC configuration states */
+ pxu_p->pec_config_state = config_state;
+ for (i = 0; i < PEC_KEYS; i++) {
+ pxu_p->pec_config_state[i] =
+ CSR_XR((caddr_t)dev_hdl, pec_config_state_regs[i]);
+ }
+
+ /* Save the MMU configuration states */
+ pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
+ for (i = 0; i < MMU_KEYS; i++) {
+ pxu_p->mmu_config_state[i] =
+ CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
+ }
+
+ /* Save the interrupt mapping registers */
+ pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
+ for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
+ pxu_p->ib_intr_map[i] =
+ CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
+ }
+
+ /* Save the IB configuration states */
+ pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
+ for (i = 0; i < IB_KEYS; i++) {
+ pxu_p->ib_config_state[i] =
+ CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
+ }
+
+ return (H_EOK);
+}
+
+void
+hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
+{
+ int total_size;
+ sysino_t sysino;
+ int i;
+
+ /* Make sure that suspend actually did occur */
+ if (!pxu_p->pec_config_state) {
+ return;
+ }
+
+ /* Restore IB configuration states */
+ for (i = 0; i < IB_KEYS; i++) {
+ CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
+ pxu_p->ib_config_state[i]);
+ }
+
+ /*
+	 * Restore the interrupt mapping registers and make sure the
+	 * interrupts are idle.
+ */
+ for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
+ CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
+ ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
+ CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
+ pxu_p->ib_intr_map[i]);
+ }
+
+ /* Restore MMU configuration states */
+ /* Clear the cache. */
+ CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);
+
+ for (i = 0; i < MMU_KEYS; i++) {
+ CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
+ pxu_p->mmu_config_state[i]);
+ }
+
+ /* Restore PEC configuration states */
+ /* Make sure all reset bits are low until error is detected */
+ CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);
+
+ for (i = 0; i < PEC_KEYS; i++) {
+ CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i],
+ pxu_p->pec_config_state[i]);
+ }
+
+ /* Enable PCI-E interrupt */
+ (void) hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino, &sysino);
+
+ (void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE);
+
+ total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
+ kmem_free(pxu_p->pec_config_state, total_size);
+
+ pxu_p->pec_config_state = NULL;
+ pxu_p->mmu_config_state = NULL;
+ pxu_p->ib_config_state = NULL;
+ pxu_p->ib_intr_map = NULL;
+
+ msiq_resume(dev_hdl, pxu_p);
+}
+
+uint64_t
+hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
+{
+ uint64_t *config_state;
+ int i;
+
+ config_state = kmem_zalloc(CB_SIZE, KM_NOSLEEP);
+
+ if (config_state == NULL) {
+ return (H_EIO);
+ }
+
+ /* Save the configuration states */
+ pxu_p->xcb_config_state = config_state;
+ for (i = 0; i < CB_KEYS; i++) {
+ pxu_p->xcb_config_state[i] =
+ CSR_XR((caddr_t)dev_hdl, cb_config_state_regs[i]);
+ }
+
+ return (H_EOK);
+}
+
+void
+hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
+ devino_t devino, pxu_t *pxu_p)
+{
+ sysino_t sysino;
+ int i;
+
+ /*
+ * No reason to have any reset bits high until an error is
+ * detected on the link.
+ */
+ CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
+
+ ASSERT(pxu_p->xcb_config_state);
+
+ /* Restore the configuration states */
+ for (i = 0; i < CB_KEYS; i++) {
+ CSR_XS((caddr_t)xbus_dev_hdl, cb_config_state_regs[i],
+ pxu_p->xcb_config_state[i]);
+ }
+
+ /* Enable XBC interrupt */
+ (void) hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino, &sysino);
+
+ (void) hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE);
+
+ kmem_free(pxu_p->xcb_config_state, CB_SIZE);
+
+ pxu_p->xcb_config_state = NULL;
+}
+
+static uint64_t
+msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
+{
+ size_t bufsz;
+ volatile uint64_t *cur_p;
+ int i;
+
+ bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
+ if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
+ NULL)
+ return (H_EIO);
+
+ cur_p = pxu_p->msiq_config_state;
+
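+	/*
+	 * Buffer layout: EQ states, then MSI mapping entries, then the
+	 * remaining MSIQ registers.  msiq_resume() consumes the buffer
+	 * in the same order.
+	 */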
+ /* Save each EQ state */
+ for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
+ *cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);
+
+ /* Save MSI mapping registers */
+ for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
+ *cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);
+
+ /* Save all other MSIQ registers */
+ for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
+ *cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);
+ return (H_EOK);
+}
+
+static void
+msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
+{
+ size_t bufsz;
+ uint64_t *cur_p;
+ int i;
+
+ bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
+ cur_p = pxu_p->msiq_config_state;
+ /*
+ * Initialize EQ base address register and
+ * Interrupt Mondo Data 0 register.
+ */
+ (void) hvio_msiq_init(dev_hdl, pxu_p);
+
+ /* Restore EQ states */
+ for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
+ if (((*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK) ==
+ EQ_ACTIVE_STATE) {
+ CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
+ i, ENTRIES_EN);
+ }
+ }
+
+ /* Restore MSI mapping */
+ for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
+ CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);
+
+ /*
+ * Restore all other registers. MSI 32 bit address and
+ * MSI 64 bit address are restored as part of this.
+ */
+ for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
+ CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);
+
+ kmem_free(pxu_p->msiq_config_state, bufsz);
+ pxu_p->msiq_config_state = NULL;
+}
+
+/*
+ * Sends a PME_Turn_Off message to put the link into the L2/L3 Ready state.
+ * Called by px_goto_l23ready().
+ * Returns DDI_SUCCESS or DDI_FAILURE.
+ */
+int
+px_send_pme_turnoff(caddr_t csr_base)
+{
+ volatile uint64_t reg;
+
+ /* TBD: Wait for link to be in L1 state (link status reg) */
+
+ reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
+ /* If already pending, return failure */
+ if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
+ return (DDI_FAILURE);
+ }
+	/* Write to the PME_Turn_Off register to broadcast the message. */
+ reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
+ CSR_XS(csr_base, TLU_PME_TURN_OFF_GENERATE, reg);
+ return (DDI_SUCCESS);
+}
diff --git a/usr/src/uts/sun4u/io/px/px_lib4u.c b/usr/src/uts/sun4u/io/px/px_lib4u.c
new file mode 100644
index 0000000000..a07c7cbdfb
--- /dev/null
+++ b/usr/src/uts/sun4u/io/px/px_lib4u.c
@@ -0,0 +1,1566 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/modctl.h>
+#include <sys/disp.h>
+#include <sys/stat.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/vmem.h>
+#include <sys/iommutsb.h>
+#include <sys/cpuvar.h>
+#include <px_obj.h>
+#include <pcie_pwr.h>
+#include "px_tools_var.h"
+#include <px_regs.h>
+#include <px_csr.h>
+#include "px_lib4u.h"
+
+#pragma weak jbus_stst_order
+
+extern void jbus_stst_order();
+
+ulong_t px_mmu_dvma_end = 0xfffffffful;
+uint_t px_ranges_phi_mask = 0xfffffffful;
+
+static int px_goto_l23ready(px_t *px_p);
+static uint32_t px_identity_chip(px_t *px_p);
+
+int
+px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ caddr_t xbc_csr_base = (caddr_t)px_p->px_address[PX_REG_XBC];
+ caddr_t csr_base = (caddr_t)px_p->px_address[PX_REG_CSR];
+ px_dvma_range_prop_t px_dvma_range;
+ uint32_t chip_id;
+ pxu_t *pxu_p;
+
+ DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);
+
+ if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED)
+ return (DDI_FAILURE);
+
+ switch (chip_id) {
+ case FIRE_VER_10:
+ DBG(DBG_ATTACH, dip, "FIRE Hardware Version 1.0\n");
+ break;
+ case FIRE_VER_20:
+ DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n");
+ break;
+ default:
+ cmn_err(CE_WARN, "%s(%d): FIRE Hardware Version Unknown\n",
+ ddi_driver_name(dip), ddi_get_instance(dip));
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * Allocate platform specific structure and link it to
+ * the px state structure.
+ */
+ pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
+
+ pxu_p->chip_id = chip_id;
+ pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+ "portid", -1);
+
+ /*
+ * XXX - Move all ddi_regs_map_setup() from px_util.c
+	 * to this file before complete virtualization.
+ */
+ pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
+ pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
+ pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);
+
+ /*
+ * Create "virtual-dma" property to support child devices
+ * needing to know DVMA range.
+ */
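+	/*
+	 * The DVMA window ends at px_mmu_dvma_end and covers one MMU page
+	 * per TSB entry (tsb_size / 8 entries).
+	 */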
+ px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
+ - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
+ px_dvma_range.dvma_len = (uint32_t)
+ px_mmu_dvma_end - px_dvma_range.dvma_base + 1;
+
+ (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
+ "virtual-dma", (caddr_t)&px_dvma_range,
+ sizeof (px_dvma_range_prop_t));
+ /*
+	 * Initialize all Fire hardware-specific blocks.
+ */
+ hvio_cb_init(xbc_csr_base, pxu_p);
+ hvio_ib_init(csr_base, pxu_p);
+ hvio_pec_init(csr_base, pxu_p);
+ hvio_mmu_init(csr_base, pxu_p);
+
+ px_p->px_plat_p = (void *)pxu_p;
+
+	/* Initialize the device handle */
+ *dev_hdl = (devhandle_t)csr_base;
+
+ DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);
+
+ return (DDI_SUCCESS);
+}
+
+int
+px_lib_dev_fini(dev_info_t *dip)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+
+ DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);
+
+ iommu_tsb_free(pxu_p->tsb_cookie);
+
+ px_p->px_plat_p = NULL;
+ kmem_free(pxu_p, sizeof (pxu_t));
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
+ sysino_t *sysino)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ uint64_t ret;
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
+ "devino 0x%x\n", dip, devino);
+
+ if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
+ pxu_p, devino, sysino)) != H_EOK) {
+ DBG(DBG_LIB_INT, dip,
+ "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
+ *sysino);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
+ intr_valid_state_t *intr_valid_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
+ dip, sysino);
+
+ if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
+ sysino, intr_valid_state)) != H_EOK) {
+ DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
+ ret);
+ return (DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
+ *intr_valid_state);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
+ intr_valid_state_t intr_valid_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
+ "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);
+
+ if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
+ sysino, intr_valid_state)) != H_EOK) {
+ DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
+ ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
+ intr_state_t *intr_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
+ dip, sysino);
+
+ if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
+ sysino, intr_state)) != H_EOK) {
+ DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
+ ret);
+ return (DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
+ *intr_state);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
+ intr_state_t intr_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
+ "intr_state 0x%x\n", dip, sysino, intr_state);
+
+ if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
+ sysino, intr_state)) != H_EOK) {
+ DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
+ ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
+ dip, sysino);
+
+ if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip),
+ sysino, cpuid)) != H_EOK) {
+ DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
+ ret);
+ return (DDI_FAILURE);
+ }
+
+	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
+ "cpuid 0x%x\n", dip, sysino, cpuid);
+
+ if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip),
+ sysino, cpuid)) != H_EOK) {
+ DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
+ ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_intr_reset(dev_info_t *dip)
+{
+ devino_t ino;
+ sysino_t sysino;
+
+ DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);
+
+ /* Reset all Interrupts */
+ for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
+ if (px_lib_intr_devino_to_sysino(dip, ino,
+ &sysino) != DDI_SUCCESS)
+ return (BF_FATAL);
+
+ if (px_lib_intr_setstate(dip, sysino,
+ INTR_IDLE_STATE) != DDI_SUCCESS)
+ return (BF_FATAL);
+ }
+
+ return (BF_NONE);
+}
+
+/*ARGSUSED*/
+int
+px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
+ io_attributes_t io_attributes, void *addr, size_t pfn_index,
+ int flag)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ uint64_t ret;
+
+ DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
+	    "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx, flag 0x%x\n",
+ dip, tsbid, pages, io_attributes, addr, pfn_index, flag);
+
+ if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
+ io_attributes, addr, pfn_index, flag)) != H_EOK) {
+ DBG(DBG_LIB_DMA, dip,
+ "px_lib_iommu_map failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ uint64_t ret;
+
+ DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
+ "pages 0x%x\n", dip, tsbid, pages);
+
+ if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
+ != H_EOK) {
+ DBG(DBG_LIB_DMA, dip,
+ "px_lib_iommu_demap failed, ret 0x%lx\n", ret);
+
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid,
+ io_attributes_t *attributes_p, r_addr_t *r_addr_p)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ uint64_t ret;
+
+ DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
+ dip, tsbid);
+
+ if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
+ attributes_p, r_addr_p)) != H_EOK) {
+ DBG(DBG_LIB_DMA, dip,
+ "hvio_iommu_getmap failed, ret 0x%lx\n", ret);
+
+ return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
+ *attributes_p, *r_addr_p);
+
+ return (DDI_SUCCESS);
+}
+
+
+/*
+ * Checks dma attributes against system bypass ranges
+ * The bypass range is determined by the hardware. Return them so the
+ * common code can do generic checking against them.
+ */
+/*ARGSUSED*/
+int
+px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attrp, uint64_t *lo_p, uint64_t *hi_p)
+{
+ *lo_p = MMU_BYPASS_BASE;
+ *hi_p = MMU_BYPASS_END;
+
+ return (DDI_SUCCESS);
+}
+
+
+/*ARGSUSED*/
+int
+px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra,
+ io_attributes_t io_attributes, io_addr_t *io_addr_p)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
+ "attr 0x%x\n", dip, ra, io_attributes);
+
+ if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra,
+ io_attributes, io_addr_p)) != H_EOK) {
+ DBG(DBG_LIB_DMA, dip,
+ "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
+ *io_addr_p);
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * bus dma sync entry point.
+ */
+/*ARGSUSED*/
+int
+px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
+ off_t off, size_t len, uint_t cache_flags)
+{
+ ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
+
+ DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
+ "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
+ dip, rdip, handle, off, len, cache_flags);
+
+ /*
+ * jbus_stst_order is found only in certain cpu modules.
+ * Just return success if not present.
+ */
+ if (&jbus_stst_order == NULL)
+ return (DDI_SUCCESS);
+
+ if (!(mp->dmai_flags & DMAI_FLAGS_INUSE)) {
+ cmn_err(CE_WARN, "Unbound dma handle %p from %s%d", (void *)mp,
+ ddi_driver_name(rdip), ddi_get_instance(rdip));
+ return (DDI_FAILURE);
+ }
+
+ if (mp->dmai_flags & DMAI_FLAGS_NOSYNC)
+ return (DDI_SUCCESS);
+
+ /*
+	 * No flush is needed when sending data from memory to the device;
+	 * there is nothing to do to "sync" memory to what the device
+	 * would already see.
+ */
+ if (!(mp->dmai_rflags & DDI_DMA_READ) ||
+ ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
+ return (DDI_SUCCESS);
+
+ /*
+	 * Perform the necessary CPU workaround to ensure JBus ordering:
+	 * the CPU's internal "invalidate FIFOs" are flushed.
+ */
+
+#if !defined(lint)
+ kpreempt_disable();
+#endif
+ jbus_stst_order();
+#if !defined(lint)
+ kpreempt_enable();
+#endif
+ return (DDI_SUCCESS);
+}
+
+/*
+ * MSIQ Functions:
+ */
+/*ARGSUSED*/
+int
+px_lib_msiq_init(dev_info_t *dip)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
+ caddr_t msiq_addr;
+ px_dvma_addr_t pg_index;
+ size_t size;
+ int ret;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);
+
+ /*
+ * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
+ * and then initialize the base address register.
+ *
+ * Allocate entries from Fire IOMMU so that the resulting address
+ * is properly aligned. Calculate the index of the first allocated
+ * entry. Note: The size of the mapping is assumed to be a multiple
+ * of the page size.
+ */
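+	/* Round the EQ buffer up to the next MMU page boundary. */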
+ msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p +
+ (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
+
+ size = msiq_state_p->msiq_cnt *
+ msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
+
+ pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
+ size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);
+
+ if (pxu_p->msiq_mapped_p == NULL)
+ return (DDI_FAILURE);
+
+ pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
+ MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
+
+ if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
+ MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0,
+ MMU_MAP_BUF)) != DDI_SUCCESS) {
+ DBG(DBG_LIB_MSIQ, dip,
+		    "px_lib_iommu_map failed, ret 0x%x\n", ret);
+
+ (void) px_lib_msiq_fini(dip);
+ return (DDI_FAILURE);
+ }
+
+ (void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msiq_fini(dev_info_t *dip)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
+ px_dvma_addr_t pg_index;
+ size_t size;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
+
+ /*
+ * Unmap and free the EQ memory that had been mapped
+ * into the Fire IOMMU.
+ */
+ size = msiq_state_p->msiq_cnt *
+ msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
+
+ pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
+ MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
+
+ (void) px_lib_iommu_demap(px_p->px_dip,
+ PCI_TSBID(0, pg_index), MMU_BTOP(size));
+
+ /* Free the entries from the Fire MMU */
+ vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
+ (void *)pxu_p->msiq_mapped_p, size);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
+ uint_t *msiq_rec_cnt_p)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
+ uint64_t *msiq_addr;
+ size_t msiq_size;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
+ dip, msiq_id);
+
+ msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p +
+ (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
+ msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
+	*ra_p = (r_addr_t)((caddr_t)msiq_addr + (msiq_id * msiq_size));
+
+	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;
+
+	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%llx msiq_rec_cnt 0x%x\n",
+	    *ra_p, *msiq_rec_cnt_p);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
+ pci_msiq_valid_state_t *msiq_valid_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
+ dip, msiq_id);
+
+ if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
+ msiq_id, msiq_valid_state)) != H_EOK) {
+ DBG(DBG_LIB_MSIQ, dip,
+ "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
+ *msiq_valid_state);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
+ pci_msiq_valid_state_t msiq_valid_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
+ "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);
+
+ if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
+ msiq_id, msiq_valid_state)) != H_EOK) {
+ DBG(DBG_LIB_MSIQ, dip,
+ "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
+ pci_msiq_state_t *msiq_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
+ dip, msiq_id);
+
+ if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
+ msiq_id, msiq_state)) != H_EOK) {
+ DBG(DBG_LIB_MSIQ, dip,
+ "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
+ *msiq_state);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
+ pci_msiq_state_t msiq_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
+ "msiq_state 0x%x\n", dip, msiq_id, msiq_state);
+
+ if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
+ msiq_id, msiq_state)) != H_EOK) {
+ DBG(DBG_LIB_MSIQ, dip,
+ "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
+ msiqhead_t *msiq_head)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
+ dip, msiq_id);
+
+ if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
+ msiq_id, msiq_head)) != H_EOK) {
+ DBG(DBG_LIB_MSIQ, dip,
+ "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
+ *msiq_head);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
+ msiqhead_t msiq_head)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
+ "msiq_head 0x%x\n", dip, msiq_id, msiq_head);
+
+ if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
+ msiq_id, msiq_head)) != H_EOK) {
+ DBG(DBG_LIB_MSIQ, dip,
+ "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
+ msiqtail_t *msiq_tail)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
+ dip, msiq_id);
+
+ if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
+ msiq_id, msiq_tail)) != H_EOK) {
+ DBG(DBG_LIB_MSIQ, dip,
+ "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
+ *msiq_tail);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+void
+px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr;
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
+ dip, eq_rec_p);
+
+ if (!eq_rec_p->eq_rec_rid) {
+ /* Set msiq_rec_rid to zero */
+ msiq_rec_p->msiq_rec_rid = 0;
+
+ return;
+ }
+
+ DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
+ "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
+ "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
+ "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
+ "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
+ eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
+ eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
+ eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);
+
+ /*
+	 * Only the upper 4 bits of eq_rec_fmt_type are used
+ * to identify the EQ record type.
+ */
+ switch (eq_rec_p->eq_rec_fmt_type >> 3) {
+ case EQ_REC_MSI32:
+ msiq_rec_p->msiq_rec_type = MSI32_REC;
+
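+		/*
+		 * Fire 1.0 presents the 16-bit MSI data with its two
+		 * bytes swapped; swap them back here.  Fire 2.0 delivers
+		 * the data as is.
+		 */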
+ if (pxu_p->chip_id == FIRE_VER_10) {
+ msiq_rec_p->msiq_rec_data.msi.msi_data =
+ (eq_rec_p->eq_rec_data0 & 0xFF) << 8 |
+ (eq_rec_p->eq_rec_data0 & 0xFF00) >> 8;
+ } else {
+ /* Default case is FIRE2.0 */
+ msiq_rec_p->msiq_rec_data.msi.msi_data =
+ eq_rec_p->eq_rec_data0;
+ }
+
+ break;
+ case EQ_REC_MSI64:
+ msiq_rec_p->msiq_rec_type = MSI64_REC;
+
+ if (pxu_p->chip_id == FIRE_VER_10) {
+ msiq_rec_p->msiq_rec_data.msi.msi_data =
+ (eq_rec_p->eq_rec_data0 & 0xFF) << 8 |
+ (eq_rec_p->eq_rec_data0 & 0xFF00) >> 8;
+ } else {
+ /* Default case is FIRE2.0 */
+ msiq_rec_p->msiq_rec_data.msi.msi_data =
+ eq_rec_p->eq_rec_data0;
+ }
+
+ break;
+ case EQ_REC_MSG:
+ msiq_rec_p->msiq_rec_type = MSG_REC;
+
+ msiq_rec_p->msiq_rec_data.msg.msg_route =
+ eq_rec_p->eq_rec_fmt_type & 7;
+ msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
+ msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
+ break;
+ default:
+ cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
+ "0x%lx is an unknown EQ record type",
+ ddi_driver_name(dip), ddi_get_instance(dip),
+ eq_rec_p->eq_rec_fmt_type);
+ break;
+ }
+
+ msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
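+	/*
+	 * Reassemble the MSI address: the EQ record carries it split
+	 * across eq_rec_addr1 (address bits 63:16) and eq_rec_addr0
+	 * (address bits 15:2).
+	 */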
+ msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
+ (eq_rec_p->eq_rec_addr0 << 2));
+
+ /* Zero out eq_rec_rid field */
+ eq_rec_p->eq_rec_rid = 0;
+}
+
+/*
+ * MSI Functions:
+ */
+/*ARGSUSED*/
+int
+px_lib_msi_init(dev_info_t *dip)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state;
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);
+
+ if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
+ msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
+		DBG(DBG_LIB_MSI, dip, "px_lib_msi_init failed, ret 0x%lx\n",
+ ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
+ msiqid_t *msiq_id)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
+ dip, msi_num);
+
+ if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
+ msi_num, msiq_id)) != H_EOK) {
+ DBG(DBG_LIB_MSI, dip,
+ "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
+ *msiq_id);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
+ msiqid_t msiq_id, msi_type_t msitype)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
+	    "msiq_id 0x%x\n", dip, msi_num, msiq_id);
+
+ if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
+ msi_num, msiq_id)) != H_EOK) {
+ DBG(DBG_LIB_MSI, dip,
+ "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
+ pci_msi_valid_state_t *msi_valid_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
+ dip, msi_num);
+
+ if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
+ msi_num, msi_valid_state)) != H_EOK) {
+ DBG(DBG_LIB_MSI, dip,
+ "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
+ *msi_valid_state);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
+ pci_msi_valid_state_t msi_valid_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
+ "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);
+
+ if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
+ msi_num, msi_valid_state)) != H_EOK) {
+ DBG(DBG_LIB_MSI, dip,
+ "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
+ pci_msi_state_t *msi_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
+ dip, msi_num);
+
+ if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
+ msi_num, msi_state)) != H_EOK) {
+ DBG(DBG_LIB_MSI, dip,
+ "hvio_msi_getstate failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
+ *msi_state);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
+ pci_msi_state_t msi_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
+ "msi_state 0x%x\n", dip, msi_num, msi_state);
+
+ if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
+ msi_num, msi_state)) != H_EOK) {
+ DBG(DBG_LIB_MSI, dip,
+ "hvio_msi_setstate failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * MSG Functions:
+ */
+/*ARGSUSED*/
+int
+px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
+ msiqid_t *msiq_id)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
+ dip, msg_type);
+
+ if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
+ msg_type, msiq_id)) != H_EOK) {
+ DBG(DBG_LIB_MSG, dip,
+ "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
+ *msiq_id);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
+ msiqid_t msiq_id)
+{
+ uint64_t ret;
+
+	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
+ "msiq_id 0x%x\n", dip, msg_type, msiq_id);
+
+ if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
+ msg_type, msiq_id)) != H_EOK) {
+ DBG(DBG_LIB_MSG, dip,
+ "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
+ pcie_msg_valid_state_t *msg_valid_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
+ dip, msg_type);
+
+ if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
+ msg_valid_state)) != H_EOK) {
+ DBG(DBG_LIB_MSG, dip,
+ "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
+ *msg_valid_state);
+
+ return (DDI_SUCCESS);
+}
+
+/*ARGSUSED*/
+int
+px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
+ pcie_msg_valid_state_t msg_valid_state)
+{
+ uint64_t ret;
+
+ DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
+ "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);
+
+ if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
+ msg_valid_state)) != H_EOK) {
+ DBG(DBG_LIB_MSG, dip,
+ "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * Suspend/Resume Functions:
+ * Currently unsupported by hypervisor
+ */
+int
+px_lib_suspend(dev_info_t *dip)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ devhandle_t dev_hdl, xbus_dev_hdl;
+ uint64_t ret;
+
+ DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);
+
+ dev_hdl = (devhandle_t)px_p->px_address[PX_REG_CSR];
+ xbus_dev_hdl = (devhandle_t)px_p->px_address[PX_REG_XBC];
+
+ if ((ret = hvio_suspend(dev_hdl, pxu_p)) == H_EOK) {
+ px_p->px_cb_p->xbc_attachcnt--;
+ if (px_p->px_cb_p->xbc_attachcnt == 0)
+ if ((ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p))
+ != H_EOK)
+ px_p->px_cb_p->xbc_attachcnt++;
+ }
+
+ return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS);
+}
+
+void
+px_lib_resume(dev_info_t *dip)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ devhandle_t dev_hdl, xbus_dev_hdl;
+ devino_t pec_ino = px_p->px_inos[PX_INTR_PEC];
+ devino_t xbc_ino = px_p->px_inos[PX_INTR_XBC];
+
+ DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);
+
+ dev_hdl = (devhandle_t)px_p->px_address[PX_REG_CSR];
+ xbus_dev_hdl = (devhandle_t)px_p->px_address[PX_REG_XBC];
+
+ px_p->px_cb_p->xbc_attachcnt++;
+ if (px_p->px_cb_p->xbc_attachcnt == 1)
+ hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
+ hvio_resume(dev_hdl, pec_ino, pxu_p);
+}
+
+/*
+ * PCI tool Functions:
+ * Currently unsupported by hypervisor
+ */
+/*ARGSUSED*/
+int
+px_lib_tools_dev_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+
+ DBG(DBG_TOOLS, dip, "px_lib_tools_dev_reg_ops: dip 0x%p arg 0x%p "
+ "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode);
+
+ return (px_dev_reg_ops(dip, arg, cmd, mode, px_p));
+}
+
+/*ARGSUSED*/
+int
+px_lib_tools_bus_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
+{
+ DBG(DBG_TOOLS, dip, "px_lib_tools_bus_reg_ops: dip 0x%p arg 0x%p "
+ "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode);
+
+ return (px_bus_reg_ops(dip, arg, cmd, mode));
+}
+
+/*ARGSUSED*/
+int
+px_lib_tools_intr_admn(dev_info_t *dip, void *arg, int cmd, int mode)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+
+ DBG(DBG_TOOLS, dip, "px_lib_tools_intr_admn: dip 0x%p arg 0x%p "
+ "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode);
+
+ return (px_intr_admn(dip, arg, cmd, mode, px_p));
+}
+
+/*
+ * Misc Functions:
+ * Currently unsupported by hypervisor
+ */
+uint64_t
+px_lib_get_cb(caddr_t csr)
+{
+ return (CSR_XR(csr, JBUS_SCRATCH_1));
+}
+
+void
+px_lib_set_cb(caddr_t csr, uint64_t val)
+{
+ CSR_XS(csr, JBUS_SCRATCH_1, val);
+}
+
+/*ARGSUSED*/
+int
+px_lib_map_vconfig(dev_info_t *dip,
+ ddi_map_req_t *mp, pci_config_offset_t off,
+ pci_regspec_t *rp, caddr_t *addrp)
+{
+ /*
+ * No special config space access services in this layer.
+ */
+ return (DDI_FAILURE);
+}
+
+#ifdef DEBUG
+int px_peekfault_cnt = 0;
+int px_pokefault_cnt = 0;
+#endif /* DEBUG */
+
+/*ARGSUSED*/
+static int
+px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
+ peekpoke_ctlops_t *in_args)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ px_pec_t *pec_p = px_p->px_pec_p;
+ int err = DDI_SUCCESS;
+ on_trap_data_t otd;
+
+ mutex_enter(&pec_p->pec_pokefault_mutex);
+ pec_p->pec_ontrap_data = &otd;
+
+ /* Set up protected environment. */
+ if (!on_trap(&otd, OT_DATA_ACCESS)) {
+ uintptr_t tramp = otd.ot_trampoline;
+
+ otd.ot_trampoline = (uintptr_t)&poke_fault;
+ err = do_poke(in_args->size, (void *)in_args->dev_addr,
+ (void *)in_args->host_addr);
+ otd.ot_trampoline = tramp;
+ } else
+ err = DDI_FAILURE;
+
+ /*
+ * Read the async fault register for the PEC to see if it sees
+ * a master-abort.
+ *
+ * XXX check if we need to clear errors at this point.
+ */
+ if (otd.ot_trap & OT_DATA_ACCESS)
+ err = DDI_FAILURE;
+
+ /* Take down protected environment. */
+ no_trap();
+
+ pec_p->pec_ontrap_data = NULL;
+ mutex_exit(&pec_p->pec_pokefault_mutex);
+
+#ifdef DEBUG
+ if (err == DDI_FAILURE)
+ px_pokefault_cnt++;
+#endif
+ return (err);
+}
+
+/*ARGSUSED*/
+static int
+px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
+ peekpoke_ctlops_t *cautacc_ctlops_arg)
+{
+ size_t size = cautacc_ctlops_arg->size;
+ uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
+ uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
+ ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
+ size_t repcount = cautacc_ctlops_arg->repcount;
+ uint_t flags = cautacc_ctlops_arg->flags;
+
+ px_t *px_p = DIP_TO_STATE(dip);
+ px_pec_t *pec_p = px_p->px_pec_p;
+ int err = DDI_SUCCESS;
+
+ /* Use ontrap data in handle set up by FMA */
+ pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
+
+ hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
+ i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
+
+ mutex_enter(&pec_p->pec_pokefault_mutex);
+
+ if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
+ for (; repcount; repcount--) {
+ switch (size) {
+
+ case sizeof (uint8_t):
+ i_ddi_put8(hp, (uint8_t *)dev_addr,
+ *(uint8_t *)host_addr);
+ break;
+
+ case sizeof (uint16_t):
+ i_ddi_put16(hp, (uint16_t *)dev_addr,
+ *(uint16_t *)host_addr);
+ break;
+
+ case sizeof (uint32_t):
+ i_ddi_put32(hp, (uint32_t *)dev_addr,
+ *(uint32_t *)host_addr);
+ break;
+
+ case sizeof (uint64_t):
+ i_ddi_put64(hp, (uint64_t *)dev_addr,
+ *(uint64_t *)host_addr);
+ break;
+ }
+
+ host_addr += size;
+
+ if (flags == DDI_DEV_AUTOINCR)
+ dev_addr += size;
+
+ /*
+ * Read the async fault register for the PEC to see if it
+ * sees a master-abort.
+ *
+ * XXX check if we need to clear errors at this point.
+ */
+ if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
+ err = DDI_FAILURE;
+#ifdef DEBUG
+ px_pokefault_cnt++;
+#endif
+ break;
+ }
+ }
+ }
+
+ i_ddi_notrap((ddi_acc_handle_t)hp);
+ pec_p->pec_ontrap_data = NULL;
+ mutex_exit(&pec_p->pec_pokefault_mutex);
+ i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
+ hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
+
+ return (err);
+}
+
+
+int
+px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
+ peekpoke_ctlops_t *in_args)
+{
+ return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
+ px_lib_do_poke(dip, rdip, in_args));
+}
+
+
+/*ARGSUSED*/
+static int
+px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
+{
+ int err = DDI_SUCCESS;
+ on_trap_data_t otd;
+
+ if (!on_trap(&otd, OT_DATA_ACCESS)) {
+ uintptr_t tramp = otd.ot_trampoline;
+
+ otd.ot_trampoline = (uintptr_t)&peek_fault;
+ err = do_peek(in_args->size, (void *)in_args->dev_addr,
+ (void *)in_args->host_addr);
+ otd.ot_trampoline = tramp;
+ } else
+ err = DDI_FAILURE;
+
+ no_trap();
+
+#ifdef DEBUG
+ if (err == DDI_FAILURE)
+ px_peekfault_cnt++;
+#endif
+ return (err);
+}
+
+
+static int
+px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
+{
+ size_t size = cautacc_ctlops_arg->size;
+ uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
+ uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
+ ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
+ size_t repcount = cautacc_ctlops_arg->repcount;
+ uint_t flags = cautacc_ctlops_arg->flags;
+
+ px_t *px_p = DIP_TO_STATE(dip);
+ px_pec_t *pec_p = px_p->px_pec_p;
+ int err = DDI_SUCCESS;
+
+ hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
+ i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
+
+ if (repcount == 1) {
+ if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
+ i_ddi_caut_get(size, (void *)dev_addr,
+ (void *)host_addr);
+ } else {
+ int i;
+ uint8_t *ff_addr = (uint8_t *)host_addr;
+ for (i = 0; i < size; i++)
+ *ff_addr++ = 0xff;
+
+ err = DDI_FAILURE;
+#ifdef DEBUG
+ px_peekfault_cnt++;
+#endif
+ }
+ } else {
+ if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
+ for (; repcount; repcount--) {
+ i_ddi_caut_get(size, (void *)dev_addr,
+ (void *)host_addr);
+
+ host_addr += size;
+
+ if (flags == DDI_DEV_AUTOINCR)
+ dev_addr += size;
+ }
+ } else {
+ err = DDI_FAILURE;
+#ifdef DEBUG
+ px_peekfault_cnt++;
+#endif
+ }
+ }
+
+ i_ddi_notrap((ddi_acc_handle_t)hp);
+ pec_p->pec_ontrap_data = NULL;
+ i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
+ hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
+
+ return (err);
+}
+
+/*ARGSUSED*/
+int
+px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
+ peekpoke_ctlops_t *in_args, void *result)
+{
+ result = (void *)in_args->host_addr;
+ return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
+ px_lib_do_peek(dip, in_args));
+}
+/*
+ * implements PPM interface
+ */
+int
+px_lib_pmctl(int cmd, px_t *px_p)
+{
+ ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
+ switch (cmd) {
+ case PPMREQ_PRE_PWR_OFF:
+ /*
+ * Currently there is no device power management for
+ * the root complex (fire). When there is we need to make
+ * sure that it is at full power before trying to send the
+ * PME_Turn_Off message.
+ */
+ DBG(DBG_PWR, px_p->px_dip,
+ "ioctl: request to send PME_Turn_Off\n");
+ return (px_goto_l23ready(px_p));
+
+ case PPMREQ_PRE_PWR_ON:
+ case PPMREQ_POST_PWR_ON:
+ /* code to be written for Fire 2.0. return failure for now */
+ return (DDI_FAILURE);
+
+ default:
+ return (DDI_FAILURE);
+ }
+}
+
+/*
+ * Sends a PME_Turn_Off message to put the link in the L2/L3 ready state.
+ * Called by px_ioctl.
+ * Returns DDI_SUCCESS or DDI_FAILURE.
+ * 1. Wait for the link to be in the L1 state (link status reg).
+ * 2. Write to the PME_Turn_Off register to broadcast the message.
+ * 3. Set a timeout.
+ * 4. If the timeout expires, return failure.
+ * 5. If PME_To_Ack is received, wait till the link is in L2/L3 ready.
+ */
+static int
+px_goto_l23ready(px_t *px_p)
+{
+ pcie_pwr_t *pwr_p;
+ caddr_t csr_base = (caddr_t)px_p->px_address[PX_REG_CSR];
+ int ret = DDI_SUCCESS;
+ clock_t end, timeleft;
+
+ /* If no PM info, return failure */
+ if (!PCIE_PMINFO(px_p->px_dip) ||
+ !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
+ return (DDI_FAILURE);
+
+ mutex_enter(&pwr_p->pwr_lock);
+ mutex_enter(&pwr_p->pwr_intr_lock);
+ /* Clear the PME_To_ACK received flag */
+ pwr_p->pwr_flags &= ~PCIE_PMETOACK_RECVD;
+ if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
+ ret = DDI_FAILURE;
+ goto l23ready_done;
+ }
+ pwr_p->pwr_flags |= PCIE_PME_TURNOFF_PENDING;
+
+ end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
+ while (!(pwr_p->pwr_flags & PCIE_PMETOACK_RECVD)) {
+ timeleft = cv_timedwait(&pwr_p->pwr_cv,
+ &pwr_p->pwr_intr_lock, end);
+ /*
+ * If cv_timedwait returns -1, it is because one of:
+ * 1) it timed out, or
+ * 2) there was a premature wakeup, but by the time
+ * cv_timedwait is called again end < lbolt, i.e.
+ * end is in the past, or
+ * 3) by the time we make the first cv_timedwait call,
+ * end < lbolt is already true.
+ */
+ if (timeleft == -1)
+ break;
+ }
+ if (!(pwr_p->pwr_flags & PCIE_PMETOACK_RECVD)) {
+ /*
+ * Either we timed out or the interrupt didn't get a
+ * chance to grab the mutex and set the flag.
+ * Release the mutex and delay for some time.
+ * This will 1) give the interrupt a chance to
+ * set the flag and 2) create a delay between two
+ * consecutive requests.
+ */
+ mutex_exit(&pwr_p->pwr_intr_lock);
+ delay(5);
+ mutex_enter(&pwr_p->pwr_intr_lock);
+ if (!(pwr_p->pwr_flags & PCIE_PMETOACK_RECVD)) {
+ ret = DDI_FAILURE;
+ DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
+ " for PME_TO_ACK\n");
+ }
+ }
+ /* PME_To_ACK received */
+ pwr_p->pwr_flags &= ~(PCIE_PME_TURNOFF_PENDING | PCIE_PMETOACK_RECVD);
+
+ /* TBD: wait till link is in L2/L3 ready (link status reg) */
+
+l23ready_done:
+ mutex_exit(&pwr_p->pwr_intr_lock);
+ mutex_exit(&pwr_p->pwr_lock);
+ return (ret);
+}
+
+
+/*
+ * Extract the driver's binding name to identify which chip we're binding to.
+ * Whenever a new bus bridge is created, the driver alias entry should be
+ * added here to identify the device if needed. If a device isn't added,
+ * the identity defaults to PX_CHIP_UNIDENTIFIED.
+ */
+static uint32_t
+px_identity_chip(px_t *px_p)
+{
+ dev_info_t *dip = px_p->px_dip;
+ char *name = ddi_binding_name(dip);
+ uint32_t revision = 0;
+
+ revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+ "module-revision#", 0);
+
+ /* Check for Fire driver binding name */
+ if (strcmp(name, "pci108e,80f0") == 0) {
+ DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: "
+ "name %s module-revision %d\n", ddi_driver_name(dip),
+ ddi_get_instance(dip), name, revision);
+
+ return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00));
+ }
+
+ DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
+ ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
+
+ return (PX_CHIP_UNIDENTIFIED);
+}
diff --git a/usr/src/uts/sun4u/io/px/px_lib4u.h b/usr/src/uts/sun4u/io/px/px_lib4u.h
new file mode 100644
index 0000000000..2a4f6c965e
--- /dev/null
+++ b/usr/src/uts/sun4u/io/px/px_lib4u.h
@@ -0,0 +1,337 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PX_LIB4U_H
+#define _SYS_PX_LIB4U_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Errors returned.
+ */
+#define H_EOK 0 /* Successful return */
+#define H_ENOINTR 1 /* Invalid interrupt id */
+#define H_EINVAL 2 /* Invalid argument */
+#define H_ENOACCESS 3 /* No access to resource */
+#define H_EIO 4 /* I/O error */
+#define H_ENOTSUPPORTED 5 /* Function not supported */
+#define H_ENOMAP 6 /* Mapping is not valid, */
+ /* no translation exists */
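+
+/*
+ * Illustrative sketch (not part of the original header): callers in
+ * px_lib4u.c generally treat H_EOK as success and map any other hvio_*()
+ * return value to DDI_FAILURE.  The helper name below is hypothetical.
+ *
+ *	static int
+ *	px_hvio_to_ddi(uint64_t hvret)
+ *	{
+ *		return ((hvret == H_EOK) ? DDI_SUCCESS : DDI_FAILURE);
+ *	}
+ */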
+
+/*
+ * SUN4U px specific data structure.
+ */
+typedef struct pxu {
+ uint32_t chip_id;
+ uint8_t portid;
+ uint16_t tsb_cookie;
+ uint32_t tsb_size;
+ uint64_t *tsb_vaddr;
+ void *msiq_mapped_p;
+
+ /* Soft state for suspend/resume */
+ uint64_t *pec_config_state;
+ uint64_t *mmu_config_state;
+ uint64_t *ib_intr_map;
+ uint64_t *ib_config_state;
+ uint64_t *xcb_config_state;
+ uint64_t *msiq_config_state;
+} pxu_t;
+
+/*
+ * Event Queue data structure.
+ */
+typedef struct eq_rec {
+ uint64_t eq_rec_rsvd0 : 1, /* DW 0 - 63 */
+ eq_rec_fmt_type : 7, /* DW 0 - 62:56 */
+ eq_rec_len : 10, /* DW 0 - 55:46 */
+ eq_rec_addr0 : 14, /* DW 0 - 45:32 */
+ eq_rec_rid : 16, /* DW 0 - 31:16 */
+ eq_rec_data0 : 16; /* DW 0 - 15:00 */
+ uint64_t eq_rec_addr1 : 48, /* DW 1 - 63:16 */
+ eq_rec_data1 : 16; /* DW 1 - 15:0 */
+ uint64_t eq_rec_rsvd[6]; /* DW 2-7 */
+} eq_rec_t;
+
+/*
+ * EQ record type
+ *
+ * The upper 4 bits of eq_rec_fmt_type are used
+ * to identify the EQ record type.
+ */
+#define EQ_REC_MSG 0x6 /* MSG - 0x3X */
+#define EQ_REC_MSI32 0xB /* MSI32 - 0x58 */
+#define EQ_REC_MSI64 0xF /* MSI64 - 0x78 */
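+
+/*
+ * Illustrative sketch (not part of the original header): since the record
+ * type occupies the upper 4 bits of the 7-bit eq_rec_fmt_type field, it can
+ * be recovered with a right shift of 3, e.g. 0x58 >> 3 == 0xB == EQ_REC_MSI32.
+ * The macro name below is hypothetical.
+ *
+ *	#define EQ_REC_TYPE(eq_rec_p) \
+ *		(((eq_rec_p)->eq_rec_fmt_type >> 3) & 0xF)
+ */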
+
+/* EQ State */
+#define EQ_IDLE_STATE 0x1 /* IDLE */
+#define EQ_ACTIVE_STATE 0x2 /* ACTIVE */
+#define EQ_ERROR_STATE 0x4 /* ERROR */
+
+#define MMU_INVALID_TTE 0ull
+#define MMU_TTE_VALID(tte) (((tte) & MMU_TTE_V) == MMU_TTE_V)
+#define MMU_TTETOPA(x) ((x & 0x7ffffffffff) >> MMU_PAGE_SHIFT)
+
+/*
+ * control register decoding
+ */
+/* tsb size: 0=1k 1=2k 2=4k 3=8k 4=16k 5=32k 6=64k 7=128k */
+#define MMU_CTL_TO_TSBSIZE(ctl) ((ctl) >> 16)
+#define MMU_TSBSIZE_TO_TSBENTRIES(s) ((1 << (s)) << (13 - 3))
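+
+/*
+ * Illustrative worked example (not part of the original header): a tsb size
+ * code of 3 ("8k" in the table above) gives
+ * MMU_TSBSIZE_TO_TSBENTRIES(3) == (1 << 3) << 10 == 8192 entries.
+ */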
+
+/*
+ * For mmu bypass addresses, bit 43 specifies cacheability.
+ */
+#define MMU_BYPASS_NONCACHE (1ull << 43)
+
+/*
+ * The following macros define the address ranges supported for DVMA
+ * and mmu bypass transfers.
+ */
+#define MMU_BYPASS_BASE 0xFFFC000000000000ull
+#define MMU_BYPASS_END 0xFFFC01FFFFFFFFFFull
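+
+/*
+ * Illustrative sketch (not part of the original header, macro name
+ * hypothetical): an I/O address can be tested for membership in the
+ * bypass window with a simple range check against the macros above.
+ *
+ *	#define MMU_IN_BYPASS_RANGE(ioaddr) \
+ *		((ioaddr) >= MMU_BYPASS_BASE && (ioaddr) <= MMU_BYPASS_END)
+ */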
+
+/*
+ * The following macros are for loading and unloading io tte
+ * entries.
+ */
+#define MMU_TTE_SIZE 8
+#define MMU_TTE_V (1ull << 63)
+#define MMU_TTE_W (1ull << 1)
+
+#define INO_BITS 6 /* INO#s are 6 bits long */
+#define IGN_BITS 5 /* IGN#s are 5 bits long */
+#define INO_MASK 0x3F /* INO#s mask */
+#define IGN_MASK 0x1F /* IGN#s mask */
+
+#define ID_TO_IGN(portid) ((uint16_t)((portid) & IGN_MASK))
+#define ID_TO_NODEID(portid) ((uint16_t)((portid) >> IGN_BITS))
+#define DEVINO_TO_SYSINO(portid, devino) \
+ ((ID_TO_NODEID(portid) << (IGN_BITS + INO_BITS)) | \
+ ((ID_TO_IGN(portid) << INO_BITS) | (devino & INO_MASK)))
+#define SYSINO_TO_DEVINO(sysino) (sysino & INO_MASK)
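+
+/*
+ * Illustrative worked example (not part of the original header): for a
+ * hypothetical portid of 0x3f (node id 1, IGN 0x1f) and devino 0x2a,
+ * DEVINO_TO_SYSINO(0x3f, 0x2a) == (1 << 11) | (0x1f << 6) | 0x2a == 0xfea,
+ * and SYSINO_TO_DEVINO(0xfea) recovers 0x2a.
+ */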
+
+/* Interrupt states */
+#define INTERRUPT_IDLE_STATE 0
+#define INTERRUPT_RECEIVED_STATE 1
+#define INTERRUPT_PENDING_STATE 3
+
+/*
+ * Interrupt directives needed for reading proper interrupt diag register for
+ * given ino.
+ */
+#define PX_INTR_DIAG_REG(CSRBASE, INO) \
+ (INO <= 32 ? CSRBASE + INTERRUPT_STATE_STATUS_1 : \
+ CSRBASE + INTERRUPT_STATE_STATUS_2)
+
+#define PX_INTR_STAT_BITMAP(INO) (0x3 << (INO & 0x1f))
+#define PX_INTR_STATUS(INTRDIAG_REG, INO) (((*INTRDIAG_REG) \
+ & PX_INTR_STAT_BITMAP(INO)) >> (INO & 0x1f))
+
+/*
+ * Defines for the link width and max packet size values used to index the
+ * ACKNAK Latency Threshold Timer and TxLink Replay Timer Latency Table arrays.
+ * Num Link Width Packet Size
+ * 0 1 128
+ * 1 4 256
+ * 2 8 512
+ * 3 16 1024
+ * 4 - 2048
+ * 5 - 4096
+ */
+#define LINK_WIDTH_ARR_SIZE 4
+#define LINK_MAX_PKT_ARR_SIZE 6
+
+/*
+ * Defines for registers which have multi-bit fields.
+ */
+#define TLU_LINK_CONTROL_ASPM_DISABLED 0x0
+#define TLU_LINK_CONTROL_ASPM_L0S_EN 0x1
+#define TLU_LINK_CONTROL_ASPM_L1_EN 0x2
+#define TLU_LINK_CONTROL_ASPM_L0S_L1_EN 0x3
+
+#define TLU_CONTROL_CONFIG_DEFAULT 0x1
+#define TLU_CONTROL_L0S_TIM_DEFAULT 0xdaull
+#define TLU_CONTROL_MPS_MASK 0x1C
+#define TLU_CONTROL_MPS_SHIFT 2
+
+#define LPU_TXLINK_REPLAY_NUMBER_STATUS_RPLAY_NUM_0 0x0
+#define LPU_TXLINK_REPLAY_NUMBER_STATUS_RPLAY_NUM_1 0x1
+#define LPU_TXLINK_REPLAY_NUMBER_STATUS_RPLAY_NUM_2 0x2
+#define LPU_TXLINK_REPLAY_NUMBER_STATUS_RPLAY_NUM_3 0x3
+
+#define LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT 0xFFFFull
+#define LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT 0x0ull
+
+#define LPU_TXLINK_SEQUENCE_COUNTER_ACK_SEQ_CNTR_DEFAULT 0xFFF
+#define LPU_TXLINK_SEQUENCE_COUNTER_NXT_TX_SEQ_CNTR_DEFAULT 0x0
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR_SEQ_CNT_MAX_ADDR_DEF 0x157
+
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS_SEQ_CNT_TLPTR_DEFAULT 0xFFF
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS_SEQ_CNT_HDPTR_DEFAULT 0x0
+
+#define LPU_LTSSM_CONFIG1_LTSSM_8_TO_DEFAULT 0x2
+#define LPU_LTSSM_CONFIG1_LTSSM_20_TO_DEFAULT 0x5
+#define LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT 0x2DC6C0
+#define LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT 0x7A120
+/*
+ * XXX fix LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT &
+ * LPU_LTSSM_CONFIG4_N_FTS_DEFAULT in px_pec.h
+ */
+#define LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT 0x2
+#define LPU_LTSSM_CONFIG4_N_FTS_DEFAULT 0x8c
+
+/*
+ * The order in which the chip_type values appear is significant.
+ * There is code that depends on it: PX_CHIP_TYPE(pxu_p) < PX_CHIP_FIRE.
+ */
+typedef enum {
+ PX_CHIP_UNIDENTIFIED = 0,
+ PX_CHIP_FIRE = 1
+} px_chip_id_t;
+
+/*
+ * [msb] [lsb]
+ * 0x00 <chip_type> <version#> <module-revision#>
+ */
+#define PX_CHIP_ID(t, v, m) (((t) << 16) | ((v) << 8) | (m))
+#define PX_ID_CHIP_TYPE(id) ((id) >> 16)
+#define PX_CHIP_TYPE(pxu_p) PX_ID_CHIP_TYPE((pxu_p)->chip_id)
+#define PX_CHIP_REV(pxu_p) (((pxu_p)->chip_id) & 0xFF)
+#define PX_CHIP_VER(pxu_p) ((((pxu_p)->chip_id) >> 8) & 0xFF)
+
+/*
+ * Fire hardware specific version definitions.
+ */
+#define FIRE_VER_10 PX_CHIP_ID(PX_CHIP_FIRE, 0x01, 0x00)
+#define FIRE_VER_20 PX_CHIP_ID(PX_CHIP_FIRE, 0x03, 0x00)
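+
+/*
+ * Illustrative worked example (not part of the original header):
+ * FIRE_VER_20 == PX_CHIP_ID(PX_CHIP_FIRE, 0x03, 0x00) == 0x10300, and
+ * PX_ID_CHIP_TYPE(0x10300) == 0x1 == PX_CHIP_FIRE.
+ */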
+
+extern void hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
+extern void hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p);
+extern void hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p);
+extern void hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p);
+
+#ifdef VPCI_CONFIG_ACCESS
+extern uint64_t hvio_config_get(devhandle_t dev_hdl, pci_device_t bdf,
+ pci_config_offset_t off, pci_config_size_t size, pci_cfg_data_t *data_p);
+extern uint64_t hvio_config_put(devhandle_t dev_hdl, pci_device_t bdf,
+ pci_config_offset_t off, pci_config_size_t size, pci_cfg_data_t data);
+#endif /* VPCI_CONFIG_ACCESS */
+
+extern uint64_t hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p,
+ devino_t devino, sysino_t *sysino);
+extern uint64_t hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
+ intr_valid_state_t *intr_valid_state);
+extern uint64_t hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
+ intr_valid_state_t intr_valid_state);
+extern uint64_t hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
+ intr_state_t *intr_state);
+extern uint64_t hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
+ intr_state_t intr_state);
+extern uint64_t hvio_intr_gettarget(devhandle_t dev_hdl, sysino_t sysino,
+ cpuid_t *cpuid);
+extern uint64_t hvio_intr_settarget(devhandle_t dev_hdl, sysino_t sysino,
+ cpuid_t cpuid);
+
+extern uint64_t hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
+ pages_t pages, io_attributes_t io_attributes,
+ void *addr, size_t pfn_index, int flag);
+extern uint64_t hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p,
+ tsbid_t tsbid, pages_t pages);
+extern uint64_t hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p,
+ tsbid_t tsbid, io_attributes_t *attributes_p, r_addr_t *r_addr_p);
+extern uint64_t hvio_iommu_getbypass(devhandle_t dev_hdl, r_addr_t ra,
+ io_attributes_t io_attributes, io_addr_t *io_addr_p);
+
+/*
+ * MSIQ Functions:
+ */
+extern uint64_t hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p);
+extern uint64_t hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
+ pci_msiq_valid_state_t *msiq_valid_state);
+extern uint64_t hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
+ pci_msiq_valid_state_t msiq_valid_state);
+extern uint64_t hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
+ pci_msiq_state_t *msiq_state);
+extern uint64_t hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
+ pci_msiq_state_t msiq_state);
+extern uint64_t hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
+ msiqhead_t *msiq_head);
+extern uint64_t hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
+ msiqhead_t msiq_head);
+extern uint64_t hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
+ msiqtail_t *msiq_tail);
+
+/*
+ * MSI Functions:
+ */
+extern uint64_t hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32,
+ uint64_t addr64);
+extern uint64_t hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
+ msiqid_t *msiq_id);
+extern uint64_t hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
+ msiqid_t msiq_id);
+extern uint64_t hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
+ pci_msi_valid_state_t *msi_valid_state);
+extern uint64_t hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
+ pci_msi_valid_state_t msi_valid_state);
+extern uint64_t hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
+ pci_msi_state_t *msi_state);
+extern uint64_t hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
+ pci_msi_state_t msi_state);
+
+/*
+ * MSG Functions:
+ */
+extern uint64_t hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
+ msiqid_t *msiq_id);
+extern uint64_t hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
+ msiqid_t msiq_id);
+extern uint64_t hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
+ pcie_msg_valid_state_t *msg_valid_state);
+extern uint64_t hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
+ pcie_msg_valid_state_t msg_valid_state);
+
+/*
+ * Suspend/Resume Functions:
+ */
+extern uint64_t hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
+extern void hvio_resume(devhandle_t dev_hdl,
+ devino_t devino, pxu_t *pxu_p);
+extern uint64_t hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
+extern void hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
+ devino_t devino, pxu_t *pxu_p);
+extern int px_send_pme_turnoff(caddr_t csr_base);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PX_LIB4U_H */
diff --git a/usr/src/uts/sun4u/io/px/px_regs.h b/usr/src/uts/sun4u/io/px/px_regs.h
new file mode 100644
index 0000000000..f181f642e0
--- /dev/null
+++ b/usr/src/uts/sun4u/io/px/px_regs.h
@@ -0,0 +1,2637 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PX_REGS_H
+#define _SYS_PX_REGS_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Register tools history */
+#pragma ident "@(#)hdgen 1.3 03/11/10"
+#pragma ident "@(#)firedefiner.pl 1.7 03/11/19"
+
+/* jcs.csr JCS module defines */
+
+#define JCS_CSR_BASE 0x000000
+#define JBUS_DEVICE_ID 0x0
+#define JBUS_DEVICE_ID_COOKIE 56
+#define JBUS_DEVICE_ID_COOKIE_MASK 0xff
+#define JBUS_DEVICE_ID_JVPORT 27
+#define JBUS_DEVICE_ID_JVPORT_MASK 0x7f
+#define JBUS_DEVICE_ID_JPID_4 21
+#define JBUS_DEVICE_ID_JPID_3_0 17
+#define JBUS_DEVICE_ID_JPID_3_0_MASK 0xf
+#define JBUS_DEVICE_ID_M_S 16
+#define JBUS_DEVICE_ID_MID 10
+#define JBUS_DEVICE_ID_MID_MASK 0x3f
+#define JBUS_DEVICE_ID_MT 4
+#define JBUS_DEVICE_ID_MT_MASK 0x3f
+#define JBUS_DEVICE_ID_MR 0
+#define JBUS_DEVICE_ID_MR_MASK 0xf
+#define EBUS_OFFSET_BASE 0x400020
+#define EBUS_OFFSET_BASE_V 63
+#define EBUS_OFFSET_BASE_BASE 24
+#define EBUS_OFFSET_BASE_BASE_MASK 0xfff
+#define EBUS_OFFSET_MASK 0x400028
+#define EBUS_OFFSET_MASK_MASK_HI 36
+#define EBUS_OFFSET_MASK_MASK_HI_MASK 0x7f
+#define EBUS_OFFSET_MASK_MASK 24
+#define EBUS_OFFSET_MASK_MASK_MASK 0xfff
+#define PCIE_A_MEM32_OFFSET_BASE 0x400040
+#define PCIE_A_MEM32_OFFSET_BASE_V 63
+#define PCIE_A_MEM32_OFFSET_BASE_BASE 24
+#define PCIE_A_MEM32_OFFSET_BASE_BASE_MASK 0xfff
+#define PCIE_A_MEM32_OFFSET_MASK 0x400048
+#define PCIE_A_MEM32_OFFSET_MASK_MASK_HI 36
+#define PCIE_A_MEM32_OFFSET_MASK_MASK_HI_MASK 0x7f
+#define PCIE_A_MEM32_OFFSET_MASK_MASK 24
+#define PCIE_A_MEM32_OFFSET_MASK_MASK_MASK 0xfff
+#define PCIE_A_CFG_IO_OFFSET_BASE 0x400050
+#define PCIE_A_CFG_IO_OFFSET_BASE_V 63
+#define PCIE_A_CFG_IO_OFFSET_BASE_BASE 24
+#define PCIE_A_CFG_IO_OFFSET_BASE_BASE_MASK 0xfff
+#define PCIE_A_CFG_IO_OFFSET_MASK 0x400058
+#define PCIE_A_CFG_IO_OFFSET_MASK_MASK_HI 36
+#define PCIE_A_CFG_IO_OFFSET_MASK_MASK_HI_MASK 0x7f
+#define PCIE_A_CFG_IO_OFFSET_MASK_MASK 24
+#define PCIE_A_CFG_IO_OFFSET_MASK_MASK_MASK 0xfff
+#define PCIE_B_MEM32_OFFSET_BASE 0x400060
+#define PCIE_B_MEM32_OFFSET_BASE_V 63
+#define PCIE_B_MEM32_OFFSET_BASE_BASE 24
+#define PCIE_B_MEM32_OFFSET_BASE_BASE_MASK 0xfff
+#define PCIE_B_MEM32_OFFSET_MASK 0x400068
+#define PCIE_B_MEM32_OFFSET_MASK_MASK_HI 36
+#define PCIE_B_MEM32_OFFSET_MASK_MASK_HI_MASK 0x7f
+#define PCIE_B_MEM32_OFFSET_MASK_MASK 24
+#define PCIE_B_MEM32_OFFSET_MASK_MASK_MASK 0xfff
+#define PCIE_B_CFG_IO_OFFSET_BASE 0x400070
+#define PCIE_B_CFG_IO_OFFSET_BASE_V 63
+#define PCIE_B_CFG_IO_OFFSET_BASE_BASE 24
+#define PCIE_B_CFG_IO_OFFSET_BASE_BASE_MASK 0xfff
+#define PCIE_B_CFG_IO_OFFSET_MASK 0x400078
+#define PCIE_B_CFG_IO_OFFSET_MASK_MASK_HI 36
+#define PCIE_B_CFG_IO_OFFSET_MASK_MASK_HI_MASK 0x7f
+#define PCIE_B_CFG_IO_OFFSET_MASK_MASK 24
+#define PCIE_B_CFG_IO_OFFSET_MASK_MASK_MASK 0xfff
+#define PCIE_A_MEM64_OFFSET_BASE 0x400080
+#define PCIE_A_MEM64_OFFSET_BASE_V 63
+#define PCIE_A_MEM64_OFFSET_BASE_BASE 24
+#define PCIE_A_MEM64_OFFSET_BASE_BASE_MASK 0xfff
+#define PCIE_A_MEM64_OFFSET_MASK 0x400088
+#define PCIE_A_MEM64_OFFSET_MASK_MASK_HI 36
+#define PCIE_A_MEM64_OFFSET_MASK_MASK_HI_MASK 0x7f
+#define PCIE_A_MEM64_OFFSET_MASK_MASK 24
+#define PCIE_A_MEM64_OFFSET_MASK_MASK_MASK 0xfff
+#define PCIE_B_MEM64_OFFSET_BASE 0x400090
+#define PCIE_B_MEM64_OFFSET_BASE_V 63
+#define PCIE_B_MEM64_OFFSET_BASE_BASE 24
+#define PCIE_B_MEM64_OFFSET_BASE_BASE_MASK 0xfff
+#define PCIE_B_MEM64_OFFSET_MASK 0x400098
+#define PCIE_B_MEM64_OFFSET_MASK_MASK_HI 36
+#define PCIE_B_MEM64_OFFSET_MASK_MASK_HI_MASK 0x7f
+#define PCIE_B_MEM64_OFFSET_MASK_MASK 24
+#define PCIE_B_MEM64_OFFSET_MASK_MASK_MASK 0xfff
+#define FIRE_CONTROL_STATUS 0x410000
+#define FIRE_CONTROL_STATUS_SPARE_CONTROL_LOAD_4 63
+#define FIRE_CONTROL_STATUS_SPARE_CONTROL_LOAD_3 62
+#define FIRE_CONTROL_STATUS_SPARE_CONTROL_LOAD_2 61
+#define FIRE_CONTROL_STATUS_SPARE_CONTROL_LOAD_1 60
+#define FIRE_CONTROL_STATUS_SPARE_CONTROL_LOAD_0 59
+#define FIRE_CONTROL_STATUS_SPARE_CONTROL 54
+#define FIRE_CONTROL_STATUS_SPARE_CONTROL_MASK 0x1f
+#define FIRE_CONTROL_STATUS_SPARE_STATUS 49
+#define FIRE_CONTROL_STATUS_SPARE_STATUS_MASK 0x1f
+#define FIRE_CONTROL_STATUS_PAR_DELAY 44
+#define FIRE_CONTROL_STATUS_PAR_EN 43
+#define FIRE_CONTROL_STATUS_JPACK_DELAY 36
+#define FIRE_CONTROL_STATUS_JPACK_DELAY_MASK 0x7f
+#define FIRE_CONTROL_STATUS_DTL_MODE 34
+#define FIRE_CONTROL_STATUS_DTL_MODE_MASK 0x3
+#define FIRE_CONTROL_STATUS_JTO 32
+#define FIRE_CONTROL_STATUS_JTO_MASK 0x3
+#define FIRE_CONTROL_STATUS_ARB_MODE 27
+#define FIRE_CONTROL_STATUS_ARB_MODE_MASK 0x3
+#define FIRE_CONTROL_STATUS_UE_PROP_MODE 26
+#define FIRE_CONTROL_STATUS_JPID_4 25
+#define FIRE_CONTROL_STATUS_JPID_3_0 21
+#define FIRE_CONTROL_STATUS_JPID_3_0_MASK 0xf
+#define FIRE_CONTROL_STATUS_AOK_THRESH 17
+#define FIRE_CONTROL_STATUS_AOK_THRESH_MASK 0xf
+#define FIRE_CONTROL_STATUS_DOK_THRESH 13
+#define FIRE_CONTROL_STATUS_DOK_THRESH_MASK 0xf
+#define FIRE_CONTROL_STATUS_NIAGARA_MODE 12
+#define FIRE_CONTROL_STATUS_PDQ 10
+#define FIRE_CONTROL_STATUS_PDQ_MASK 0x3
+#define FIRE_CONTROL_STATUS_J_AD4_DIAG 9
+#define FIRE_CONTROL_STATUS_LPDQ 0
+#define FIRE_CONTROL_STATUS_LPDQ_MASK 0x1ff
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL 0x410050
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EP_50 55
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EP_50_MASK 0x1f
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EP_25 50
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EP_25_MASK 0x1f
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EN_50 45
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EN_50_MASK 0x1f
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EN_25 40
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EN_25_MASK 0x1f
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_DTL_TST2_SCHEME 39
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EP_50_O 32
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EP_50_O_MASK 0x7f
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EP_25_O 24
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EP_25_O_MASK 0x7f
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EN_50_O 16
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EN_50_O_MASK 0x7f
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EN_25_O 8
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_EN_25_O_MASK 0x7f
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_DTL_TST2_MODE 6
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_DTL_TST2_MODE_MASK 0x3
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_PLL_LOCK 5
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_DTL_CHAR 4
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_JITLMT 2
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_JITLMT_MASK 0x3
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_CNTLMT 0
+#define JBUS_PLL_CONTROL_AND_DTL_CONTROL_CNTLMT_MASK 0x3
+#define JBUS_ENERGY_STAR_CONTROL 0x410058
+#define JBUS_ENERGY_STAR_CONTROL_S1_32 5
+#define JBUS_ENERGY_STAR_CONTROL_S1_2 1
+#define JBUS_ENERGY_STAR_CONTROL_FULL 0
+#define JBUS_CHANGE_INITIATION_CONTROL 0x410060
+#define JBUS_CHANGE_INITIATION_CONTROL_CINIT 3
+#define JBUS_CHANGE_INITIATION_CONTROL_CINIT_MASK 0x3
+#define JBUS_CHANGE_INITIATION_CONTROL_CDELAY 0
+#define JBUS_CHANGE_INITIATION_CONTROL_CDELAY_MASK 0x7
+#define RESET_GENERATION 0x417010
+#define RESET_GENERATION_PU_RST 2
+#define RESET_GENERATION_XIR 1
+#define RESET_GENERATION_PO_RST 0
+#define RESET_SOURCE 0x417018
+#define RESET_SOURCE_FATAL 6
+#define RESET_SOURCE_PB_XIR 5
+#define RESET_SOURCE_PB_RST 4
+#define RESET_SOURCE_PU 3
+#define RESET_SOURCE_PU_RST 2
+#define RESET_SOURCE_XIR 1
+#define RESET_SOURCE_PO_RST 0
+#define GPIO_PORT_0_PIN_0_DATA 0x460000
+#define GPIO_PORT_0_PIN_0_DATA_DATA 0
+#define GPIO_PORT_0_PIN_1_DATA 0x460008
+#define GPIO_PORT_0_PIN_1_DATA_DATA 0
+#define GPIO_PORT_0_PIN_2_DATA 0x460010
+#define GPIO_PORT_0_PIN_2_DATA_DATA 0
+#define GPIO_PORT_0_PIN_3_DATA 0x460018
+#define GPIO_PORT_0_PIN_3_DATA_DATA 0
+#define GPIO_PORT_0_DATA 0x460020
+#define GPIO_PORT_0_DATA_DATA_3 3
+#define GPIO_PORT_0_DATA_DATA_2 2
+#define GPIO_PORT_0_DATA_DATA_1 1
+#define GPIO_PORT_0_DATA_DATA_0 0
+#define GPIO_PORT_0_CONTROL 0x460028
+#define GPIO_PORT_0_CONTROL_DIR_3 3
+#define GPIO_PORT_0_CONTROL_DIR_2 2
+#define GPIO_PORT_0_CONTROL_DIR_1 1
+#define GPIO_PORT_0_CONTROL_DIR_0 0
+#define GPIO_PORT_1_PIN_0_DATA 0x462000
+#define GPIO_PORT_1_PIN_0_DATA_DATA 0
+#define GPIO_PORT_1_PIN_1_DATA 0x462008
+#define GPIO_PORT_1_PIN_1_DATA_DATA 0
+#define GPIO_PORT_1_PIN_2_DATA 0x462010
+#define GPIO_PORT_1_PIN_2_DATA_DATA 0
+#define GPIO_PORT_1_PIN_3_DATA 0x462018
+#define GPIO_PORT_1_PIN_3_DATA_DATA 0
+#define GPIO_PORT_1_DATA 0x462020
+#define GPIO_PORT_1_DATA_DATA_3 3
+#define GPIO_PORT_1_DATA_DATA_2 2
+#define GPIO_PORT_1_DATA_DATA_1 1
+#define GPIO_PORT_1_DATA_DATA_0 0
+#define GPIO_PORT_1_CONTROL 0x462028
+#define GPIO_PORT_1_CONTROL_DIR_3 3
+#define GPIO_PORT_1_CONTROL_DIR_2 2
+#define GPIO_PORT_1_CONTROL_DIR_1 1
+#define GPIO_PORT_1_CONTROL_DIR_0 0
+#define EBUS_EPROM_TIMING_CONTROL 0x464000
+#define EBUS_EPROM_TIMING_CONTROL_ENABLE 61
+#define EBUS_EPROM_TIMING_CONTROL_READY_COUNT 40
+#define EBUS_EPROM_TIMING_CONTROL_READY_COUNT_MASK 0x1fffff
+#define EBUS_EPROM_TIMING_CONTROL_PROTOCOL_COUNT 32
+#define EBUS_EPROM_TIMING_CONTROL_PROTOCOL_COUNT_MASK 0xff
+#define EBUS_EPROM_TIMING_CONTROL_STROBE_COUNT 24
+#define EBUS_EPROM_TIMING_CONTROL_STROBE_COUNT_MASK 0xff
+#define EBUS_EPROM_TIMING_CONTROL_RECOVERY_COUNT 16
+#define EBUS_EPROM_TIMING_CONTROL_RECOVERY_COUNT_MASK 0xff
+#define EBUS_EPROM_TIMING_CONTROL_HOLD_COUNT 8
+#define EBUS_EPROM_TIMING_CONTROL_HOLD_COUNT_MASK 0xff
+#define EBUS_EPROM_TIMING_CONTROL_SETUP_COUNT 0
+#define EBUS_EPROM_TIMING_CONTROL_SETUP_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL 0x464008
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_READY_COUNT 40
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_READY_COUNT_MASK 0x1fffff
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_PROTOCOL_COUNT 32
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_PROTOCOL_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_STROBE_COUNT 24
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_STROBE_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_RECOVERY_COUNT 16
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_RECOVERY_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_HOLD_COUNT 8
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_HOLD_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_SETUP_COUNT 0
+#define EBUS_CHIP_SELECT_1_TIMING_CONTROL_SETUP_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL 0x464010
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_READY_COUNT 40
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_READY_COUNT_MASK 0x1fffff
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_PROTOCOL_COUNT 32
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_PROTOCOL_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_STROBE_COUNT 24
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_STROBE_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_RECOVERY_COUNT 16
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_RECOVERY_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_HOLD_COUNT 8
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_HOLD_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_SETUP_COUNT 0
+#define EBUS_CHIP_SELECT_2_TIMING_CONTROL_SETUP_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL 0x464018
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_READY_COUNT 40
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_READY_COUNT_MASK 0x1fffff
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_PROTOCOL_COUNT 32
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_PROTOCOL_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_STROBE_COUNT 24
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_STROBE_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_RECOVERY_COUNT 16
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_RECOVERY_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_HOLD_COUNT 8
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_HOLD_COUNT_MASK 0xff
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_SETUP_COUNT 0
+#define EBUS_CHIP_SELECT_3_TIMING_CONTROL_SETUP_COUNT_MASK 0xff
+#define I2C_0_INPUT_MONITOR 0x466000
+#define I2C_0_INPUT_MONITOR_SDC 1
+#define I2C_0_INPUT_MONITOR_SDA 0
+#define I2C_0_DATA_DRIVE 0x466008
+#define I2C_0_DATA_DRIVE_SDA 0
+#define I2C_0_CLOCK_DRIVE 0x466010
+#define I2C_0_CLOCK_DRIVE_SCL 0
+#define I2C_1_INPUT_MONITOR 0x468000
+#define I2C_1_INPUT_MONITOR_SDC 1
+#define I2C_1_INPUT_MONITOR_SDA 0
+#define I2C_1_DATA_DRIVE 0x468008
+#define I2C_1_DATA_DRIVE_SDA 0
+#define I2C_1_CLOCK_DRIVE 0x468010
+#define I2C_1_CLOCK_DRIVE_SCL 0
+#define PCIE_A_LEAF_CSR_RING_SLOW_ONLY_ACCESS 0x470000
+#define PCIE_A_LEAF_CSR_RING_SLOW_ONLY_ACCESS_SLOW_ONLY 0
+#define PCIE_B_LEAF_CSR_RING_SLOW_ONLY_ACCESS 0x470008
+#define PCIE_B_LEAF_CSR_RING_SLOW_ONLY_ACCESS_SLOW_ONLY 0
+#define JBUS_PARITY_CONTROL 0x470010
+#define JBUS_PARITY_CONTROL_P_EN 63
+#define JBUS_PARITY_CONTROL_INVERT_PAR 2
+#define JBUS_PARITY_CONTROL_INVERT_PAR_MASK 0xf
+#define JBUS_PARITY_CONTROL_NEXT_DATA 1
+#define JBUS_PARITY_CONTROL_NEXT_ADDR 0
+#define JBUS_SCRATCH_1 0x470018
+#define JBUS_SCRATCH_1_DATA 0
+#define JBUS_SCRATCH_1_DATA_MASK 0xffffffffffffffff
+#define JBUS_SCRATCH_2 0x470020
+#define JBUS_SCRATCH_2_DATA 0
+#define JBUS_SCRATCH_2_DATA_MASK 0xffffffffffffffff
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR 0x470028
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_SPARE_S_INT_EN 61
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_SPARE_S_INT_EN_MASK 0x7
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_PIO_UNMAP_RD_S_INT_EN 60
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_ILL_ACC_RD_S_INT_EN 59
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_EBUS_TO_S_LOG_EN 58
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_MB_PEA_S_INT_EN 57
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_MB_PER_S_INT_EN 56
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_MB_PEW_S_INT_EN 55
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_UE_ASYN_S_INT_EN 54
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_CE_ASYN_S_INT_EN 53
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JTE_S_INT_EN 52
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JBE_S_INT_EN 51
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JUE_S_INT_EN 50
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_IJP_S_INT_EN 49
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_ICISE_S_INT_EN 48
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_CPE_S_INT_EN 47
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_APE_S_INT_EN 46
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_WR_DPE_S_INT_EN 45
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_RD_DPE_S_INT_EN 44
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_ILL_BMW_S_INT_EN 43
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_ILL_BMR_S_INT_EN 42
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_BJC_S_INT_EN 41
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_PIO_UNMAP_S_INT_EN 40
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_PIO_DPE_S_INT_EN 39
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_PIO_CPE_S_INT_EN 38
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_ILL_ACC_S_INT_EN 37
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_UNSOL_RD_S_INT_EN 36
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_UNSOL_INTR_S_INT_EN 35
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JTCEEW_S_INT_EN 34
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JTCEEI_S_INT_EN 33
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JTCEER_S_INT_EN 32
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_SPARE_P_INT_EN 29
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_SPARE_P_INT_EN_MASK 0x7
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_PIO_UNMAP_RD_P_INT_EN 28
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_ILL_ACC_RD_P_INT_EN 27
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_EBUS_TO_P_LOG_EN 26
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_MB_PEA_P_INT_EN 25
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_MB_PER_P_INT_EN 24
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_MB_PEW_P_INT_EN 23
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_UE_ASYN_P_INT_EN 22
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_CE_ASYN_P_INT_EN 21
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JTE_P_INT_EN 20
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JBE_P_INT_EN 19
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JUE_P_INT_EN 18
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_IJP_P_INT_EN 17
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_ICISE_P_INT_EN 16
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_CPE_P_INT_EN 15
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_APE_P_INT_EN 14
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_WR_DPE_P_INT_EN 13
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_RD_DPE_P_INT_EN 12
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_ILL_BMW_P_INT_EN 11
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_ILL_BMR_P_INT_EN 10
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_BJC_P_INT_EN 9
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_PIO_UNMAP_P_INT_EN 8
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_PIO_DPE_P_INT_EN 7
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_PIO_CPE_P_INT_EN 6
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_ILL_ACC_P_INT_EN 5
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_UNSOL_RD_P_INT_EN 4
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_UNSOL_INTR_P_INT_EN 3
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JTCEEW_P_INT_EN 2
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JTCEEI_P_INT_EN 1
+#define JBC_ERR_LG_ANLYZ_TRIG_ENB_FOR_J_ERR_JTCEER_P_INT_EN 0
+#define JBUS_SCRATCH_PERSISTENT 0x470030
+#define JBUS_SCRATCH_PERSISTENT_DATA 0
+#define JBUS_SCRATCH_PERSISTENT_DATA_MASK 0xffffffffffffffff
+#define JBC_ERROR_LOG_ENABLE 0x471000
+#define JBC_ERROR_LOG_ENABLE_SPARE_LOG_EN 29
+#define JBC_ERROR_LOG_ENABLE_SPARE_LOG_EN_MASK 0x7
+#define JBC_ERROR_LOG_ENABLE_PIO_UNMAP_RD_LOG_EN 28
+#define JBC_ERROR_LOG_ENABLE_ILL_ACC_RD_LOG_EN 27
+#define JBC_ERROR_LOG_ENABLE_EBUS_TO_LOG_EN 26
+#define JBC_ERROR_LOG_ENABLE_MB_PEA_LOG_EN 25
+#define JBC_ERROR_LOG_ENABLE_MB_PER_LOG_EN 24
+#define JBC_ERROR_LOG_ENABLE_MB_PEW_LOG_EN 23
+#define JBC_ERROR_LOG_ENABLE_UE_ASYN_LOG_EN 22
+#define JBC_ERROR_LOG_ENABLE_CE_ASYN_LOG_EN 21
+#define JBC_ERROR_LOG_ENABLE_JTE_LOG_EN 20
+#define JBC_ERROR_LOG_ENABLE_JBE_LOG_EN 19
+#define JBC_ERROR_LOG_ENABLE_JUE_LOG_EN 18
+#define JBC_ERROR_LOG_ENABLE_IJP_LOG_EN 17
+#define JBC_ERROR_LOG_ENABLE_ICISE_LOG_EN 16
+#define JBC_ERROR_LOG_ENABLE_CPE_LOG_EN 15
+#define JBC_ERROR_LOG_ENABLE_APE_LOG_EN 14
+#define JBC_ERROR_LOG_ENABLE_WR_DPE_LOG_EN 13
+#define JBC_ERROR_LOG_ENABLE_RD_DPE_LOG_EN 12
+#define JBC_ERROR_LOG_ENABLE_ILL_BMW_LOG_EN 11
+#define JBC_ERROR_LOG_ENABLE_ILL_BMR_LOG_EN 10
+#define JBC_ERROR_LOG_ENABLE_BJC_LOG_EN 9
+#define JBC_ERROR_LOG_ENABLE_PIO_UNMAP_LOG_EN 8
+#define JBC_ERROR_LOG_ENABLE_PIO_DPE_LOG_EN 7
+#define JBC_ERROR_LOG_ENABLE_PIO_CPE_LOG_EN 6
+#define JBC_ERROR_LOG_ENABLE_ILL_ACC_LOG_EN 5
+#define JBC_ERROR_LOG_ENABLE_UNSOL_RD_LOG_EN 4
+#define JBC_ERROR_LOG_ENABLE_UNSOL_INTR_LOG_EN 3
+#define JBC_ERROR_LOG_ENABLE_JTCEEW_LOG_EN 2
+#define JBC_ERROR_LOG_ENABLE_JTCEEI_LOG_EN 1
+#define JBC_ERROR_LOG_ENABLE_JTCEER_LOG_EN 0
+#define JBC_INTERRUPT_ENABLE 0x471008
+#define JBC_INTERRUPT_ENABLE_SPARE_S_INT_EN 61
+#define JBC_INTERRUPT_ENABLE_SPARE_S_INT_EN_MASK 0x7
+#define JBC_INTERRUPT_ENABLE_PIO_UNMAP_RD_S_INT_EN 60
+#define JBC_INTERRUPT_ENABLE_ILL_ACC_RD_S_INT_EN 59
+#define JBC_INTERRUPT_ENABLE_EBUS_TO_S_LOG_EN 58
+#define JBC_INTERRUPT_ENABLE_MB_PEA_S_INT_EN 57
+#define JBC_INTERRUPT_ENABLE_MB_PER_S_INT_EN 56
+#define JBC_INTERRUPT_ENABLE_MB_PEW_S_INT_EN 55
+#define JBC_INTERRUPT_ENABLE_UE_ASYN_S_INT_EN 54
+#define JBC_INTERRUPT_ENABLE_CE_ASYN_S_INT_EN 53
+#define JBC_INTERRUPT_ENABLE_JTE_S_INT_EN 52
+#define JBC_INTERRUPT_ENABLE_JBE_S_INT_EN 51
+#define JBC_INTERRUPT_ENABLE_JUE_S_INT_EN 50
+#define JBC_INTERRUPT_ENABLE_IJP_S_INT_EN 49
+#define JBC_INTERRUPT_ENABLE_ICISE_S_INT_EN 48
+#define JBC_INTERRUPT_ENABLE_CPE_S_INT_EN 47
+#define JBC_INTERRUPT_ENABLE_APE_S_INT_EN 46
+#define JBC_INTERRUPT_ENABLE_WR_DPE_S_INT_EN 45
+#define JBC_INTERRUPT_ENABLE_RD_DPE_S_INT_EN 44
+#define JBC_INTERRUPT_ENABLE_ILL_BMW_S_INT_EN 43
+#define JBC_INTERRUPT_ENABLE_ILL_BMR_S_INT_EN 42
+#define JBC_INTERRUPT_ENABLE_BJC_S_INT_EN 41
+#define JBC_INTERRUPT_ENABLE_PIO_UNMAP_S_INT_EN 40
+#define JBC_INTERRUPT_ENABLE_PIO_DPE_S_INT_EN 39
+#define JBC_INTERRUPT_ENABLE_PIO_CPE_S_INT_EN 38
+#define JBC_INTERRUPT_ENABLE_ILL_ACC_S_INT_EN 37
+#define JBC_INTERRUPT_ENABLE_UNSOL_RD_S_INT_EN 36
+#define JBC_INTERRUPT_ENABLE_UNSOL_INTR_S_INT_EN 35
+#define JBC_INTERRUPT_ENABLE_JTCEEW_S_INT_EN 34
+#define JBC_INTERRUPT_ENABLE_JTCEEI_S_INT_EN 33
+#define JBC_INTERRUPT_ENABLE_JTCEER_S_INT_EN 32
+#define JBC_INTERRUPT_ENABLE_SPARE_P_INT_EN 29
+#define JBC_INTERRUPT_ENABLE_SPARE_P_INT_EN_MASK 0x7
+#define JBC_INTERRUPT_ENABLE_PIO_UNMAP_RD_P_INT_EN 28
+#define JBC_INTERRUPT_ENABLE_ILL_ACC_RD_P_INT_EN 27
+#define JBC_INTERRUPT_ENABLE_EBUS_TO_P_LOG_EN 26
+#define JBC_INTERRUPT_ENABLE_MB_PEA_P_INT_EN 25
+#define JBC_INTERRUPT_ENABLE_MB_PER_P_INT_EN 24
+#define JBC_INTERRUPT_ENABLE_MB_PEW_P_INT_EN 23
+#define JBC_INTERRUPT_ENABLE_UE_ASYN_P_INT_EN 22
+#define JBC_INTERRUPT_ENABLE_CE_ASYN_P_INT_EN 21
+#define JBC_INTERRUPT_ENABLE_JTE_P_INT_EN 20
+#define JBC_INTERRUPT_ENABLE_JBE_P_INT_EN 19
+#define JBC_INTERRUPT_ENABLE_JUE_P_INT_EN 18
+#define JBC_INTERRUPT_ENABLE_IJP_P_INT_EN 17
+#define JBC_INTERRUPT_ENABLE_ICISE_P_INT_EN 16
+#define JBC_INTERRUPT_ENABLE_CPE_P_INT_EN 15
+#define JBC_INTERRUPT_ENABLE_APE_P_INT_EN 14
+#define JBC_INTERRUPT_ENABLE_WR_DPE_P_INT_EN 13
+#define JBC_INTERRUPT_ENABLE_RD_DPE_P_INT_EN 12
+#define JBC_INTERRUPT_ENABLE_ILL_BMW_P_INT_EN 11
+#define JBC_INTERRUPT_ENABLE_ILL_BMR_P_INT_EN 10
+#define JBC_INTERRUPT_ENABLE_BJC_P_INT_EN 9
+#define JBC_INTERRUPT_ENABLE_PIO_UNMAP_P_INT_EN 8
+#define JBC_INTERRUPT_ENABLE_PIO_DPE_P_INT_EN 7
+#define JBC_INTERRUPT_ENABLE_PIO_CPE_P_INT_EN 6
+#define JBC_INTERRUPT_ENABLE_ILL_ACC_P_INT_EN 5
+#define JBC_INTERRUPT_ENABLE_UNSOL_RD_P_INT_EN 4
+#define JBC_INTERRUPT_ENABLE_UNSOL_INTR_P_INT_EN 3
+#define JBC_INTERRUPT_ENABLE_JTCEEW_P_INT_EN 2
+#define JBC_INTERRUPT_ENABLE_JTCEEI_P_INT_EN 1
+#define JBC_INTERRUPT_ENABLE_JTCEER_P_INT_EN 0
+#define JBC_INTERRUPT_STATUS 0x471010
+#define JBC_INTERRUPT_STATUS_SPARE_S 61
+#define JBC_INTERRUPT_STATUS_SPARE_S_MASK 0x7
+#define JBC_INTERRUPT_STATUS_PIO_UNMAP_RD_S 60
+#define JBC_INTERRUPT_STATUS_ILL_ACC_RD_S 59
+#define JBC_INTERRUPT_STATUS_EBUS_TO_S 58
+#define JBC_INTERRUPT_STATUS_MB_PEA_S 57
+#define JBC_INTERRUPT_STATUS_MB_PER_S 56
+#define JBC_INTERRUPT_STATUS_MB_PEW_S 55
+#define JBC_INTERRUPT_STATUS_UE_ASYN_S 54
+#define JBC_INTERRUPT_STATUS_CE_ASYN_S 53
+#define JBC_INTERRUPT_STATUS_JTE_S 52
+#define JBC_INTERRUPT_STATUS_JBE_S 51
+#define JBC_INTERRUPT_STATUS_JUE_S 50
+#define JBC_INTERRUPT_STATUS_IJP_S 49
+#define JBC_INTERRUPT_STATUS_ICISE_S 48
+#define JBC_INTERRUPT_STATUS_CPE_S 47
+#define JBC_INTERRUPT_STATUS_APE_S 46
+#define JBC_INTERRUPT_STATUS_WR_DPE_S 45
+#define JBC_INTERRUPT_STATUS_RD_DPE_S 44
+#define JBC_INTERRUPT_STATUS_ILL_BMW_S 43
+#define JBC_INTERRUPT_STATUS_ILL_BMR_S 42
+#define JBC_INTERRUPT_STATUS_BJC_S 41
+#define JBC_INTERRUPT_STATUS_PIO_UNMAP_S 40
+#define JBC_INTERRUPT_STATUS_PIO_DPE_S 39
+#define JBC_INTERRUPT_STATUS_PIO_CPE_S 38
+#define JBC_INTERRUPT_STATUS_ILL_ACC_S 37
+#define JBC_INTERRUPT_STATUS_UNSOL_RD_S 36
+#define JBC_INTERRUPT_STATUS_UNSOL_INTR_S 35
+#define JBC_INTERRUPT_STATUS_JTCEEW_S 34
+#define JBC_INTERRUPT_STATUS_JTCEEI_S 33
+#define JBC_INTERRUPT_STATUS_JTCEER_S 32
+#define JBC_INTERRUPT_STATUS_SPARE_P 29
+#define JBC_INTERRUPT_STATUS_SPARE_P_MASK 0x7
+#define JBC_INTERRUPT_STATUS_PIO_UNMAP_RD_P 28
+#define JBC_INTERRUPT_STATUS_ILL_ACC_RD_P 27
+#define JBC_INTERRUPT_STATUS_EBUS_TO_P 26
+#define JBC_INTERRUPT_STATUS_MB_PEA_P 25
+#define JBC_INTERRUPT_STATUS_MB_PER_P 24
+#define JBC_INTERRUPT_STATUS_MB_PEW_P 23
+#define JBC_INTERRUPT_STATUS_UE_ASYN_P 22
+#define JBC_INTERRUPT_STATUS_CE_ASYN_P 21
+#define JBC_INTERRUPT_STATUS_JTE_P 20
+#define JBC_INTERRUPT_STATUS_JBE_P 19
+#define JBC_INTERRUPT_STATUS_JUE_P 18
+#define JBC_INTERRUPT_STATUS_IJP_P 17
+#define JBC_INTERRUPT_STATUS_ICISE_P 16
+#define JBC_INTERRUPT_STATUS_CPE_P 15
+#define JBC_INTERRUPT_STATUS_APE_P 14
+#define JBC_INTERRUPT_STATUS_WR_DPE_P 13
+#define JBC_INTERRUPT_STATUS_RD_DPE_P 12
+#define JBC_INTERRUPT_STATUS_ILL_BMW_P 11
+#define JBC_INTERRUPT_STATUS_ILL_BMR_P 10
+#define JBC_INTERRUPT_STATUS_BJC_P 9
+#define JBC_INTERRUPT_STATUS_PIO_UNMAP_P 8
+#define JBC_INTERRUPT_STATUS_PIO_DPE_P 7
+#define JBC_INTERRUPT_STATUS_PIO_CPE_P 6
+#define JBC_INTERRUPT_STATUS_ILL_ACC_P 5
+#define JBC_INTERRUPT_STATUS_UNSOL_RD_P 4
+#define JBC_INTERRUPT_STATUS_UNSOL_INTR_P 3
+#define JBC_INTERRUPT_STATUS_JTCEEW_P 2
+#define JBC_INTERRUPT_STATUS_JTCEEI_P 1
+#define JBC_INTERRUPT_STATUS_JTCEER_P 0
+#define JBC_ERROR_STATUS_CLEAR 0x471018
+#define JBC_ERROR_STATUS_CLEAR_SPARE_S 61
+#define JBC_ERROR_STATUS_CLEAR_SPARE_S_MASK 0x7
+#define JBC_ERROR_STATUS_CLEAR_PIO_UNMAP_RD_S 60
+#define JBC_ERROR_STATUS_CLEAR_ILL_ACC_RD_S 59
+#define JBC_ERROR_STATUS_CLEAR_EBUS_TO_S 58
+#define JBC_ERROR_STATUS_CLEAR_MB_PEA_S 57
+#define JBC_ERROR_STATUS_CLEAR_MB_PER_S 56
+#define JBC_ERROR_STATUS_CLEAR_MB_PEW_S 55
+#define JBC_ERROR_STATUS_CLEAR_UE_ASYN_S 54
+#define JBC_ERROR_STATUS_CLEAR_CE_ASYN_S 53
+#define JBC_ERROR_STATUS_CLEAR_JTE_S 52
+#define JBC_ERROR_STATUS_CLEAR_JBE_S 51
+#define JBC_ERROR_STATUS_CLEAR_JUE_S 50
+#define JBC_ERROR_STATUS_CLEAR_IJP_S 49
+#define JBC_ERROR_STATUS_CLEAR_ICISE_S 48
+#define JBC_ERROR_STATUS_CLEAR_CPE_S 47
+#define JBC_ERROR_STATUS_CLEAR_APE_S 46
+#define JBC_ERROR_STATUS_CLEAR_WR_DPE_S 45
+#define JBC_ERROR_STATUS_CLEAR_RD_DPE_S 44
+#define JBC_ERROR_STATUS_CLEAR_ILL_BMW_S 43
+#define JBC_ERROR_STATUS_CLEAR_ILL_BMR_S 42
+#define JBC_ERROR_STATUS_CLEAR_BJC_S 41
+#define JBC_ERROR_STATUS_CLEAR_PIO_UNMAP_S 40
+#define JBC_ERROR_STATUS_CLEAR_PIO_DPE_S 39
+#define JBC_ERROR_STATUS_CLEAR_PIO_CPE_S 38
+#define JBC_ERROR_STATUS_CLEAR_ILL_ACC_S 37
+#define JBC_ERROR_STATUS_CLEAR_UNSOL_RD_S 36
+#define JBC_ERROR_STATUS_CLEAR_UNSOL_INTR_S 35
+#define JBC_ERROR_STATUS_CLEAR_JTCEEW_S 34
+#define JBC_ERROR_STATUS_CLEAR_JTCEEI_S 33
+#define JBC_ERROR_STATUS_CLEAR_JTCEER_S 32
+#define JBC_ERROR_STATUS_CLEAR_SPARE_P 29
+#define JBC_ERROR_STATUS_CLEAR_SPARE_P_MASK 0x7
+#define JBC_ERROR_STATUS_CLEAR_PIO_UNMAP_RD_P 28
+#define JBC_ERROR_STATUS_CLEAR_ILL_ACC_RD_P 27
+#define JBC_ERROR_STATUS_CLEAR_EBUS_TO_P 26
+#define JBC_ERROR_STATUS_CLEAR_MB_PEA_P 25
+#define JBC_ERROR_STATUS_CLEAR_MB_PER_P 24
+#define JBC_ERROR_STATUS_CLEAR_MB_PEW_P 23
+#define JBC_ERROR_STATUS_CLEAR_UE_ASYN_P 22
+#define JBC_ERROR_STATUS_CLEAR_CE_ASYN_P 21
+#define JBC_ERROR_STATUS_CLEAR_JTE_P 20
+#define JBC_ERROR_STATUS_CLEAR_JBE_P 19
+#define JBC_ERROR_STATUS_CLEAR_JUE_P 18
+#define JBC_ERROR_STATUS_CLEAR_IJP_P 17
+#define JBC_ERROR_STATUS_CLEAR_ICISE_P 16
+#define JBC_ERROR_STATUS_CLEAR_CPE_P 15
+#define JBC_ERROR_STATUS_CLEAR_APE_P 14
+#define JBC_ERROR_STATUS_CLEAR_WR_DPE_P 13
+#define JBC_ERROR_STATUS_CLEAR_RD_DPE_P 12
+#define JBC_ERROR_STATUS_CLEAR_ILL_BMW_P 11
+#define JBC_ERROR_STATUS_CLEAR_ILL_BMR_P 10
+#define JBC_ERROR_STATUS_CLEAR_BJC_P 9
+#define JBC_ERROR_STATUS_CLEAR_PIO_UNMAP_P 8
+#define JBC_ERROR_STATUS_CLEAR_PIO_DPE_P 7
+#define JBC_ERROR_STATUS_CLEAR_PIO_CPE_P 6
+#define JBC_ERROR_STATUS_CLEAR_ILL_ACC_P 5
+#define JBC_ERROR_STATUS_CLEAR_UNSOL_RD_P 4
+#define JBC_ERROR_STATUS_CLEAR_UNSOL_INTR_P 3
+#define JBC_ERROR_STATUS_CLEAR_JTCEEW_P 2
+#define JBC_ERROR_STATUS_CLEAR_JTCEEI_P 1
+#define JBC_ERROR_STATUS_CLEAR_JTCEER_P 0
+#define JBC_ERROR_STATUS_SET 0x471020
+#define JBC_ERROR_STATUS_SET_SPARE_S 61
+#define JBC_ERROR_STATUS_SET_SPARE_S_MASK 0xfc
+#define JBC_ERROR_STATUS_SET_PIO_UNMAP_RD_S 60
+#define JBC_ERROR_STATUS_SET_ILL_ACC_RD_S 59
+#define JBC_ERROR_STATUS_SET_EBUS_TO_S 58
+#define JBC_ERROR_STATUS_SET_MB_PEA_S 57
+#define JBC_ERROR_STATUS_SET_MB_PER_S 56
+#define JBC_ERROR_STATUS_SET_MB_PEW_S 55
+#define JBC_ERROR_STATUS_SET_UE_ASYN_S 54
+#define JBC_ERROR_STATUS_SET_CE_ASYN_S 53
+#define JBC_ERROR_STATUS_SET_JTE_S 52
+#define JBC_ERROR_STATUS_SET_JBE_S 51
+#define JBC_ERROR_STATUS_SET_JUE_S 50
+#define JBC_ERROR_STATUS_SET_IJP_S 49
+#define JBC_ERROR_STATUS_SET_ICISE_S 48
+#define JBC_ERROR_STATUS_SET_CPE_S 47
+#define JBC_ERROR_STATUS_SET_APE_S 46
+#define JBC_ERROR_STATUS_SET_WR_DPE_S 45
+#define JBC_ERROR_STATUS_SET_RD_DPE_S 44
+#define JBC_ERROR_STATUS_SET_ILL_BMW_S 43
+#define JBC_ERROR_STATUS_SET_ILL_BMR_S 42
+#define JBC_ERROR_STATUS_SET_BJC_S 41
+#define JBC_ERROR_STATUS_SET_PIO_UNMAP_S 40
+#define JBC_ERROR_STATUS_SET_PIO_DPE_S 39
+#define JBC_ERROR_STATUS_SET_PIO_CPE_S 38
+#define JBC_ERROR_STATUS_SET_ILL_ACC_S 37
+#define JBC_ERROR_STATUS_SET_UNSOL_RD_S 36
+#define JBC_ERROR_STATUS_SET_UNSOL_INTR_S 35
+#define JBC_ERROR_STATUS_SET_JTCEEW_S 34
+#define JBC_ERROR_STATUS_SET_JTCEEI_S 33
+#define JBC_ERROR_STATUS_SET_JTCEER_S 32
+#define JBC_ERROR_STATUS_SET_SPARE_P 29
+#define JBC_ERROR_STATUS_SET_SPARE_P_MASK 0xfc
+#define JBC_ERROR_STATUS_SET_PIO_UNMAP_RD_P 28
+#define JBC_ERROR_STATUS_SET_ILL_ACC_RD_P 27
+#define JBC_ERROR_STATUS_SET_EBUS_TO_P 26
+#define JBC_ERROR_STATUS_SET_MB_PEA_P 25
+#define JBC_ERROR_STATUS_SET_MB_PER_P 24
+#define JBC_ERROR_STATUS_SET_MB_PEW_P 23
+#define JBC_ERROR_STATUS_SET_UE_ASYN_P 22
+#define JBC_ERROR_STATUS_SET_CE_ASYN_P 21
+#define JBC_ERROR_STATUS_SET_JTE_P 20
+#define JBC_ERROR_STATUS_SET_JBE_P 19
+#define JBC_ERROR_STATUS_SET_JUE_P 18
+#define JBC_ERROR_STATUS_SET_IJP_P 17
+#define JBC_ERROR_STATUS_SET_ICISE_P 16
+#define JBC_ERROR_STATUS_SET_CPE_P 15
+#define JBC_ERROR_STATUS_SET_APE_P 14
+#define JBC_ERROR_STATUS_SET_WR_DPE_P 13
+#define JBC_ERROR_STATUS_SET_RD_DPE_P 12
+#define JBC_ERROR_STATUS_SET_ILL_BMW_P 11
+#define JBC_ERROR_STATUS_SET_ILL_BMR_P 10
+#define JBC_ERROR_STATUS_SET_BJC_P 9
+#define JBC_ERROR_STATUS_SET_PIO_UNMAP_P 8
+#define JBC_ERROR_STATUS_SET_PIO_DPE_P 7
+#define JBC_ERROR_STATUS_SET_PIO_CPE_P 6
+#define JBC_ERROR_STATUS_SET_ILL_ACC_P 5
+#define JBC_ERROR_STATUS_SET_UNSOL_RD_P 4
+#define JBC_ERROR_STATUS_SET_UNSOL_INTR_P 3
+#define JBC_ERROR_STATUS_SET_JTCEEW_P 2
+#define JBC_ERROR_STATUS_SET_JTCEEI_P 1
+#define JBC_ERROR_STATUS_SET_JTCEER_P 0
+#define JBC_FATAL_RESET_ENABLE 0x471028
+#define JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN 26
+#define JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN_MASK 0x3
+#define JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN 25
+#define JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN 15
+#define JBC_FATAL_RESET_ENABLE_APE_P_INT_EN 14
+#define JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN 6
+#define JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN 2
+#define JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN 1
+#define JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN 0
+#define JBCINT_IN_TRANSACTION_ERROR_LOG 0x471030
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_Q_WORD 54
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_Q_WORD_MASK 0x3
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_TRANSID 48
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_TRANSID_MASK 0x3f
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_ADDRESS 0
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_ADDRESS_MASK 0x7ffffffffff
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_2 0x471038
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_2_ARB_WIN 28
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_2_ARB_WIN_MASK 0xffffff
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_2_J_REQ 21
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_2_J_REQ_MASK 0x7f
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_2_J_PACK 0
+#define JBCINT_IN_TRANSACTION_ERROR_LOG_2_J_PACK_MASK 0x1fffff
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG 0x471040
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_TRANSID 48
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_TRANSID_MASK 0x3f
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_ADDRESS 0
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_ADDRESS_MASK 0x7ffffffffff
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_2 0x471048
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_2_ARB_WIN 28
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_2_ARB_WIN_MASK 0xffffff
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_2_J_REQ 21
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_2_J_REQ_MASK 0x7f
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_2_J_PACK 0
+#define JBCINT_OUT_TRANSACTION_ERROR_LOG_2_J_PACK_MASK 0x1fffff
+#define FATAL_ERROR_LOG_1 0x471050
+#define FATAL_ERROR_LOG_1_DATA 0
+#define FATAL_ERROR_LOG_1_DATA_MASK 0xffffffffffffffff
+#define FATAL_ERROR_LOG_2 0x471058
+#define FATAL_ERROR_LOG_2_ARB_WIN 28
+#define FATAL_ERROR_LOG_2_ARB_WIN_MASK 0xffffff
+#define FATAL_ERROR_LOG_2_J_REQ 21
+#define FATAL_ERROR_LOG_2_J_REQ_MASK 0x7f
+#define FATAL_ERROR_LOG_2_J_PACK 0
+#define FATAL_ERROR_LOG_2_J_PACK_MASK 0x1fffff
+#define MERGE_TRANSACTION_ERROR_LOG 0x471060
+#define MERGE_TRANSACTION_ERROR_LOG_Q_WORD 54
+#define MERGE_TRANSACTION_ERROR_LOG_Q_WORD_MASK 0x3
+#define MERGE_TRANSACTION_ERROR_LOG_TRANSID 48
+#define MERGE_TRANSACTION_ERROR_LOG_TRANSID_MASK 0x3f
+#define MERGE_TRANSACTION_ERROR_LOG_JBC_TAG 43
+#define MERGE_TRANSACTION_ERROR_LOG_JBC_TAG_MASK 0x1f
+#define MERGE_TRANSACTION_ERROR_LOG_ADDRESS 0
+#define MERGE_TRANSACTION_ERROR_LOG_ADDRESS_MASK 0x7ffffffffff
+#define DMCINT_ODCD_ERROR_LOG 0x471068
+#define DMCINT_ODCD_ERROR_LOG_TRANS_ID 52
+#define DMCINT_ODCD_ERROR_LOG_TRANS_ID_MASK 0x3
+#define DMCINT_ODCD_ERROR_LOG_AID 48
+#define DMCINT_ODCD_ERROR_LOG_AID_MASK 0xf
+#define DMCINT_ODCD_ERROR_LOG_TRANS_TYPE 43
+#define DMCINT_ODCD_ERROR_LOG_TRANS_TYPE_MASK 0x1f
+#define DMCINT_ODCD_ERROR_LOG_ADDRESS 0
+#define DMCINT_ODCD_ERROR_LOG_ADDRESS_MASK 0x7ffffffffff
+#define DMCINT_IDC_ERROR_LOG 0x471070
+#define DMCINT_IDC_ERROR_LOG_DMC_CTAG 16
+#define DMCINT_IDC_ERROR_LOG_DMC_CTAG_MASK 0xfff
+#define DMCINT_IDC_ERROR_LOG_TRANSID 14
+#define DMCINT_IDC_ERROR_LOG_TRANSID_MASK 0x3
+#define DMCINT_IDC_ERROR_LOG_AGNTID 10
+#define DMCINT_IDC_ERROR_LOG_AGNTID_MASK 0xf
+#define DMCINT_IDC_ERROR_LOG_SRCID 5
+#define DMCINT_IDC_ERROR_LOG_SRCID_MASK 0x1f
+#define DMCINT_IDC_ERROR_LOG_TARGID 0
+#define DMCINT_IDC_ERROR_LOG_TARGID_MASK 0x1f
+#define CSR_ERROR_LOG 0x471078
+#define CSR_ERROR_LOG_WRITE 42
+#define CSR_ERROR_LOG_BMASK 26
+#define CSR_ERROR_LOG_BMASK_MASK 0xffff
+#define CSR_ERROR_LOG_ADDRESS 0
+#define CSR_ERROR_LOG_ADDRESS_MASK 0x3ffffff
+#define JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE 0x471800
+#define JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE_JBC 63
+#define JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE_CSR 3
+#define JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE_MERGE 2
+#define JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE_JBCINT 1
+#define JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE_DMCINT 0
+#define JBC_CORE_AND_BLOCK_ERROR_STATUS 0x471808
+#define JBC_CORE_AND_BLOCK_ERROR_STATUS_CSR 3
+#define JBC_CORE_AND_BLOCK_ERROR_STATUS_MERGE 2
+#define JBC_CORE_AND_BLOCK_ERROR_STATUS_JBCINT 1
+#define JBC_CORE_AND_BLOCK_ERROR_STATUS_DMCINT 0
+#define JBC_PERFORMANCE_COUNTER_SELECT 0x472000
+#define JBC_PERFORMANCE_COUNTER_SELECT_SEL1 8
+#define JBC_PERFORMANCE_COUNTER_SELECT_SEL1_MASK 0xff
+#define JBC_PERFORMANCE_COUNTER_SELECT_SEL0 0
+#define JBC_PERFORMANCE_COUNTER_SELECT_SEL0_MASK 0xff
+#define JBC_PERFORMANCE_COUNTER_ZERO 0x472008
+#define JBC_PERFORMANCE_COUNTER_ZERO_CNT 0
+#define JBC_PERFORMANCE_COUNTER_ZERO_CNT_MASK 0xffffffffffffffff
+#define JBC_PERFORMANCE_COUNTER_ONE 0x472010
+#define JBC_PERFORMANCE_COUNTER_ONE_CNT 0
+#define JBC_PERFORMANCE_COUNTER_ONE_CNT_MASK 0xffffffffffffffff
+#define FIRE_AND_JBC_DEBUG_SELECT_A 0x473000
+#define FIRE_AND_JBC_DEBUG_SELECT_A_CORE_SEL 10
+#define FIRE_AND_JBC_DEBUG_SELECT_A_CORE_SEL_MASK 0x3
+#define FIRE_AND_JBC_DEBUG_SELECT_A_BLOCK_SEL 6
+#define FIRE_AND_JBC_DEBUG_SELECT_A_BLOCK_SEL_MASK 0x7
+#define FIRE_AND_JBC_DEBUG_SELECT_A_SUB_SEL 3
+#define FIRE_AND_JBC_DEBUG_SELECT_A_SUB_SEL_MASK 0x7
+#define FIRE_AND_JBC_DEBUG_SELECT_A_SIGNAL_SEL 0
+#define FIRE_AND_JBC_DEBUG_SELECT_A_SIGNAL_SEL_MASK 0x7
+#define FIRE_AND_JBC_DEBUG_SELECT_B 0x473008
+#define FIRE_AND_JBC_DEBUG_SELECT_B_CORE_SEL 10
+#define FIRE_AND_JBC_DEBUG_SELECT_B_CORE_SEL_MASK 0x3
+#define FIRE_AND_JBC_DEBUG_SELECT_B_BLOCK_SEL 6
+#define FIRE_AND_JBC_DEBUG_SELECT_B_BLOCK_SEL_MASK 0x7
+#define FIRE_AND_JBC_DEBUG_SELECT_B_SUB_SEL 3
+#define FIRE_AND_JBC_DEBUG_SELECT_B_SUB_SEL_MASK 0x7
+#define FIRE_AND_JBC_DEBUG_SELECT_B_SIGNAL_SEL 0
+#define FIRE_AND_JBC_DEBUG_SELECT_B_SIGNAL_SEL_MASK 0x7
+
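The defines above and throughout this header follow one convention: a register name by itself is the offset of the register, a REGISTER_FIELD name is the bit position of that field's least significant bit, and REGISTER_FIELD_MASK is a right-justified mask of the field's width (single-bit fields carry no mask). A minimal sketch of how such defines are typically consumed; the helper macros and decode function below are illustrative only, not part of this header or of the px driver:

#include <sys/types.h>

/*
 * Hypothetical helpers, assuming the shift/right-justified-mask
 * convention described above.
 */
#define	PX_FIELD_GET(val, FIELD) \
	(((val) >> (FIELD)) & (FIELD ## _MASK))
#define	PX_FIELD_PUT(FIELD, v) \
	(((uint64_t)(v) & (FIELD ## _MASK)) << (FIELD))

/*
 * Example: decode the arbitration-winner field from a value previously
 * read out of JBCINT_IN_TRANSACTION_ERROR_LOG_2 (the read is elided).
 */
static uint64_t
jbcint_in_arb_win(uint64_t log2_val)
{
	return (PX_FIELD_GET(log2_val,
	    JBCINT_IN_TRANSACTION_ERROR_LOG_2_ARB_WIN));
}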
+/* iss.csr ISS module defines */
+
+#define ISS_CSR_BASE 0x600000
+#define INTERRUPT_MAPPING 0x1000
+#define INTERRUPT_MAPPING_ENTRIES 64
+#define INTERRUPT_MAPPING_ENTRIES_MDO_MODE 63
+#define INTERRUPT_MAPPING_ENTRIES_V 31
+#define INTERRUPT_MAPPING_ENTRIES_T_JPID 26
+#define INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK 0x1f
+#define INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM 6
+#define INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK 0xf
+
+/* Reserved 0x1200 - 0x13f8 */
+
+#define INTERRUPT_CLEAR 0x1400
+#define INTERRUPT_CLEAR_ENTRIES 64
+#define INTERRUPT_CLEAR_ENTRIES_INT_STATE 0
+#define INTERRUPT_CLEAR_ENTRIES_INT_STATE_MASK 0x3
+
+/* Reserved 0x1600 - 0x17f8 */
+
+
+/* Reserved 0x1808 - 0x19f8 */
+
+#define INTERRUPT_RETRY_TIMER 0x1a00
+#define INTERRUPT_RETRY_TIMER_LIMIT 0
+#define INTERRUPT_RETRY_TIMER_LIMIT_MASK 0x1ffffff
+
+/* Reserved 0x1a08 - 0x1a08 */
+
+#define INTERRUPT_STATE_STATUS_1 0x1a10
+#define INTERRUPT_STATE_STATUS_1_STATE 0
+#define INTERRUPT_STATE_STATUS_1_STATE_MASK 0xffffffffffffffff
+#define INTERRUPT_STATE_STATUS_2 0x1a18
+#define INTERRUPT_STATE_STATUS_2_STATE 0
+#define INTERRUPT_STATE_STATUS_2_STATE_MASK 0xffffffffffffffff
+
+/* intx.csr INTX module defines */
+
+#define INTX_CSR_BASE 0x600000
+#define INTX_STATUS 0xb000
+#define INTX_STATUS_INT_A 3
+#define INTX_STATUS_INT_B 2
+#define INTX_STATUS_INT_C 1
+#define INTX_STATUS_INT_D 0
+#define INT_A_CLEAR 0xb008
+#define INT_A_CLEAR_CLR 0
+#define INT_B_CLEAR 0xb010
+#define INT_B_CLEAR_CLR 0
+#define INT_C_CLEAR 0xb018
+#define INT_C_CLEAR_CLR 0
+#define INT_D_CLEAR 0xb020
+#define INT_D_CLEAR_CLR 0
+
+/* eqs.csr EQS module defines */
+
+#define EQS_CSR_BASE 0x600000
+#define EVENT_QUEUE_BASE_ADDRESS 0x10000
+#define EVENT_QUEUE_BASE_ADDRESS_ADDRESS 19
+#define EVENT_QUEUE_BASE_ADDRESS_ADDRESS_MASK 0x1fffffffffff
+
+/* Reserved 0x10008 - 0x10ff8 */
+
+#define EVENT_QUEUE_CONTROL_SET 0x11000
+#define EVENT_QUEUE_CONTROL_SET_ENTRIES 36
+#define EVENT_QUEUE_CONTROL_SET_ENTRIES_ENOVERR 57
+#define EVENT_QUEUE_CONTROL_SET_ENTRIES_EN 44
+
+/* Reserved 0x11120 - 0x111f8 */
+
+#define EVENT_QUEUE_CONTROL_CLEAR 0x11200
+#define EVENT_QUEUE_CONTROL_CLEAR_ENTRIES 36
+#define EVENT_QUEUE_CONTROL_CLEAR_ENTRIES_COVERR 57
+#define EVENT_QUEUE_CONTROL_CLEAR_ENTRIES_E2I 47
+#define EVENT_QUEUE_CONTROL_CLEAR_ENTRIES_DIS 44
+
+/* Reserved 0x11320 - 0x113f8 */
+
+#define EVENT_QUEUE_STATE 0x11400
+#define EVENT_QUEUE_STATE_ENTRIES 36
+#define EVENT_QUEUE_STATE_ENTRIES_STATE 0
+#define EVENT_QUEUE_STATE_ENTRIES_STATE_MASK 0x7
+
+/* Reserved 0x11520 - 0x115f8 */
+
+#define EVENT_QUEUE_TAIL 0x11600
+#define EVENT_QUEUE_TAIL_ENTRIES 36
+#define EVENT_QUEUE_TAIL_ENTRIES_OVERR 57
+#define EVENT_QUEUE_TAIL_ENTRIES_TAIL 0
+#define EVENT_QUEUE_TAIL_ENTRIES_TAIL_MASK 0x7f
+
+/* Reserved 0x11720 - 0x117f8 */
+
+#define EVENT_QUEUE_HEAD 0x11800
+#define EVENT_QUEUE_HEAD_ENTRIES 36
+#define EVENT_QUEUE_HEAD_ENTRIES_HEAD 0
+#define EVENT_QUEUE_HEAD_ENTRIES_HEAD_MASK 0x7f
+
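Several registers in this ISS/EQS region are register arrays: the *_ENTRIES define gives the number of instances, and the reserved ranges that immediately follow imply an 8-byte stride (INTERRUPT_MAPPING at 0x1000 with 64 entries runs up to the reserved range at 0x1200, EVENT_QUEUE_TAIL at 0x11600 with 36 entries up to 0x11720, and so on). A hedged sketch of addressing one instance under that assumption, further assuming the per-register offsets add to the module's *_CSR_BASE as the parallel base defines suggest; the function is illustrative, not from the driver:

#include <sys/types.h>
#include <sys/debug.h>

/*
 * Hypothetical: offset, within the device CSR space, of event queue
 * 'eq' instance of EVENT_QUEUE_TAIL, assuming the 8-byte stride and
 * base-plus-offset composition noted above.
 */
static uint64_t
eq_tail_offset(uint_t eq)
{
	ASSERT(eq < EVENT_QUEUE_TAIL_ENTRIES);
	return (EQS_CSR_BASE + EVENT_QUEUE_TAIL + ((uint64_t)eq << 3));
}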
+/* msi.csr MSI module defines */
+
+#define MSI_CSR_BASE 0x600000
+#define MSI_MAPPING 0x20000
+#define MSI_MAPPING_ENTRIES 256
+#define MSI_MAPPING_ENTRIES_V 63
+#define MSI_MAPPING_ENTRIES_EQWR_N 62
+#define MSI_MAPPING_ENTRIES_EQNUM 0
+#define MSI_MAPPING_ENTRIES_EQNUM_MASK 0x3f
+
+/* Reserved 0x20800 - 0x27ff8 */
+
+#define MSI_CLEAR 0x28000
+#define MSI_CLEAR_ENTRIES 256
+#define MSI_CLEAR_ENTRIES_EQWR_N 62
+
+/* Reserved 0x28800 - 0x2bff8 */
+
+#define INTERRUPT_MONDO_DATA_0 0x2c000
+#define INTERRUPT_MONDO_DATA_0_DATA 6
+#define INTERRUPT_MONDO_DATA_0_DATA_MASK 0x3ffffffffffffff
+#define INTERRUPT_MONDO_DATA_1 0x2c008
+#define INTERRUPT_MONDO_DATA_1_DATA 0
+#define INTERRUPT_MONDO_DATA_1_DATA_MASK 0xffffffffffffffff
+
+/* mess.csr MESS module defines */
+
+#define MESS_CSR_BASE 0x600000
+#define ERR_COR_MAPPING 0x30000
+#define ERR_COR_MAPPING_V 63
+#define ERR_COR_MAPPING_EQNUM 0
+#define ERR_COR_MAPPING_EQNUM_MASK 0x3f
+#define ERR_NONFATAL_MAPPING 0x30008
+#define ERR_NONFATAL_MAPPING_V 63
+#define ERR_NONFATAL_MAPPING_EQNUM 0
+#define ERR_NONFATAL_MAPPING_EQNUM_MASK 0x3f
+#define ERR_FATAL_MAPPING 0x30010
+#define ERR_FATAL_MAPPING_V 63
+#define ERR_FATAL_MAPPING_EQNUM 0
+#define ERR_FATAL_MAPPING_EQNUM_MASK 0x3f
+#define PM_PME_MAPPING 0x30018
+#define PM_PME_MAPPING_V 63
+#define PM_PME_MAPPING_EQNUM 0
+#define PM_PME_MAPPING_EQNUM_MASK 0x3f
+#define PME_TO_ACK_MAPPING 0x30020
+#define PME_TO_ACK_MAPPING_V 63
+#define PME_TO_ACK_MAPPING_EQNUM 0
+#define PME_TO_ACK_MAPPING_EQNUM_MASK 0x3f
+
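The five message-mapping registers above share one layout: a valid bit at 63 and a 6-bit event queue number at bit 0. A small illustrative sketch (not from the driver) of composing such a value before it is written out through whatever CSR-write primitive the driver defines elsewhere:

#include <sys/types.h>

/*
 * Hypothetical: build an ERR_COR_MAPPING value that routes correctable-
 * error messages to event queue 'eqnum' and marks the mapping valid.
 */
static uint64_t
err_cor_mapping_value(uint_t eqnum)
{
	return (((uint64_t)1 << ERR_COR_MAPPING_V) |
	    (((uint64_t)eqnum & ERR_COR_MAPPING_EQNUM_MASK) <<
	    ERR_COR_MAPPING_EQNUM));
}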
+/* ics.csr ICS module defines */
+
+#define ICS_CSR_BASE 0x600000
+#define IMU_ERROR_LOG_ENABLE 0x31000
+#define IMU_ERROR_LOG_ENABLE_SPARE_LOG_EN 10
+#define IMU_ERROR_LOG_ENABLE_SPARE_LOG_EN_MASK 0x1f
+#define IMU_ERROR_LOG_ENABLE_EQ_OVER_LOG_EN 9
+#define IMU_ERROR_LOG_ENABLE_EQ_NOT_EN_LOG_EN 8
+#define IMU_ERROR_LOG_ENABLE_MSI_MAL_ERR_LOG_EN 7
+#define IMU_ERROR_LOG_ENABLE_MSI_PAR_ERR_LOG_EN 6
+#define IMU_ERROR_LOG_ENABLE_PMEACK_MES_NOT_EN_LOG_EN 5
+#define IMU_ERROR_LOG_ENABLE_PMPME_MES_NOT_EN_LOG_EN 4
+#define IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN 3
+#define IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN 2
+#define IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN 1
+#define IMU_ERROR_LOG_ENABLE_MSI_NOT_EN_LOG_EN 0
+#define IMU_INTERRUPT_ENABLE 0x31008
+#define IMU_INTERRUPT_ENABLE_SPARE_S_INT_EN 42
+#define IMU_INTERRUPT_ENABLE_SPARE_S_INT_EN_MASK 0x1f
+#define IMU_INTERRUPT_ENABLE_EQ_OVER_S_INT_EN 41
+#define IMU_INTERRUPT_ENABLE_EQ_NOT_EN_S_INT_EN 40
+#define IMU_INTERRUPT_ENABLE_MSI_MAL_ERR_S_INT_EN 39
+#define IMU_INTERRUPT_ENABLE_MSI_PAR_ERR_S_INT_EN 38
+#define IMU_INTERRUPT_ENABLE_PMEACK_MES_NOT_EN_S_INT_EN 37
+#define IMU_INTERRUPT_ENABLE_PMPME_MES_NOT_EN_S_INT_EN 36
+#define IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN 35
+#define IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN 34
+#define IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN 33
+#define IMU_INTERRUPT_ENABLE_MSI_NOT_EN_S_INT_EN 32
+#define IMU_INTERRUPT_ENABLE_SPARE_P_INT_EN 10
+#define IMU_INTERRUPT_ENABLE_SPARE_P_INT_EN_MASK 0x1f
+#define IMU_INTERRUPT_ENABLE_EQ_OVER_P_INT_EN 9
+#define IMU_INTERRUPT_ENABLE_EQ_NOT_EN_P_INT_EN 8
+#define IMU_INTERRUPT_ENABLE_MSI_MAL_ERR_P_INT_EN 7
+#define IMU_INTERRUPT_ENABLE_MSI_PAR_ERR_P_INT_EN 6
+#define IMU_INTERRUPT_ENABLE_PMEACK_MES_NOT_EN_P_INT_EN 5
+#define IMU_INTERRUPT_ENABLE_PMPME_MES_NOT_EN_P_INT_EN 4
+#define IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN 3
+#define IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN 2
+#define IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN 1
+#define IMU_INTERRUPT_ENABLE_MSI_NOT_EN_P_INT_EN 0
+#define IMU_INTERRUPT_STATUS 0x31010
+#define IMU_INTERRUPT_STATUS_SPARE_S 42
+#define IMU_INTERRUPT_STATUS_SPARE_S_MASK 0x1f
+#define IMU_INTERRUPT_STATUS_EQ_OVER_S 41
+#define IMU_INTERRUPT_STATUS_EQ_NOT_EN_S 40
+#define IMU_INTERRUPT_STATUS_MSI_MAL_ERR_S 39
+#define IMU_INTERRUPT_STATUS_MSI_PAR_ERR_S 38
+#define IMU_INTERRUPT_STATUS_PMEACK_MES_NOT_EN_S 37
+#define IMU_INTERRUPT_STATUS_PMPME_MES_NOT_EN_S 36
+#define IMU_INTERRUPT_STATUS_FATAL_MES_NOT_EN_S 35
+#define IMU_INTERRUPT_STATUS_NONFATAL_MES_NOT_EN_S 34
+#define IMU_INTERRUPT_STATUS_COR_MES_NOT_EN_S 33
+#define IMU_INTERRUPT_STATUS_MSI_NOT_EN_S 32
+#define IMU_INTERRUPT_STATUS_SPARE_P 10
+#define IMU_INTERRUPT_STATUS_SPARE_P_MASK 0x1f
+#define IMU_INTERRUPT_STATUS_EQ_OVER_P 9
+#define IMU_INTERRUPT_STATUS_EQ_NOT_EN_P 8
+#define IMU_INTERRUPT_STATUS_MSI_MAL_ERR_P 7
+#define IMU_INTERRUPT_STATUS_MSI_PAR_ERR_P 6
+#define IMU_INTERRUPT_STATUS_PMEACK_MES_NOT_EN_P 5
+#define IMU_INTERRUPT_STATUS_PMPME_MES_NOT_EN_P 4
+#define IMU_INTERRUPT_STATUS_FATAL_MES_NOT_EN_P 3
+#define IMU_INTERRUPT_STATUS_NONFATAL_MES_NOT_EN_P 2
+#define IMU_INTERRUPT_STATUS_COR_MES_NOT_EN_P 1
+#define IMU_INTERRUPT_STATUS_MSI_NOT_EN_P 0
+#define IMU_ERROR_STATUS_CLEAR 0x31018
+#define IMU_ERROR_STATUS_CLEAR_SPARE_S 42
+#define IMU_ERROR_STATUS_CLEAR_SPARE_S_MASK 0x1f
+#define IMU_ERROR_STATUS_CLEAR_EQ_OVER_S 41
+#define IMU_ERROR_STATUS_CLEAR_EQ_NOT_EN_S 40
+#define IMU_ERROR_STATUS_CLEAR_MSI_MAL_ERR_S 39
+#define IMU_ERROR_STATUS_CLEAR_MSI_PAR_ERR_S 38
+#define IMU_ERROR_STATUS_CLEAR_PMEACK_MES_NOT_EN_S 37
+#define IMU_ERROR_STATUS_CLEAR_PMPME_MES_NOT_EN_S 36
+#define IMU_ERROR_STATUS_CLEAR_FATAL_MES_NOT_EN_S 35
+#define IMU_ERROR_STATUS_CLEAR_NONFATAL_MES_NOT_EN_S 34
+#define IMU_ERROR_STATUS_CLEAR_COR_MES_NOT_EN_S 33
+#define IMU_ERROR_STATUS_CLEAR_MSI_NOT_EN_S 32
+#define IMU_ERROR_STATUS_CLEAR_SPARE_P 10
+#define IMU_ERROR_STATUS_CLEAR_SPARE_P_MASK 0x1f
+#define IMU_ERROR_STATUS_CLEAR_EQ_OVER_P 9
+#define IMU_ERROR_STATUS_CLEAR_EQ_NOT_EN_P 8
+#define IMU_ERROR_STATUS_CLEAR_MSI_MAL_ERR_P 7
+#define IMU_ERROR_STATUS_CLEAR_MSI_PAR_ERR_P 6
+#define IMU_ERROR_STATUS_CLEAR_PMEACK_MES_NOT_EN_P 5
+#define IMU_ERROR_STATUS_CLEAR_PMPME_MES_NOT_EN_P 4
+#define IMU_ERROR_STATUS_CLEAR_FATAL_MES_NOT_EN_P 3
+#define IMU_ERROR_STATUS_CLEAR_NONFATAL_MES_NOT_EN_P 2
+#define IMU_ERROR_STATUS_CLEAR_COR_MES_NOT_EN_P 1
+#define IMU_ERROR_STATUS_CLEAR_MSI_NOT_EN_P 0
+#define IMU_ERROR_STATUS_SET 0x31020
+#define IMU_ERROR_STATUS_SET_SPARE_S 42
+#define IMU_ERROR_STATUS_SET_SPARE_S_MASK 0x1f
+#define IMU_ERROR_STATUS_SET_EQ_OVER_S 41
+#define IMU_ERROR_STATUS_SET_EQ_NOT_EN_S 40
+#define IMU_ERROR_STATUS_SET_MSI_MAL_ERR_S 39
+#define IMU_ERROR_STATUS_SET_MSI_PAR_ERR_S 38
+#define IMU_ERROR_STATUS_SET_PMEACK_MES_NOT_EN_S 37
+#define IMU_ERROR_STATUS_SET_PMPME_MES_NOT_EN_S 36
+#define IMU_ERROR_STATUS_SET_FATAL_MES_NOT_EN_S 35
+#define IMU_ERROR_STATUS_SET_NONFATAL_MES_NOT_EN_S 34
+#define IMU_ERROR_STATUS_SET_COR_MES_NOT_EN_S 33
+#define IMU_ERROR_STATUS_SET_MSI_NOT_EN_S 32
+#define IMU_ERROR_STATUS_SET_SPARE_P 10
+#define IMU_ERROR_STATUS_SET_SPARE_P_MASK 0x1f
+#define IMU_ERROR_STATUS_SET_EQ_OVER_P 9
+#define IMU_ERROR_STATUS_SET_EQ_NOT_EN_P 8
+#define IMU_ERROR_STATUS_SET_MSI_MAL_ERR_P 7
+#define IMU_ERROR_STATUS_SET_MSI_PAR_ERR_P 6
+#define IMU_ERROR_STATUS_SET_PMEACK_MES_NOT_EN_P 5
+#define IMU_ERROR_STATUS_SET_PMPME_MES_NOT_EN_P 4
+#define IMU_ERROR_STATUS_SET_FATAL_MES_NOT_EN_P 3
+#define IMU_ERROR_STATUS_SET_NONFATAL_MES_NOT_EN_P 2
+#define IMU_ERROR_STATUS_SET_COR_MES_NOT_EN_P 1
+#define IMU_ERROR_STATUS_SET_MSI_NOT_EN_P 0
+#define IMU_RDS_ERROR_LOG 0x31028
+#define IMU_RDS_ERROR_LOG_TYPE 58
+#define IMU_RDS_ERROR_LOG_TYPE_MASK 0x3f
+#define IMU_RDS_ERROR_LOG_LENGTH 48
+#define IMU_RDS_ERROR_LOG_LENGTH_MASK 0x3ff
+#define IMU_RDS_ERROR_LOG_REQ_ID 32
+#define IMU_RDS_ERROR_LOG_REQ_ID_MASK 0xffff
+#define IMU_RDS_ERROR_LOG_TLP_TAG 24
+#define IMU_RDS_ERROR_LOG_TLP_TAG_MASK 0xff
+#define IMU_RDS_ERROR_LOG_BE_MESS_CODE 16
+#define IMU_RDS_ERROR_LOG_BE_MESS_CODE_MASK 0xff
+#define IMU_RDS_ERROR_LOG_MSI_DATA 0
+#define IMU_RDS_ERROR_LOG_MSI_DATA_MASK 0xffff
+#define IMU_SCS_ERROR_LOG 0x31030
+#define IMU_SCS_ERROR_LOG_TYPE 58
+#define IMU_SCS_ERROR_LOG_TYPE_MASK 0x3f
+#define IMU_SCS_ERROR_LOG_LENGTH 48
+#define IMU_SCS_ERROR_LOG_LENGTH_MASK 0x3ff
+#define IMU_SCS_ERROR_LOG_REQ_ID 32
+#define IMU_SCS_ERROR_LOG_REQ_ID_MASK 0xffff
+#define IMU_SCS_ERROR_LOG_TLP_TAG 24
+#define IMU_SCS_ERROR_LOG_TLP_TAG_MASK 0xff
+#define IMU_SCS_ERROR_LOG_BE_MESS_CODE 16
+#define IMU_SCS_ERROR_LOG_BE_MESS_CODE_MASK 0xff
+#define IMU_SCS_ERROR_LOG_EQ_NUM 0
+#define IMU_SCS_ERROR_LOG_EQ_NUM_MASK 0x3f
+#define IMU_EQS_ERROR_LOG 0x31038
+#define IMU_EQS_ERROR_LOG_EQ_NUM 0
+#define IMU_EQS_ERROR_LOG_EQ_NUM_MASK 0x3f
+
+/* Reserved 0x31040 - 0x317f8 */
+
+#define DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE 0x31800
+#define DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE_DMC 63
+#define DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE_MMU 1
+#define DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE_IMU 0
+#define DMC_CORE_AND_BLOCK_ERROR_STATUS 0x31808
+#define DMC_CORE_AND_BLOCK_ERROR_STATUS_MMU 1
+#define DMC_CORE_AND_BLOCK_ERROR_STATUS_IMU 0
+#define MULTI_CORE_ERROR_STATUS 0x31810
+#define MULTI_CORE_ERROR_STATUS_PEC 1
+#define MULTI_CORE_ERROR_STATUS_DMC 0
+
+/* Reserved 0x31818 - 0x31ff8 */
+
+#define IMU_PERFORMANCE_COUNTER_SELECT 0x32000
+#define IMU_PERFORMANCE_COUNTER_SELECT_SEL1 8
+#define IMU_PERFORMANCE_COUNTER_SELECT_SEL1_MASK 0xff
+#define IMU_PERFORMANCE_COUNTER_SELECT_SEL0 0
+#define IMU_PERFORMANCE_COUNTER_SELECT_SEL0_MASK 0xff
+#define IMU_PERFORMANCE_COUNTER_ZERO 0x32008
+#define IMU_PERFORMANCE_COUNTER_ZERO_CNT 0
+#define IMU_PERFORMANCE_COUNTER_ZERO_CNT_MASK 0xffffffffffffffff
+#define IMU_PERFORMANCE_COUNTER_ONE 0x32010
+#define IMU_PERFORMANCE_COUNTER_ONE_CNT 0
+#define IMU_PERFORMANCE_COUNTER_ONE_CNT_MASK 0xffffffffffffffff
+
+/* Reserved 0x32018 - 0x33ff8 */
+
+#define MSI_32_BIT_ADDRESS 0x34000
+#define MSI_32_BIT_ADDRESS_ADDR 16
+#define MSI_32_BIT_ADDRESS_ADDR_MASK 0xffff
+#define MSI_64_BIT_ADDRESS 0x34008
+#define MSI_64_BIT_ADDRESS_ADDR 16
+#define MSI_64_BIT_ADDRESS_ADDR_MASK 0xffffffffffff
+
+/* Reserved 0x34010 - 0x34010 */
+
+#define MEM_64_PCIE_OFFSET 0x34018
+#define MEM_64_PCIE_OFFSET_ADDR 24
+#define MEM_64_PCIE_OFFSET_ADDR_MASK 0xffffffffff
+#define MEM_64_PCIE_OFFSET_SPARE_CONTROL_LOAD_7 23
+#define MEM_64_PCIE_OFFSET_SPARE_CONTROL_LOAD_6 22
+#define MEM_64_PCIE_OFFSET_SPARE_CONTROL_LOAD_5 21
+#define MEM_64_PCIE_OFFSET_SPARE_CONTROL_LOAD_4 20
+#define MEM_64_PCIE_OFFSET_SPARE_CONTROL_LOAD_3 19
+#define MEM_64_PCIE_OFFSET_SPARE_CONTROL_LOAD_2 18
+#define MEM_64_PCIE_OFFSET_SPARE_CONTROL_LOAD_1 17
+#define MEM_64_PCIE_OFFSET_SPARE_CONTROL_LOAD_0 16
+#define MEM_64_PCIE_OFFSET_SPARE_CONTROL 8
+#define MEM_64_PCIE_OFFSET_SPARE_CONTROL_MASK 0xff
+#define MEM_64_PCIE_OFFSET_SPARE_STATUS 0
+#define MEM_64_PCIE_OFFSET_SPARE_STATUS_MASK 0xff
+
+/* csr.csr CSR module defines */
+
+#define CSR_CSR_BASE 0x600000
+#define MMU_CONTROL_AND_STATUS 0x40000
+#define MMU_CONTROL_AND_STATUS_SPARES 48
+#define MMU_CONTROL_AND_STATUS_SPARES_MASK 0xf
+#define MMU_CONTROL_AND_STATUS_PAQ 45
+#define MMU_CONTROL_AND_STATUS_VAQ 44
+#define MMU_CONTROL_AND_STATUS_TPL 43
+#define MMU_CONTROL_AND_STATUS_TIP 42
+#define MMU_CONTROL_AND_STATUS_TCM 40
+#define MMU_CONTROL_AND_STATUS_TCM_MASK 0x3
+#define MMU_CONTROL_AND_STATUS_SPAREC 16
+#define MMU_CONTROL_AND_STATUS_SPAREC_MASK 0xf
+#define MMU_CONTROL_AND_STATUS_PD 12
+#define MMU_CONTROL_AND_STATUS_SE 10
+#define MMU_CONTROL_AND_STATUS_CM 8
+#define MMU_CONTROL_AND_STATUS_CM_MASK 0x3
+#define MMU_CONTROL_AND_STATUS_BE 1
+#define MMU_CONTROL_AND_STATUS_TE 0
+#define MMU_TSB_CONTROL 0x40008
+#define MMU_TSB_CONTROL_TB 13
+#define MMU_TSB_CONTROL_TB_MASK 0x3fffffff
+#define MMU_TSB_CONTROL_PS 8
+#define MMU_TSB_CONTROL_TS 0
+#define MMU_TSB_CONTROL_TS_MASK 0xf
+
+/* Reserved 0x40010 - 0x400f8 */
+
+#define MMU_TTE_CACHE_FLUSH_ADDRESS 0x40100
+#define MMU_TTE_CACHE_FLUSH_ADDRESS_FLSH_ADDR 6
+#define MMU_TTE_CACHE_FLUSH_ADDRESS_FLSH_ADDR_MASK 0x1fffffffff
+#define MMU_TTE_CACHE_INVALIDATE 0x40108
+#define MMU_TTE_CACHE_INVALIDATE_FLSH_TTE 0
+#define MMU_TTE_CACHE_INVALIDATE_FLSH_TTE_MASK 0xffffffffffffffff
+
+/* Reserved 0x40110 - 0x40ff8 */
+
+#define MMU_ERROR_LOG_ENABLE 0x41000
+#define MMU_ERROR_LOG_ENABLE_EN 0
+#define MMU_ERROR_LOG_ENABLE_EN_MASK 0xffff
+#define MMU_INTERRUPT_ENABLE 0x41008
+#define MMU_INTERRUPT_ENABLE_EN_S 32
+#define MMU_INTERRUPT_ENABLE_EN_S_MASK 0xffff
+#define MMU_INTERRUPT_ENABLE_EN_P 0
+#define MMU_INTERRUPT_ENABLE_EN_P_MASK 0xffff
+#define MMU_INTERRUPT_STATUS 0x41010
+#define MMU_INTERRUPT_STATUS_ERR_S 32
+#define MMU_INTERRUPT_STATUS_ERR_S_MASK 0xffff
+#define MMU_INTERRUPT_STATUS_ERR_P 0
+#define MMU_INTERRUPT_STATUS_ERR_P_MASK 0xffff
+#define MMU_ERROR_STATUS_CLEAR 0x41018
+#define MMU_ERROR_STATUS_CLEAR_TBW_DPE_S 47
+#define MMU_ERROR_STATUS_CLEAR_TBW_ERR_S 46
+#define MMU_ERROR_STATUS_CLEAR_TBW_UDE_S 45
+#define MMU_ERROR_STATUS_CLEAR_TBW_DME_S 44
+#define MMU_ERROR_STATUS_CLEAR_SPARE3_S 43
+#define MMU_ERROR_STATUS_CLEAR_SPARE2_S 42
+#define MMU_ERROR_STATUS_CLEAR_TTC_CAE_S 41
+#define MMU_ERROR_STATUS_CLEAR_TTC_DPE_S 40
+#define MMU_ERROR_STATUS_CLEAR_TTE_PRT_S 39
+#define MMU_ERROR_STATUS_CLEAR_TTE_INV_S 38
+#define MMU_ERROR_STATUS_CLEAR_TRN_OOR_S 37
+#define MMU_ERROR_STATUS_CLEAR_TRN_ERR_S 36
+#define MMU_ERROR_STATUS_CLEAR_SPARE1_S 35
+#define MMU_ERROR_STATUS_CLEAR_SPARE0_S 34
+#define MMU_ERROR_STATUS_CLEAR_BYP_OOR_S 33
+#define MMU_ERROR_STATUS_CLEAR_BYP_ERR_S 32
+#define MMU_ERROR_STATUS_CLEAR_TBW_DPE_P 15
+#define MMU_ERROR_STATUS_CLEAR_TBW_ERR_P 14
+#define MMU_ERROR_STATUS_CLEAR_TBW_UDE_P 13
+#define MMU_ERROR_STATUS_CLEAR_TBW_DME_P 12
+#define MMU_ERROR_STATUS_CLEAR_SPARE3_P 11
+#define MMU_ERROR_STATUS_CLEAR_SPARE2_P 10
+#define MMU_ERROR_STATUS_CLEAR_TTC_CAE_P 9
+#define MMU_ERROR_STATUS_CLEAR_TTC_DPE_P 8
+#define MMU_ERROR_STATUS_CLEAR_TTE_PRT_P 7
+#define MMU_ERROR_STATUS_CLEAR_TTE_INV_P 6
+#define MMU_ERROR_STATUS_CLEAR_TRN_OOR_P 5
+#define MMU_ERROR_STATUS_CLEAR_TRN_ERR_P 4
+#define MMU_ERROR_STATUS_CLEAR_SPARE1_P 3
+#define MMU_ERROR_STATUS_CLEAR_SPARE0_P 2
+#define MMU_ERROR_STATUS_CLEAR_BYP_OOR_P 1
+#define MMU_ERROR_STATUS_CLEAR_BYP_ERR_P 0
+#define MMU_ERROR_STATUS_SET 0x41020
+#define MMU_ERROR_STATUS_SET_TBW_DPE_S 47
+#define MMU_ERROR_STATUS_SET_TBW_ERR_S 46
+#define MMU_ERROR_STATUS_SET_TBW_UDE_S 45
+#define MMU_ERROR_STATUS_SET_TBW_DME_S 44
+#define MMU_ERROR_STATUS_SET_SPARE3_S 43
+#define MMU_ERROR_STATUS_SET_SPARE2_S 42
+#define MMU_ERROR_STATUS_SET_TTC_CAE_S 41
+#define MMU_ERROR_STATUS_SET_TTC_DPE_S 40
+#define MMU_ERROR_STATUS_SET_TTE_PRT_S 39
+#define MMU_ERROR_STATUS_SET_TTE_INV_S 38
+#define MMU_ERROR_STATUS_SET_TRN_OOR_S 37
+#define MMU_ERROR_STATUS_SET_TRN_ERR_S 36
+#define MMU_ERROR_STATUS_SET_SPARE1_S 35
+#define MMU_ERROR_STATUS_SET_SPARE0_S 34
+#define MMU_ERROR_STATUS_SET_BYP_OOR_S 33
+#define MMU_ERROR_STATUS_SET_BYP_ERR_S 32
+#define MMU_ERROR_STATUS_SET_TBW_DPE_P 15
+#define MMU_ERROR_STATUS_SET_TBW_ERR_P 14
+#define MMU_ERROR_STATUS_SET_TBW_UDE_P 13
+#define MMU_ERROR_STATUS_SET_TBW_DME_P 12
+#define MMU_ERROR_STATUS_SET_SPARE3_P 11
+#define MMU_ERROR_STATUS_SET_SPARE2_P 10
+#define MMU_ERROR_STATUS_SET_TTC_CAE_P 9
+#define MMU_ERROR_STATUS_SET_TTC_DPE_P 8
+#define MMU_ERROR_STATUS_SET_TTE_PRT_P 7
+#define MMU_ERROR_STATUS_SET_TTE_INV_P 6
+#define MMU_ERROR_STATUS_SET_TRN_OOR_P 5
+#define MMU_ERROR_STATUS_SET_TRN_ERR_P 4
+#define MMU_ERROR_STATUS_SET_SPARE1_P 3
+#define MMU_ERROR_STATUS_SET_SPARE0_P 2
+#define MMU_ERROR_STATUS_SET_BYP_OOR_P 1
+#define MMU_ERROR_STATUS_SET_BYP_ERR_P 0
+#define MMU_TRANSLATION_FAULT_ADDRESS 0x41028
+#define MMU_TRANSLATION_FAULT_ADDRESS_VA 2
+#define MMU_TRANSLATION_FAULT_ADDRESS_VA_MASK 0x3fffffffffffffff
+#define MMU_TRANSLATION_FAULT_STATUS 0x41030
+#define MMU_TRANSLATION_FAULT_STATUS_ENTRY 32
+#define MMU_TRANSLATION_FAULT_STATUS_ENTRY_MASK 0x1ff
+#define MMU_TRANSLATION_FAULT_STATUS_TYPE 16
+#define MMU_TRANSLATION_FAULT_STATUS_TYPE_MASK 0x7f
+#define MMU_TRANSLATION_FAULT_STATUS_ID 0
+#define MMU_TRANSLATION_FAULT_STATUS_ID_MASK 0xffff
+
+/* Reserved 0x41038 - 0x41ff8 */
+
+#define MMU_PERFORMANCE_COUNTER_SELECT 0x42000
+#define MMU_PERFORMANCE_COUNTER_SELECT_SEL1 8
+#define MMU_PERFORMANCE_COUNTER_SELECT_SEL1_MASK 0xff
+#define MMU_PERFORMANCE_COUNTER_SELECT_SEL0 0
+#define MMU_PERFORMANCE_COUNTER_SELECT_SEL0_MASK 0xff
+#define MMU_PERFORMANCE_COUNTER_ZERO 0x42008
+#define MMU_PERFORMANCE_COUNTER_ZERO_CNT 0
+#define MMU_PERFORMANCE_COUNTER_ZERO_CNT_MASK 0xffffffffffffffff
+#define MMU_PERFORMANCE_COUNTER_ONE 0x42010
+#define MMU_PERFORMANCE_COUNTER_ONE_CNT 0
+#define MMU_PERFORMANCE_COUNTER_ONE_CNT_MASK 0xffffffffffffffff
+
+/* Reserved 0x42018 - 0x43ff8 */
+
+
+/* Reserved 0x44008 - 0x45ff8 */
+
+#define MMU_TTE_CACHE_VIRTUAL_TAG 0x46000
+#define MMU_TTE_CACHE_VIRTUAL_TAG_ENTRIES 64
+#define MMU_TTE_CACHE_VIRTUAL_TAG_ENTRIES_CNT 32
+#define MMU_TTE_CACHE_VIRTUAL_TAG_ENTRIES_CNT_MASK 0xfff
+#define MMU_TTE_CACHE_VIRTUAL_TAG_ENTRIES_TAG 16
+#define MMU_TTE_CACHE_VIRTUAL_TAG_ENTRIES_TAG_MASK 0xffff
+#define MMU_TTE_CACHE_VIRTUAL_TAG_ENTRIES_VLD 0
+
+/* Reserved 0x46200 - 0x46ff8 */
+
+#define MMU_TTE_CACHE_PHYSICAL_TAG 0x47000
+#define MMU_TTE_CACHE_PHYSICAL_TAG_ENTRIES 64
+#define MMU_TTE_CACHE_PHYSICAL_TAG_ENTRIES_TAG 6
+#define MMU_TTE_CACHE_PHYSICAL_TAG_ENTRIES_TAG_MASK 0x1fffffffff
+#define MMU_TTE_CACHE_PHYSICAL_TAG_ENTRIES_VLD 0
+
+/* Reserved 0x47200 - 0x47ff8 */
+
+#define MMU_TTE_CACHE_DATA 0x48000
+#define MMU_TTE_CACHE_DATA_ENTRIES 512
+#define MMU_TTE_CACHE_DATA_ENTRIES_PAR 60
+#define MMU_TTE_CACHE_DATA_ENTRIES_PAR_MASK 0xf
+#define MMU_TTE_CACHE_DATA_ENTRIES_PPN 13
+#define MMU_TTE_CACHE_DATA_ENTRIES_PPN_MASK 0x3fffffff
+#define MMU_TTE_CACHE_DATA_ENTRIES_WRT 1
+#define MMU_TTE_CACHE_DATA_ENTRIES_VLD 0
+
+/* cib.csr CIB module defines */
+
+#define CIB_CSR_BASE 0x600000
+
+/* Reserved 0x50008 - 0x50ff8 */
+
+#define ILU_ERROR_LOG_ENABLE 0x51000
+#define ILU_ERROR_LOG_ENABLE_SPARE3 7
+#define ILU_ERROR_LOG_ENABLE_SPARE2 6
+#define ILU_ERROR_LOG_ENABLE_SPARE1 5
+#define ILU_ERROR_LOG_ENABLE_IHB_PE 4
+#define ILU_INTERRUPT_ENABLE 0x51008
+#define ILU_INTERRUPT_ENABLE_SPARE3_S 39
+#define ILU_INTERRUPT_ENABLE_SPARE2_S 38
+#define ILU_INTERRUPT_ENABLE_SPARE1_S 37
+#define ILU_INTERRUPT_ENABLE_IHB_PE_S 36
+#define ILU_INTERRUPT_ENABLE_SPARE3_P 7
+#define ILU_INTERRUPT_ENABLE_SPARE2_P 6
+#define ILU_INTERRUPT_ENABLE_SPARE1_P 5
+#define ILU_INTERRUPT_ENABLE_IHB_PE_P 4
+#define ILU_INTERRUPT_STATUS 0x51010
+#define ILU_INTERRUPT_STATUS_SPARE3_S 39
+#define ILU_INTERRUPT_STATUS_SPARE2_S 38
+#define ILU_INTERRUPT_STATUS_SPARE1_S 37
+#define ILU_INTERRUPT_STATUS_IHB_PE_S 36
+#define ILU_INTERRUPT_STATUS_SPARE3_P 7
+#define ILU_INTERRUPT_STATUS_SPARE2_P 6
+#define ILU_INTERRUPT_STATUS_SPARE1_P 5
+#define ILU_INTERRUPT_STATUS_IHB_PE_P 4
+#define ILU_ERROR_STATUS_CLEAR 0x51018
+#define ILU_ERROR_STATUS_CLEAR_SPARE3_S 39
+#define ILU_ERROR_STATUS_CLEAR_SPARE2_S 38
+#define ILU_ERROR_STATUS_CLEAR_SPARE1_S 37
+#define ILU_ERROR_STATUS_CLEAR_IHB_PE_S 36
+#define ILU_ERROR_STATUS_CLEAR_SPARE3_P 7
+#define ILU_ERROR_STATUS_CLEAR_SPARE2_P 6
+#define ILU_ERROR_STATUS_CLEAR_SPARE1_P 5
+#define ILU_ERROR_STATUS_CLEAR_IHB_PE_P 4
+#define ILU_ERROR_STATUS_SET 0x51020
+#define ILU_ERROR_STATUS_SET_SPARE3_S 39
+#define ILU_ERROR_STATUS_SET_SPARE2_S 38
+#define ILU_ERROR_STATUS_SET_SPARE1_S 37
+#define ILU_ERROR_STATUS_SET_IHB_PE_S 36
+#define ILU_ERROR_STATUS_SET_SPARE3_P 7
+#define ILU_ERROR_STATUS_SET_SPARE2_P 6
+#define ILU_ERROR_STATUS_SET_SPARE1_P 5
+#define ILU_ERROR_STATUS_SET_IHB_PE_P 4
+
+/* Reserved 0x51028 - 0x517f8 */
+
+#define PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE 0x51800
+#define PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE_PEC 63
+#define PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE_PEC_ILU 3
+#define PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE_PEC_UE 2
+#define PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE_PEC_CE 1
+#define PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE_PEC_OE 0
+#define PEC_CORE_AND_BLOCK_INTERRUPT_STATUS 0x51808
+#define PEC_CORE_AND_BLOCK_INTERRUPT_STATUS_ILU 3
+#define PEC_CORE_AND_BLOCK_INTERRUPT_STATUS_UE 2
+#define PEC_CORE_AND_BLOCK_INTERRUPT_STATUS_CE 1
+#define PEC_CORE_AND_BLOCK_INTERRUPT_STATUS_OE 0
+
+/* Reserved 0x51810 - 0x51ff8 */
+
+#define ILU_DEVICE_CAPABILITIES 0x52000
+#define ILU_DEVICE_CAPABILITIES_ESTAR 0
+
+/* cru.csr CRU module defines */
+
+#define CRU_CSR_BASE 0x600000
+#define DMC_DEBUG_SELECT_FOR_PORT_A 0x53000
+#define DMC_DEBUG_SELECT_FOR_PORT_A_BLOCK_SEL 6
+#define DMC_DEBUG_SELECT_FOR_PORT_A_BLOCK_SEL_MASK 0xf
+#define DMC_DEBUG_SELECT_FOR_PORT_A_SUB_SEL 3
+#define DMC_DEBUG_SELECT_FOR_PORT_A_SUB_SEL_MASK 0x7
+#define DMC_DEBUG_SELECT_FOR_PORT_A_SIGNAL_SEL 0
+#define DMC_DEBUG_SELECT_FOR_PORT_A_SIGNAL_SEL_MASK 0x7
+#define DMC_DEBUG_SELECT_FOR_PORT_B 0x53008
+#define DMC_DEBUG_SELECT_FOR_PORT_B_BLOCK_SEL 6
+#define DMC_DEBUG_SELECT_FOR_PORT_B_BLOCK_SEL_MASK 0xf
+#define DMC_DEBUG_SELECT_FOR_PORT_B_SUB_SEL 3
+#define DMC_DEBUG_SELECT_FOR_PORT_B_SUB_SEL_MASK 0x7
+#define DMC_DEBUG_SELECT_FOR_PORT_B_SIGNAL_SEL 0
+#define DMC_DEBUG_SELECT_FOR_PORT_B_SIGNAL_SEL_MASK 0x7
+
+/* Reserved 0x53010 - 0x530f8 */
+
+#define DMC_PCI_EXPRESS_CONFIGURATION 0x53100
+#define DMC_PCI_EXPRESS_CONFIGURATION_BUS_NUM 24
+#define DMC_PCI_EXPRESS_CONFIGURATION_BUS_NUM_MASK 0xff
+#define DMC_PCI_EXPRESS_CONFIGURATION_REQ_ID 0
+#define DMC_PCI_EXPRESS_CONFIGURATION_REQ_ID_MASK 0xffff
+
+/* psb.csr PSB module defines */
+
+#define PSB_CSR_BASE 0x600000
+#define PACKET_SCOREBOARD_DMA_SET 0x60000
+#define PACKET_SCOREBOARD_DMA_SET_ENTRIES 32
+#define PACKET_SCOREBOARD_DMA_SET_ENTRIES_ENTRY 0
+#define PACKET_SCOREBOARD_DMA_SET_ENTRIES_ENTRY_MASK 0x1ffffffffff
+
+/* Reserved 0x60100 - 0x63ff8 */
+
+#define PACKET_SCOREBOARD_PIO_SET 0x64000
+#define PACKET_SCOREBOARD_PIO_SET_ENTRIES 16
+#define PACKET_SCOREBOARD_PIO_SET_ENTRIES_ENTRY 0
+#define PACKET_SCOREBOARD_PIO_SET_ENTRIES_ENTRY_MASK 0x3f
+
+/* tsb.csr TSB module defines */
+
+#define TSB_CSR_BASE 0x600000
+#define TRANSACTION_SCOREBOARD_SET 0x70000
+#define TRANSACTION_SCOREBOARD_SET_ENTRIES 32
+#define TRANSACTION_SCOREBOARD_SET_ENTRIES_ENTRY 0
+#define TRANSACTION_SCOREBOARD_SET_ENTRIES_ENTRY_MASK 0xffffffffffff
+#define TRANSACTION_SCOREBOARD_STATUS 0x70100
+#define TRANSACTION_SCOREBOARD_STATUS_FULL 7
+#define TRANSACTION_SCOREBOARD_STATUS_NUM_PND_DMA 1
+#define TRANSACTION_SCOREBOARD_STATUS_NUM_PND_DMA_MASK 0x3f
+#define TRANSACTION_SCOREBOARD_STATUS_EMPTY 0
+
+/* tlr.csr TLR module defines */
+
+#define TLR_CSR_BASE 0x600000
+#define TLU_CONTROL 0x80000
+#define FIRE10_TLU_CONTROL_L0S_TIM 16
+#define FIRE10_TLU_CONTROL_L0S_TIM_MASK 0xff
+#define FIRE10_TLU_CONTROL_NPWR_EN 12
+#define FIRE10_TLU_CONTROL_CTO_SEL 8
+#define FIRE10_TLU_CONTROL_CTO_SEL_MASK 0x3
+#define FIRE10_TLU_CONTROL_CONFIG 0
+#define FIRE10_TLU_CONTROL_CONFIG_MASK 0xff
+#define TLU_CONTROL_L0S_TIM 24
+#define TLU_CONTROL_L0S_TIM_MASK 0xff
+#define TLU_CONTROL_NPWR_EN 20
+#define TLU_CONTROL_CTO_SEL 16
+#define TLU_CONTROL_CTO_SEL_MASK 0x7
+#define TLU_CONTROL_CONFIG 0
+#define TLU_CONTROL_CONFIG_MASK 0xffff
+#define TLU_STATUS 0x80008
+#define TLU_STATUS_DRAIN 8
+#define TLU_STATUS_STATUS 0
+#define TLU_STATUS_STATUS_MASK 0xff
+#define TLU_PME_TURN_OFF_GENERATE 0x80010
+#define TLU_PME_TURN_OFF_GENERATE_PTO 0
+#define TLU_INGRESS_CREDITS_INITIAL 0x80018
+#define TLU_INGRESS_CREDITS_INITIAL_CHC 52
+#define TLU_INGRESS_CREDITS_INITIAL_CHC_MASK 0xff
+#define TLU_INGRESS_CREDITS_INITIAL_CDC 40
+#define TLU_INGRESS_CREDITS_INITIAL_CDC_MASK 0xfff
+#define TLU_INGRESS_CREDITS_INITIAL_NHC 32
+#define TLU_INGRESS_CREDITS_INITIAL_NHC_MASK 0xff
+#define TLU_INGRESS_CREDITS_INITIAL_NDC 20
+#define TLU_INGRESS_CREDITS_INITIAL_NDC_MASK 0xfff
+#define TLU_INGRESS_CREDITS_INITIAL_PHC 12
+#define TLU_INGRESS_CREDITS_INITIAL_PHC_MASK 0xff
+#define TLU_INGRESS_CREDITS_INITIAL_PDC 0
+#define TLU_INGRESS_CREDITS_INITIAL_PDC_MASK 0xfff
+
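TLU_CONTROL above is defined with two field layouts at the same 0x80000 offset, one set of field defines prefixed FIRE10_ and one without. The header does not say what FIRE10 denotes; assuming it names an earlier revision of the chip that packs the same register differently (an assumption, not stated here), code consuming these defines has to pick the layout matching the silicon it runs on, roughly as sketched below; 'is_fire10' is a placeholder for however the driver actually distinguishes revisions:

#include <sys/types.h>

/* Hypothetical sketch only; not part of the px driver. */
static uint64_t
tlu_cto_sel(uint64_t tlu_control_val, boolean_t is_fire10)
{
	if (is_fire10)
		return ((tlu_control_val >> FIRE10_TLU_CONTROL_CTO_SEL) &
		    FIRE10_TLU_CONTROL_CTO_SEL_MASK);
	return ((tlu_control_val >> TLU_CONTROL_CTO_SEL) &
	    TLU_CONTROL_CTO_SEL_MASK);
}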
+/* Reserved 0x80020 - 0x800f8 */
+
+#define TLU_DIAGNOSTIC 0x80100
+#define TLU_DIAGNOSTIC_LNK_MAX 48
+#define TLU_DIAGNOSTIC_LNK_MAX_MASK 0x3f
+#define TLU_DIAGNOSTIC_CHK_DIS 32
+#define TLU_DIAGNOSTIC_CHK_DIS_MASK 0xffff
+#define TLU_DIAGNOSTIC_EPI_PAR 16
+#define TLU_DIAGNOSTIC_EPI_PAR_MASK 0xff
+#define TLU_DIAGNOSTIC_IDI_PAR 12
+#define TLU_DIAGNOSTIC_IDI_PAR_MASK 0xf
+#define TLU_DIAGNOSTIC_IHI_PAR 8
+#define TLU_DIAGNOSTIC_IHI_PAR_MASK 0xf
+#define TLU_DIAGNOSTIC_EPI_TRG 7
+#define TLU_DIAGNOSTIC_IDI_TRG 6
+#define TLU_DIAGNOSTIC_IHI_TRG 5
+#define TLU_DIAGNOSTIC_MRC_TRG 4
+#define TLU_DIAGNOSTIC_EPP_DIS 1
+#define TLU_DIAGNOSTIC_IFC_DIS 0
+
+/* Reserved 0x80108 - 0x801f8 */
+
+#define TLU_EGRESS_CREDITS_CONSUMED 0x80200
+#define TLU_EGRESS_CREDITS_CONSUMED_CHI 62
+#define TLU_EGRESS_CREDITS_CONSUMED_NHI 61
+#define TLU_EGRESS_CREDITS_CONSUMED_PHI 60
+#define TLU_EGRESS_CREDITS_CONSUMED_CHC 52
+#define TLU_EGRESS_CREDITS_CONSUMED_CHC_MASK 0xff
+#define TLU_EGRESS_CREDITS_CONSUMED_CDC 40
+#define TLU_EGRESS_CREDITS_CONSUMED_CDC_MASK 0xfff
+#define TLU_EGRESS_CREDITS_CONSUMED_NHC 32
+#define TLU_EGRESS_CREDITS_CONSUMED_NHC_MASK 0xff
+#define TLU_EGRESS_CREDITS_CONSUMED_NDC 20
+#define TLU_EGRESS_CREDITS_CONSUMED_NDC_MASK 0xfff
+#define TLU_EGRESS_CREDITS_CONSUMED_PHC 12
+#define TLU_EGRESS_CREDITS_CONSUMED_PHC_MASK 0xff
+#define TLU_EGRESS_CREDITS_CONSUMED_PDC 0
+#define TLU_EGRESS_CREDITS_CONSUMED_PDC_MASK 0xfff
+#define TLU_EGRESS_CREDIT_LIMIT 0x80208
+#define TLU_EGRESS_CREDIT_LIMIT_CDI 62
+#define TLU_EGRESS_CREDIT_LIMIT_NDI 61
+#define TLU_EGRESS_CREDIT_LIMIT_PDI 60
+#define TLU_EGRESS_CREDIT_LIMIT_CHC 52
+#define TLU_EGRESS_CREDIT_LIMIT_CHC_MASK 0xff
+#define TLU_EGRESS_CREDIT_LIMIT_CDC 40
+#define TLU_EGRESS_CREDIT_LIMIT_CDC_MASK 0xfff
+#define TLU_EGRESS_CREDIT_LIMIT_NHC 32
+#define TLU_EGRESS_CREDIT_LIMIT_NHC_MASK 0xff
+#define TLU_EGRESS_CREDIT_LIMIT_NDC 20
+#define TLU_EGRESS_CREDIT_LIMIT_NDC_MASK 0xfff
+#define TLU_EGRESS_CREDIT_LIMIT_PHC 12
+#define TLU_EGRESS_CREDIT_LIMIT_PHC_MASK 0xff
+#define TLU_EGRESS_CREDIT_LIMIT_PDC 0
+#define TLU_EGRESS_CREDIT_LIMIT_PDC_MASK 0xfff
+#define TLU_EGRESS_RETRY_BUFFER 0x80210
+#define TLU_EGRESS_RETRY_BUFFER_CC 32
+#define TLU_EGRESS_RETRY_BUFFER_CC_MASK 0xffff
+#define TLU_EGRESS_RETRY_BUFFER_CL 0
+#define TLU_EGRESS_RETRY_BUFFER_CL_MASK 0xffff
+#define TLU_INGRESS_CREDITS_ALLOCATED 0x80218
+#define TLU_INGRESS_CREDITS_ALLOCATED_CHC 52
+#define TLU_INGRESS_CREDITS_ALLOCATED_CHC_MASK 0xff
+#define TLU_INGRESS_CREDITS_ALLOCATED_CDC 40
+#define TLU_INGRESS_CREDITS_ALLOCATED_CDC_MASK 0xfff
+#define TLU_INGRESS_CREDITS_ALLOCATED_NHC 32
+#define TLU_INGRESS_CREDITS_ALLOCATED_NHC_MASK 0xff
+#define TLU_INGRESS_CREDITS_ALLOCATED_NDC 20
+#define TLU_INGRESS_CREDITS_ALLOCATED_NDC_MASK 0xfff
+#define TLU_INGRESS_CREDITS_ALLOCATED_PHC 12
+#define TLU_INGRESS_CREDITS_ALLOCATED_PHC_MASK 0xff
+#define TLU_INGRESS_CREDITS_ALLOCATED_PDC 0
+#define TLU_INGRESS_CREDITS_ALLOCATED_PDC_MASK 0xfff
+#define TLU_INGRESS_CREDITS_RECEIVED 0x80220
+#define TLU_INGRESS_CREDITS_RECEIVED_CHC 52
+#define TLU_INGRESS_CREDITS_RECEIVED_CHC_MASK 0xff
+#define TLU_INGRESS_CREDITS_RECEIVED_CDC 40
+#define TLU_INGRESS_CREDITS_RECEIVED_CDC_MASK 0xfff
+#define TLU_INGRESS_CREDITS_RECEIVED_NHC 32
+#define TLU_INGRESS_CREDITS_RECEIVED_NHC_MASK 0xff
+#define TLU_INGRESS_CREDITS_RECEIVED_NDC 20
+#define TLU_INGRESS_CREDITS_RECEIVED_NDC_MASK 0xfff
+#define TLU_INGRESS_CREDITS_RECEIVED_PHC 12
+#define TLU_INGRESS_CREDITS_RECEIVED_PHC_MASK 0xff
+#define TLU_INGRESS_CREDITS_RECEIVED_PDC 0
+#define TLU_INGRESS_CREDITS_RECEIVED_PDC_MASK 0xfff
+
+/* Reserved 0x80228 - 0x80ff8 */
+
+#define TLU_OTHER_EVENT_LOG_ENABLE 0x81000
+#define TLU_OTHER_EVENT_LOG_ENABLE_EN 0
+#define TLU_OTHER_EVENT_LOG_ENABLE_EN_MASK 0xffffff
+#define TLU_OTHER_EVENT_INTERRUPT_ENABLE 0x81008
+#define TLU_OTHER_EVENT_INTERRUPT_ENABLE_EN_S 32
+#define TLU_OTHER_EVENT_INTERRUPT_ENABLE_EN_S_MASK 0xffffff
+#define TLU_OTHER_EVENT_INTERRUPT_ENABLE_EN_P 0
+#define TLU_OTHER_EVENT_INTERRUPT_ENABLE_EN_P_MASK 0xffffff
+#define TLU_OTHER_EVENT_INTERRUPT_STATUS 0x81010
+#define TLU_OTHER_EVENT_INTERRUPT_STATUS_ERR_S 32
+#define TLU_OTHER_EVENT_INTERRUPT_STATUS_ERR_S_MASK 0xffffff
+#define TLU_OTHER_EVENT_INTERRUPT_STATUS_ERR_P 0
+#define TLU_OTHER_EVENT_INTERRUPT_STATUS_ERR_P_MASK 0xffffff
+#define TLU_OTHER_EVENT_STATUS_CLEAR 0x81018
+#define TLU_OTHER_EVENT_STATUS_CLEAR_SPARE_S 55
+#define TLU_OTHER_EVENT_STATUS_CLEAR_MFC_S 54
+#define TLU_OTHER_EVENT_STATUS_CLEAR_CTO_S 53
+#define TLU_OTHER_EVENT_STATUS_CLEAR_NFP_S 52
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LWC_S 51
+#define TLU_OTHER_EVENT_STATUS_CLEAR_MRC_S 50
+#define TLU_OTHER_EVENT_STATUS_CLEAR_WUC_S 49
+#define TLU_OTHER_EVENT_STATUS_CLEAR_RUC_S 48
+#define TLU_OTHER_EVENT_STATUS_CLEAR_CRS_S 47
+#define TLU_OTHER_EVENT_STATUS_CLEAR_IIP_S 46
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EDP_S 45
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EHP_S 44
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LIN_S 43
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LRS_S 42
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LDN_S 41
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LUP_S 40
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LPU_S 38
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LPU_S_MASK 0x3
+#define TLU_OTHER_EVENT_STATUS_CLEAR_ERU_S 37
+#define TLU_OTHER_EVENT_STATUS_CLEAR_ERO_S 36
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EMP_S 35
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EPE_S 34
+#define TLU_OTHER_EVENT_STATUS_CLEAR_ERP_S 33
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EIP_S 32
+#define TLU_OTHER_EVENT_STATUS_CLEAR_SPARE_P 23
+#define TLU_OTHER_EVENT_STATUS_CLEAR_MFC_P 22
+#define TLU_OTHER_EVENT_STATUS_CLEAR_CTO_P 21
+#define TLU_OTHER_EVENT_STATUS_CLEAR_NFP_P 20
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LWC_P 19
+#define TLU_OTHER_EVENT_STATUS_CLEAR_MRC_P 18
+#define TLU_OTHER_EVENT_STATUS_CLEAR_WUC_P 17
+#define TLU_OTHER_EVENT_STATUS_CLEAR_RUC_P 16
+#define TLU_OTHER_EVENT_STATUS_CLEAR_CRS_P 15
+#define TLU_OTHER_EVENT_STATUS_CLEAR_IIP_P 14
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EDP_P 13
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EHP_P 12
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LIN_P 11
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LRS_P 10
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LDN_P 9
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LUP_P 8
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LPU_P 6
+#define TLU_OTHER_EVENT_STATUS_CLEAR_LPU_P_MASK 0x3
+#define TLU_OTHER_EVENT_STATUS_CLEAR_ERU_P 5
+#define TLU_OTHER_EVENT_STATUS_CLEAR_ERO_P 4
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EMP_P 3
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EPE_P 2
+#define TLU_OTHER_EVENT_STATUS_CLEAR_ERP_P 1
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EIP_P 0
+#define TLU_OTHER_EVENT_STATUS_SET 0x81020
+#define TLU_OTHER_EVENT_STATUS_SET_SPARE_S 55
+#define TLU_OTHER_EVENT_STATUS_SET_MFC_S 54
+#define TLU_OTHER_EVENT_STATUS_SET_CTO_S 53
+#define TLU_OTHER_EVENT_STATUS_SET_NFP_S 52
+#define TLU_OTHER_EVENT_STATUS_SET_LWC_S 51
+#define TLU_OTHER_EVENT_STATUS_SET_MRC_S 50
+#define TLU_OTHER_EVENT_STATUS_SET_WUC_S 49
+#define TLU_OTHER_EVENT_STATUS_SET_RUC_S 48
+#define TLU_OTHER_EVENT_STATUS_SET_CRS_S 47
+#define TLU_OTHER_EVENT_STATUS_SET_IIP_S 46
+#define TLU_OTHER_EVENT_STATUS_SET_EDP_S 45
+#define TLU_OTHER_EVENT_STATUS_SET_EHP_S 44
+#define TLU_OTHER_EVENT_STATUS_SET_LIN_S 43
+#define TLU_OTHER_EVENT_STATUS_SET_LRS_S 42
+#define TLU_OTHER_EVENT_STATUS_SET_LDN_S 41
+#define TLU_OTHER_EVENT_STATUS_SET_LUP_S 40
+#define TLU_OTHER_EVENT_STATUS_SET_LPU_S 38
+#define TLU_OTHER_EVENT_STATUS_SET_LPU_S_MASK 0x3
+#define TLU_OTHER_EVENT_STATUS_SET_ERU_S 37
+#define TLU_OTHER_EVENT_STATUS_SET_ERO_S 36
+#define TLU_OTHER_EVENT_STATUS_SET_EMP_S 35
+#define TLU_OTHER_EVENT_STATUS_SET_EPE_S 34
+#define TLU_OTHER_EVENT_STATUS_SET_ERP_S 33
+#define TLU_OTHER_EVENT_STATUS_SET_EIP_S 32
+#define TLU_OTHER_EVENT_STATUS_SET_SPARE_P 23
+#define TLU_OTHER_EVENT_STATUS_SET_MFC_P 22
+#define TLU_OTHER_EVENT_STATUS_SET_CTO_P 21
+#define TLU_OTHER_EVENT_STATUS_SET_NFP_P 20
+#define TLU_OTHER_EVENT_STATUS_SET_LWC_P 19
+#define TLU_OTHER_EVENT_STATUS_SET_MRC_P 18
+#define TLU_OTHER_EVENT_STATUS_SET_WUC_P 17
+#define TLU_OTHER_EVENT_STATUS_SET_RUC_P 16
+#define TLU_OTHER_EVENT_STATUS_SET_CRS_P 15
+#define TLU_OTHER_EVENT_STATUS_SET_IIP_P 14
+#define TLU_OTHER_EVENT_STATUS_SET_EDP_P 13
+#define TLU_OTHER_EVENT_STATUS_SET_EHP_P 12
+#define TLU_OTHER_EVENT_STATUS_SET_LIN_P 11
+#define TLU_OTHER_EVENT_STATUS_SET_LRS_P 10
+#define TLU_OTHER_EVENT_STATUS_SET_LDN_P 9
+#define TLU_OTHER_EVENT_STATUS_SET_LUP_P 8
+#define TLU_OTHER_EVENT_STATUS_SET_LPU_P 6
+#define TLU_OTHER_EVENT_STATUS_SET_LPU_P_MASK 0x3
+#define TLU_OTHER_EVENT_STATUS_SET_ERU_P 5
+#define TLU_OTHER_EVENT_STATUS_SET_ERO_P 4
+#define TLU_OTHER_EVENT_STATUS_SET_EMP_P 3
+#define TLU_OTHER_EVENT_STATUS_SET_EPE_P 2
+#define TLU_OTHER_EVENT_STATUS_SET_ERP_P 1
+#define TLU_OTHER_EVENT_STATUS_SET_EIP_P 0
+#define TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG 0x81028
+#define TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG_HDR 0
+#define TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG_HDR_MASK 0xffffffffffffffff
+#define TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG 0x81030
+#define TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG_HDR 0
+#define TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG_HDR_MASK 0xffffffffffffffff
+#define TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG 0x81038
+#define TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG_HDR 0
+#define TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG_HDR_MASK 0xffffffffffffffff
+#define TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG 0x81040
+#define TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG_HDR 0
+#define TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG_HDR_MASK 0xffffffffffffffff
+
+/* Reserved 0x81048 - 0x81ff8 */
+
+#define TLU_PERFORMANCE_COUNTER_SELECT 0x82000
+#define TLU_PERFORMANCE_COUNTER_SELECT_SEL2 16
+#define TLU_PERFORMANCE_COUNTER_SELECT_SEL2_MASK 0x3
+#define TLU_PERFORMANCE_COUNTER_SELECT_SEL1 8
+#define TLU_PERFORMANCE_COUNTER_SELECT_SEL1_MASK 0xff
+#define TLU_PERFORMANCE_COUNTER_SELECT_SEL0 0
+#define TLU_PERFORMANCE_COUNTER_SELECT_SEL0_MASK 0xff
+#define TLU_PERFORMANCE_COUNTER_ZERO 0x82008
+#define TLU_PERFORMANCE_COUNTER_ZERO_CNT 0
+#define TLU_PERFORMANCE_COUNTER_ZERO_CNT_MASK 0xffffffffffffffff
+#define TLU_PERFORMANCE_COUNTER_ONE 0x82010
+#define TLU_PERFORMANCE_COUNTER_ONE_CNT 0
+#define TLU_PERFORMANCE_COUNTER_ONE_CNT_MASK 0xffffffffffffffff
+#define TLU_PERFORMANCE_COUNTER_TWO 0x82018
+#define TLU_PERFORMANCE_COUNTER_TWO_CNT 0
+#define TLU_PERFORMANCE_COUNTER_TWO_CNT_MASK 0xffffffff
+
+/* Reserved 0x82020 - 0x82ff8 */
+
+#define TLU_DEBUG_SELECT_A 0x83000
+#define TLU_DEBUG_SELECT_A_BLOCK 6
+#define TLU_DEBUG_SELECT_A_BLOCK_MASK 0x7
+#define TLU_DEBUG_SELECT_A_MODULE 3
+#define TLU_DEBUG_SELECT_A_MODULE_MASK 0x7
+#define TLU_DEBUG_SELECT_A_SIGNAL 0
+#define TLU_DEBUG_SELECT_A_SIGNAL_MASK 0x7
+#define TLU_DEBUG_SELECT_B 0x83008
+#define TLU_DEBUG_SELECT_B_BLOCK 6
+#define TLU_DEBUG_SELECT_B_BLOCK_MASK 0x7
+#define TLU_DEBUG_SELECT_B_MODULE 3
+#define TLU_DEBUG_SELECT_B_MODULE_MASK 0x7
+#define TLU_DEBUG_SELECT_B_SIGNAL 0
+#define TLU_DEBUG_SELECT_B_SIGNAL_MASK 0x7
+
+/* Reserved 0x83010 - 0x8fff8 */
+
+#define TLU_DEVICE_CAPABILITIES 0x90000
+#define TLU_DEVICE_CAPABILITIES_L1 9
+#define TLU_DEVICE_CAPABILITIES_L1_MASK 0x7
+#define TLU_DEVICE_CAPABILITIES_L0S 6
+#define TLU_DEVICE_CAPABILITIES_L0S_MASK 0x7
+#define TLU_DEVICE_CAPABILITIES_MPS 0
+#define TLU_DEVICE_CAPABILITIES_MPS_MASK 0x7
+#define TLU_DEVICE_CONTROL 0x90008
+#define TLU_DEVICE_CONTROL_MRRS 12
+#define TLU_DEVICE_CONTROL_MRRS_MASK 0x7
+#define TLU_DEVICE_CONTROL_MPS 5
+#define TLU_DEVICE_CONTROL_MPS_MASK 0x7
+#define TLU_DEVICE_STATUS 0x90010
+#define TLU_DEVICE_STATUS_TP 5
+#define TLU_LINK_CAPABILITIES 0x90018
+#define TLU_LINK_CAPABILITIES_PORT 24
+#define TLU_LINK_CAPABILITIES_PORT_MASK 0xff
+#define TLU_LINK_CAPABILITIES_L1 15
+#define TLU_LINK_CAPABILITIES_L1_MASK 0x7
+#define TLU_LINK_CAPABILITIES_L0S 12
+#define TLU_LINK_CAPABILITIES_L0S_MASK 0x7
+#define TLU_LINK_CAPABILITIES_ASPM 10
+#define TLU_LINK_CAPABILITIES_ASPM_MASK 0x3
+#define TLU_LINK_CAPABILITIES_WIDTH 4
+#define TLU_LINK_CAPABILITIES_WIDTH_MASK 0x3f
+#define TLU_LINK_CAPABILITIES_SPEED 0
+#define TLU_LINK_CAPABILITIES_SPEED_MASK 0xf
+#define TLU_LINK_CONTROL 0x90020
+#define TLU_LINK_CONTROL_EXTSYNC 7
+#define TLU_LINK_CONTROL_CLOCK 6
+#define TLU_LINK_CONTROL_RETRAIN 5
+#define TLU_LINK_CONTROL_DISABLE 4
+#define TLU_LINK_CONTROL_RCB 3
+#define TLU_LINK_CONTROL_ASPM 0
+#define TLU_LINK_CONTROL_ASPM_MASK 0x3
+#define TLU_LINK_STATUS 0x90028
+#define TLU_LINK_STATUS_CLOCK 12
+#define TLU_LINK_STATUS_TRAIN 11
+#define TLU_LINK_STATUS_ERROR 10
+#define TLU_LINK_STATUS_WIDTH 4
+#define TLU_LINK_STATUS_WIDTH_MASK 0x3f
+#define TLU_LINK_STATUS_SPEED 0
+#define TLU_LINK_STATUS_SPEED_MASK 0xf
+#define TLU_SLOT_CAPABILITIES 0x90030
+#define TLU_SLOT_CAPABILITIES_SPLS 15
+#define TLU_SLOT_CAPABILITIES_SPLS_MASK 0x3
+#define TLU_SLOT_CAPABILITIES_SPLV 7
+#define TLU_SLOT_CAPABILITIES_SPLV_MASK 0xff
+
+/* Reserved 0x90038 - 0x90ff8 */
+
+#define TLU_UNCORRECTABLE_ERROR_LOG_ENABLE 0x91000
+#define TLU_UNCORRECTABLE_ERROR_LOG_ENABLE_EN 0
+#define TLU_UNCORRECTABLE_ERROR_LOG_ENABLE_EN_MASK 0x1fffff
+#define TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE 0x91008
+#define TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE_EN_S 32
+#define TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE_EN_S_MASK 0x1fffff
+#define TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE_EN_P 0
+#define TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE_EN_P_MASK 0x1fffff
+#define TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS 0x91010
+#define TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS_ERR_S 32
+#define TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS_ERR_S_MASK 0x1fffff
+#define TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS_ERR_P 0
+#define TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS_ERR_P_MASK 0x1fffff
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR 0x91018
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_UR_S 52
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_MFP_S 50
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_ROF_S 49
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_UC_S 48
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_CA_S 47
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_CTO_S 46
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_FCP_S 45
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_PP_S 44
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_DLP_S 36
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_TE_S 32
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_UR_P 20
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_MFP_P 18
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_ROF_P 17
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_UC_P 16
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_CA_P 15
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_CTO_P 14
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_FCP_P 13
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_PP_P 12
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_DLP_P 4
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_TE_P 0
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET 0x91020
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_UR_S 52
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_MFP_S 50
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_ROF_S 49
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_UC_S 48
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_CA_S 47
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_CTO_S 46
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_FCP_S 45
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_PP_S 44
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_DLP_S 36
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_TE_S 32
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_UR_P 20
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_MFP_P 18
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_ROF_P 17
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_UC_P 16
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_CA_P 15
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_CTO_P 14
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_FCP_P 13
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_PP_P 12
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_DLP_P 4
+#define TLU_UNCORRECTABLE_ERROR_STATUS_SET_TE_P 0
+#define TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG 0x91028
+#define TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG_HDR 0
+#define TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG_HDR_MASK \
+ 0xffffffffffffffff
+#define TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG 0x91030
+#define TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG_HDR 0
+#define TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG_HDR_MASK \
+ 0xffffffffffffffff
+#define TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG 0x91038
+#define TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG_HDR 0
+#define TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG_HDR_MASK \
+ 0xffffffffffffffff
+#define TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG 0x91040
+#define TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG_HDR 0
+#define TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG_HDR_MASK \
+ 0xffffffffffffffff
+
+/* Reserved 0x91048 - 0x9fff8 */
+
+
+/* Reserved 0xa0008 - 0xa0ff8 */
+
+#define TLU_CORRECTABLE_ERROR_LOG_ENABLE 0xa1000
+#define TLU_CORRECTABLE_ERROR_LOG_ENABLE_EN 0
+#define TLU_CORRECTABLE_ERROR_LOG_ENABLE_EN_MASK 0x1fff
+#define TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE 0xa1008
+#define TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE_EN_S 32
+#define TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE_EN_S_MASK 0x1fff
+#define TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE_EN_P 0
+#define TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE_EN_P_MASK 0x1fff
+#define TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS 0xa1010
+#define TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS_ERR_S 32
+#define TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS_ERR_S_MASK 0x1fff
+#define TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS_ERR_P 0
+#define TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS_ERR_P_MASK 0x1fff
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR 0xa1018
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR_RTO_S 44
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR_RNR_S 40
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR_BDP_S 39
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR_BTP_S 38
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR_RE_S 32
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR_RTO_P 12
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR_RNR_P 8
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR_BDP_P 7
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR_BTP_P 6
+#define TLU_CORRECTABLE_ERROR_STATUS_CLEAR_RE_P 0
+#define TLU_CORRECTABLE_ERROR_STATUS_SET 0xa1020
+#define TLU_CORRECTABLE_ERROR_STATUS_SET_RTO_S 44
+#define TLU_CORRECTABLE_ERROR_STATUS_SET_RNR_S 40
+#define TLU_CORRECTABLE_ERROR_STATUS_SET_BDP_S 39
+#define TLU_CORRECTABLE_ERROR_STATUS_SET_BTP_S 38
+#define TLU_CORRECTABLE_ERROR_STATUS_SET_RE_S 32
+#define TLU_CORRECTABLE_ERROR_STATUS_SET_RTO_P 12
+#define TLU_CORRECTABLE_ERROR_STATUS_SET_RNR_P 8
+#define TLU_CORRECTABLE_ERROR_STATUS_SET_BDP_P 7
+#define TLU_CORRECTABLE_ERROR_STATUS_SET_BTP_P 6
+#define TLU_CORRECTABLE_ERROR_STATUS_SET_RE_P 0
+
+/* lpr.csr LPR module defines */
+
+#define LPR_CSR_BASE 0x600000
+
+/* Reserved 0xe0008 - 0xe1ff8 */
+
+#define LPU_ID 0xe2000
+#define LPU_ID_LTBWDTH 20
+#define LPU_ID_LTBWDTH_MASK 0xf
+#define LPU_ID_PTLWDTH 16
+#define LPU_ID_PTLWDTH_MASK 0xf
+#define LPU_ID_TRID 12
+#define LPU_ID_TRID_MASK 0xf
+#define LPU_ID_LNKID 8
+#define LPU_ID_LNKID_MASK 0xf
+#define LPU_ID_PHYID 4
+#define LPU_ID_PHYID_MASK 0xf
+#define LPU_ID_GBID 0
+#define LPU_ID_GBID_MASK 0xf
+#define LPU_RESET 0xe2008
+#define LPU_RESET_RSTWE 31
+#define LPU_RESET_RSTUNUSED 9
+#define LPU_RESET_RSTUNUSED_MASK 0x7
+#define LPU_RESET_RSTERROR 8
+#define LPU_RESET_RSTTXLINK 7
+#define LPU_RESET_RSTRXLINK 6
+#define LPU_RESET_RSTSMLINK 5
+#define LPU_RESET_RSTLTSSM 4
+#define LPU_RESET_RSTTXPHY 3
+#define LPU_RESET_RSTRXPHY 2
+#define LPU_RESET_RSTTXPCS 1
+#define LPU_RESET_RSTRXPCS 0
+#define LPU_DEBUG_STATUS 0xe2010
+#define LPU_DEBUG_STATUS_DEBUGB 8
+#define LPU_DEBUG_STATUS_DEBUGB_MASK 0xff
+#define LPU_DEBUG_STATUS_DEBUGA 0
+#define LPU_DEBUG_STATUS_DEBUGA_MASK 0xff
+#define LPU_DEBUG_CONFIG 0xe2018
+#define LPU_DEBUG_CONFIG_DBUGB_BLK_SEL 24
+#define LPU_DEBUG_CONFIG_DBUGB_BLK_SEL_MASK 0xff
+#define LPU_DEBUG_CONFIG_DBUGB_SIG_SEL 16
+#define LPU_DEBUG_CONFIG_DBUGB_SIG_SEL_MASK 0xff
+#define LPU_DEBUG_CONFIG_DBUGA_BLK_SEL 8
+#define LPU_DEBUG_CONFIG_DBUGA_BLK_SEL_MASK 0xff
+#define LPU_DEBUG_CONFIG_DBUGA_SIG_SEL 0
+#define LPU_DEBUG_CONFIG_DBUGA_SIG_SEL_MASK 0xff
+#define LPU_LTSSM_CONTROL 0xe2020
+#define LPU_LTSSM_CONTROL_WR_ENABLE 31
+#define LPU_LTSSM_CONTROL_RCOVER_TO_CONFIG 11
+#define LPU_LTSSM_CONTROL_L0_TO_RECOVER 10
+#define LPU_LTSSM_CONTROL_UNUSED_0 9
+#define LPU_LTSSM_CONTROL_GO_TO_DETECT 8
+#define LPU_LTSSM_CONTROL_UNUSED_1 4
+#define LPU_LTSSM_CONTROL_UNUSED_1_MASK 0xf
+#define LPU_LTSSM_CONTROL_DISABLE_SCRAMBLING 3
+#define LPU_LTSSM_CONTROL_LINK_LOOPBK_REQ 2
+#define LPU_LTSSM_CONTROL_LINK_DISABLE_REQ 1
+#define LPU_LTSSM_CONTROL_HOT_RESET 0
+#define LPU_LINK_STATUS 0xe2028
+#define LPU_LINK_STATUS_SLOT_CLK_CONFG_PIN 12
+#define LPU_LINK_STATUS_LINK_TRAINING 11
+#define LPU_LINK_STATUS_LINK_TRAINING_ERR 10
+#define LPU_LINK_STATUS_NEGOTIATED_WIDTH 4
+#define LPU_LINK_STATUS_NEGOTIATED_WIDTH_MASK 0x3f
+#define LPU_LINK_STATUS_LINK_SPEED 0
+#define LPU_LINK_STATUS_LINK_SPEED_MASK 0xf
+
+/* Reserved 0xe2030 - 0xe2038 */
+
+#define LPU_INTERRUPT_STATUS 0xe2040
+#define LPU_INTERRUPT_STATUS_INTERRUPT 31
+#define LPU_INTERRUPT_STATUS_INT_PERF_CNTR_2_OVFLW 7
+#define LPU_INTERRUPT_STATUS_INT_PERF_CNTR_1_OVFLW 6
+#define LPU_INTERRUPT_STATUS_INT_LINK_LAYER 5
+#define LPU_INTERRUPT_STATUS_INT_PHY_ERROR 4
+#define LPU_INTERRUPT_STATUS_INT_LTSSM 3
+#define LPU_INTERRUPT_STATUS_INT_PHY_TX 2
+#define LPU_INTERRUPT_STATUS_INT_PHY_RX 1
+#define LPU_INTERRUPT_STATUS_INT_PHY_GB 0
+#define LPU_INTERRUPT_MASK 0xe2048
+#define LPU_INTERRUPT_MASK_MSK_INTERRUPT_EN 31
+#define LPU_INTERRUPT_MASK_MSK_PERF_CNTR_2_OVFLW 7
+#define LPU_INTERRUPT_MASK_MSK_PERF_CNTR_1_OVFLW 6
+#define LPU_INTERRUPT_MASK_MSK_LINK_LAYER 5
+#define LPU_INTERRUPT_MASK_MSK_PHY_ERROR 4
+#define LPU_INTERRUPT_MASK_MSK_LTSSM 3
+#define LPU_INTERRUPT_MASK_MSK_PHY_TX 2
+#define LPU_INTERRUPT_MASK_MSK_PHY_RX 1
+#define LPU_INTERRUPT_MASK_MSK_PHY_GB 0
+
+/* Reserved 0xe2050 - 0xe20f8 */
+
+#define LPU_LINK_PERFORMANCE_COUNTER_SELECT 0xe2100
+#define LPU_LINK_PERFORMANCE_COUNTER_SELECT_PERF_CNTR2_SELECT 16
+#define LPU_LINK_PERFORMANCE_COUNTER_SELECT_PERF_CNTR2_SELECT_MASK 0xffff
+#define LPU_LINK_PERFORMANCE_COUNTER_SELECT_PERF_CNTR1_SELECT 0
+#define LPU_LINK_PERFORMANCE_COUNTER_SELECT_PERF_CNTR1_SELECT_MASK 0xffff
+
+/* Reserved 0xe2108 - 0xe2108 */
+
+#define LPU_LINK_PERFORMANCE_COUNTER_CONTROL 0xe2110
+#define LPU_LINK_PERFORMANCE_COUNTER_CONTROL_SET_PERF_CNTR2_OVERFLOW 6
+#define LPU_LINK_PERFORMANCE_COUNTER_CONTROL_SET_PERF_CNTR1_OVERFLOW 5
+#define LPU_LINK_PERFORMANCE_COUNTER_CONTROL_RST_PERF_CNTR2_OVERFLOW 3
+#define LPU_LINK_PERFORMANCE_COUNTER_CONTROL_RST_PERF_CNTR2 2
+#define LPU_LINK_PERFORMANCE_COUNTER_CONTROL_RST_PERF_CNTR1_OVERFLOW 1
+#define LPU_LINK_PERFORMANCE_COUNTER_CONTROL_RST_PERF_CNTR1 0
+
+/* Reserved 0xe2118 - 0xe2118 */
+
+#define LPU_LINK_PERFORMANCE_COUNTER1 0xe2120
+#define LPU_LINK_PERFORMANCE_COUNTER1_PERF_CNTR1 0
+#define LPU_LINK_PERFORMANCE_COUNTER1_PERF_CNTR1_MASK 0xffffffff
+#define LPU_LINK_PERFORMANCE_COUNTER1_TEST 0xe2128
+#define LPU_LINK_PERFORMANCE_COUNTER1_TEST_PERF_CNTR1_TEST 0
+#define LPU_LINK_PERFORMANCE_COUNTER1_TEST_PERF_CNTR1_TEST_MASK 0xffffffff
+#define LPU_LINK_PERFORMANCE_COUNTER2 0xe2130
+#define LPU_LINK_PERFORMANCE_COUNTER2_PERF_CNTR2 0
+#define LPU_LINK_PERFORMANCE_COUNTER2_PERF_CNTR2_MASK 0xffffffff
+#define LPU_LINK_PERFORMANCE_COUNTER2_TEST 0xe2138
+#define LPU_LINK_PERFORMANCE_COUNTER2_TEST_PERF_CNTR2_TEST 0
+#define LPU_LINK_PERFORMANCE_COUNTER2_TEST_PERF_CNTR2_TEST_MASK 0xffffffff
+
+/* Reserved 0xe2140 - 0xe21f8 */
+
+#define LPU_LINK_LAYER_CONFIG 0xe2200
+#define FIRE10_LPU_LINK_LAYER_CONFIG 0xe2200
+#define LPU_LINK_LAYER_CONFIG_AUTO_UPDATE_DIS 19
+#define LPU_LINK_LAYER_CONFIG_FREQ_NAK_EN 18
+#define LPU_LINK_LAYER_CONFIG_REPLAY_AFTER_REC 17
+#define LPU_LINK_LAYER_CONFIG_LAT_THRES_WR_EN 16
+#define LPU_LINK_LAYER_CONFIG_VC0_EN 8
+#define LPU_LINK_LAYER_CONFIG_UNUSED 5
+#define LPU_LINK_LAYER_CONFIG_UNUSED_MASK 0x7
+#define FIRE10_LPU_LINK_LAYER_CONFIG_MAX_PAYLOAD 5
+#define FIRE10_LPU_LINK_LAYER_CONFIG_MAX_PAYLOAD_MASK 0x7
+#define LPU_LINK_LAYER_CONFIG_L0S_ADJ_FAC_EN 4
+#define LPU_LINK_LAYER_CONFIG_TLP_XMIT_FC_EN 3
+#define LPU_LINK_LAYER_CONFIG_FREQ_ACK_ENABLE 2
+#define LPU_LINK_LAYER_CONFIG_RETRY_DISABLE 1
+#define LPU_LINK_LAYER_STATUS 0xe2208
+#define LPU_LINK_LAYER_STATUS_INIT_FC_SM_WE 9
+#define LPU_LINK_LAYER_STATUS_LNK_ST_DLUP_WE 8
+#define LPU_LINK_LAYER_STATUS_INIT_FC_SM_STS 4
+#define LPU_LINK_LAYER_STATUS_INIT_FC_SM_STS_MASK 0x3
+#define LPU_LINK_LAYER_STATUS_DLUP_STS 3
+#define LPU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS 0
+#define LPU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_MASK 0x7
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS 0xe2210
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_LINK_ERR_ACT 31
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_UNSPRTD_DLLP 22
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_DLLP_RCV_ERR 21
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_BAD_DLLP 20
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_TLP_RCV_ERR 18
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_SRC_ERR_TLP 17
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_BAD_TLP 16
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_RTRY_BUF_UDF_ERR 9
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_RTRY_BUF_OVF_ERR 8
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_EG_TLP_MIN_ERR 7
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_EG_TRNC_FRM_ERR 6
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_RTRY_BUF_PE 5
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_EGRESS_PE 4
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_RPLAY_TMR_TO 2
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_RPLAY_NUM_RO 1
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_DLNK_PES 0
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST 0xe2218
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_UNSPRTD_DLLP 22
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_DLLP_RCV_ERR 21
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_BAD_DLLP 20
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_TLP_RCV_ERR 18
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_SRC_ERR_TLP 17
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_BAD_TLP 16
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_RTRY_BUF_UDF_ERR 9
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_RTRY_BUF_OVF 8
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_EG_TLP_MIN_ERR 7
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_EG_TRNC_FRM_ERR 6
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_RTRY_BUF_PE 5
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_EGRESS_PE 4
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_RPLAY_TMR_TO 2
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_RPLAY_NUM_RO 1
+#define LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST_TST_DLNK_PES 0
+#define LPU_LINK_LAYER_INTERRUPT_MASK 0xe2220
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_LINK_ERR_ACT 31
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_UNSPRTD_DLLP 22
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_DLLP_RCV_ERR 21
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_BAD_DLLP 20
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_UNUSED_2 19
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_TLP_RCV_ERR 18
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_SRC_ERR_TLP 17
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_BAD_TLP 16
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_UNUSED_1 10
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_UNUSED_1_MASK 0x3f
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_RTRY_UNF_OVF 9
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_RTRY_BUF_OVF 8
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_EG_TLP_MIN_ERR 7
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_EG_TRNC_FRM_ERR 6
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_RTRY_BUF_PE 5
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_EGRESS_PE 4
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_UNUSED_0 3
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_RPLAY_TMR_TO 2
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_RPLAY_NUM_RO 1
+#define LPU_LINK_LAYER_INTERRUPT_MASK_MSK_DLNK_PES 0
+
+/* Reserved 0xe2228 - 0xe2238 */
+
+#define LPU_FLOW_CONTROL_UPDATE_CONTROL 0xe2240
+#define LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_C_EN 2
+#define LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN 1
+#define LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN 0
+
+/* Reserved 0xe2248 - 0xe2258 */
+
+#define LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE 0xe2260
+#define LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE_FC_UPDATE_TO 0
+#define LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE_FC_UPDATE_TO_MASK \
+ 0x7fff
+#define LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 0xe2268
+#define LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0_VC0_FC_UP_TMR_NP 16
+#define LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0_VC0_FC_UP_TMR_NP_MASK \
+ 0x7fff
+#define LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0_VC0_FC_UP_TMR_P 0
+#define LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0_VC0_FC_UP_TMR_P_MASK \
+ 0x7fff
+#define LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 0xe2270
+#define LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1_VC0_FC_UP_TMR_CPL 0
+#define LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1_VC0_FC_UP_TMR_CPL_MASK \
+ 0x7fff
+
+/* Reserved 0xe2278 - 0xe23f8 */
+
+#define LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD 0xe2400
+#define LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD_ACK_NAK_THR 0
+#define LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD_ACK_NAK_THR_MASK \
+ 0xffff
+#define LPU_TXLINK_ACKNAK_LATENCY_TIMER 0xe2408
+#define LPU_TXLINK_ACKNAK_LATENCY_TIMER_ACK_NAK_TMR 0
+#define LPU_TXLINK_ACKNAK_LATENCY_TIMER_ACK_NAK_TMR_MASK 0xffff
+#define LPU_TXLINK_REPLAY_TIMER_THRESHOLD 0xe2410
+#define LPU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR 0
+#define LPU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR_MASK 0xfffff
+#define LPU_TXLINK_REPLAY_TIMER 0xe2418
+#define LPU_TXLINK_REPLAY_TIMER_RPLAY_TMR 0
+#define LPU_TXLINK_REPLAY_TIMER_RPLAY_TMR_MASK 0xfffff
+#define LPU_TXLINK_REPLAY_NUMBER_STATUS 0xe2420
+#define LPU_TXLINK_REPLAY_NUMBER_STATUS_WE 31
+#define LPU_TXLINK_REPLAY_NUMBER_STATUS_RPLAY_NUM_CNTR 0
+#define LPU_TXLINK_REPLAY_NUMBER_STATUS_RPLAY_NUM_CNTR_MASK 0x3
+#define LPU_REPLAY_BUFFER_MAX_ADDRESS 0xe2428
+#define LPU_REPLAY_BUFFER_MAX_ADDRESS_RTRY_BUFF_MAX_ADDR 0
+#define LPU_REPLAY_BUFFER_MAX_ADDRESS_RTRY_BUFF_MAX_ADDR_MASK 0xffff
+#define LPU_TXLINK_RETRY_FIFO_POINTER 0xe2430
+#define LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR 16
+#define LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_MASK 0xffff
+#define LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR 0
+#define LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_MASK 0xffff
+#define LPU_TXLINK_RETRY_FIFO_R_W_POINTER 0xe2438
+#define LPU_TXLINK_RETRY_FIFO_R_W_POINTER_RTRY_BFFR_WRPTR 16
+#define LPU_TXLINK_RETRY_FIFO_R_W_POINTER_RTRY_BFFR_WRPTR_MASK 0xffff
+#define LPU_TXLINK_RETRY_FIFO_R_W_POINTER_RTRY_BFFR_RDPTR 0
+#define LPU_TXLINK_RETRY_FIFO_R_W_POINTER_RTRY_BFFR_RDPTR_MASK 0xffff
+#define LPU_TXLINK_RETRY_FIFO_CREDIT 0xe2440
+#define LPU_TXLINK_RETRY_FIFO_CREDIT_RTRY_FIFO_CRDT 0
+#define LPU_TXLINK_RETRY_FIFO_CREDIT_RTRY_FIFO_CRDT_MASK 0xffff
+#define LPU_TXLINK_SEQUENCE_COUNTER 0xe2448
+#define LPU_TXLINK_SEQUENCE_COUNTER_WE 31
+#define LPU_TXLINK_SEQUENCE_COUNTER_ACK_SEQ_WE 30
+#define LPU_TXLINK_SEQUENCE_COUNTER_ACK_SEQ_CNTR 16
+#define LPU_TXLINK_SEQUENCE_COUNTER_ACK_SEQ_CNTR_MASK 0xfff
+#define LPU_TXLINK_SEQUENCE_COUNTER_NXT_TX_SEQ_CNTR 0
+#define LPU_TXLINK_SEQUENCE_COUNTER_NXT_TX_SEQ_CNTR_MASK 0xfff
+#define LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER 0xe2450
+#define LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER_SEQ_NUM 0
+#define LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER_SEQ_NUM_MASK 0xfff
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR 0xe2458
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR_SEQ_CNT_MAX_ADDR 0
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR_SEQ_CNT_MAX_ADDR_MASK 0xfff
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS 0xe2460
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS_SEQ_CNT_TLPTR 16
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS_SEQ_CNT_TLPTR_MASK 0xfff
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS_SEQ_CNT_HDPTR 0
+#define LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS_SEQ_CNT_HDPTR_MASK 0xfff
+#define LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS 0xe2468
+#define LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS_SEQ_CNT_WRPTR 16
+#define LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS_SEQ_CNT_WRPTR_MASK 0xfff
+#define LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS_SEQ_CNT_RDPTR 0
+#define LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS_SEQ_CNT_RDPTR_MASK 0xfff
+#define LPU_TXLINK_TEST_CONTROL 0xe2470
+#define LPU_TXLINK_TEST_CONTROL_DIS_ACK 3
+#define LPU_TXLINK_TEST_CONTROL_FORCE_NAK 2
+#define LPU_TXLINK_TEST_CONTROL_FORCE_BAD_TLP_CRC 1
+#define LPU_TXLINK_TEST_CONTROL_FORCE_RTX_TLP 0
+
+/* Reserved 0xe2478 - 0xe2478 */
+
+#define LPU_TXLINK_MEMORY_ADDRESS_CONTROL 0xe2480
+#define LPU_TXLINK_MEMORY_ADDRESS_CONTROL_DONE 31
+#define LPU_TXLINK_MEMORY_ADDRESS_CONTROL_GO_BIT 30
+#define LPU_TXLINK_MEMORY_ADDRESS_CONTROL_RD_WR_SEL 29
+#define LPU_TXLINK_MEMORY_ADDRESS_CONTROL_FIFO_SEL 28
+#define LPU_TXLINK_MEMORY_ADDRESS_CONTROL_MEM_ADDR 0
+#define LPU_TXLINK_MEMORY_ADDRESS_CONTROL_MEM_ADDR_MASK 0xffff
+#define LPU_TXLINK_MEMORY_DATA_LOAD0 0xe2488
+#define LPU_TXLINK_MEMORY_DATA_LOAD0_MEM_RD_WR_DATA0 0
+#define LPU_TXLINK_MEMORY_DATA_LOAD0_MEM_RD_WR_DATA0_MASK 0xffffffff
+#define LPU_TXLINK_MEMORY_DATA_LOAD1 0xe2490
+#define LPU_TXLINK_MEMORY_DATA_LOAD1_MEM_RD_WR_DATA1 0
+#define LPU_TXLINK_MEMORY_DATA_LOAD1_MEM_RD_WR_DATA1_MASK 0xffffffff
+#define LPU_TXLINK_MEMORY_DATA_LOAD2 0xe2498
+#define LPU_TXLINK_MEMORY_DATA_LOAD3 0xe24a0
+#define LPU_TXLINK_MEMORY_DATA_LOAD4 0xe24a8
+#define LPU_TXLINK_MEMORY_DATA_LOAD4_MEM_RD_WR_DATA4 0
+#define LPU_TXLINK_MEMORY_DATA_LOAD4_MEM_RD_WR_DATA4_MASK 0xff
+
+/* Reserved 0xe24b0 - 0xe24b8 */
+
+#define LPU_TXLINK_RETRY_DATA_COUNT 0xe24c0
+#define LPU_TXLINK_RETRY_DATA_COUNT_RTRY_DATA_CNT 0
+#define LPU_TXLINK_RETRY_DATA_COUNT_RTRY_DATA_CNT_MASK 0xffff
+#define LPU_TXLINK_SEQUENCE_BUFFER_COUNT 0xe24c8
+#define LPU_TXLINK_SEQUENCE_BUFFER_COUNT_SEQ_BUFF_CNT 0
+#define LPU_TXLINK_SEQUENCE_BUFFER_COUNT_SEQ_BUFF_CNT_MASK 0xfff
+#define LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA 0xe24d0
+#define LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA_SBUF_BDATA_PAR 30
+#define LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA_SBDATA_SEQ_NUM 18
+#define LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA_SBDATA_SEQ_NUM_MASK 0xfff
+#define LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA_SBDATA_RTRY_PTR 2
+#define LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA_SBDATA_RTRY_PTR_MASK 0xffff
+#define LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA_SBDATA_EOP_POS 0
+#define LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA_SBDATA_EOP_POS_MASK 0x3
+
+/* Reserved 0xe24d8 - 0xe24d8 */
+
+#define LPU_TXLINK_ACK_LATENCY_TIMER_THRESHOLD 0xe24e0
+#define LPU_TXLINK_ACK_LATENCY_TIMER_THRESHOLD_ACK_LAT_THHOLD 0
+#define LPU_TXLINK_ACK_LATENCY_TIMER_THRESHOLD_ACK_LAT_THHOLD_MASK 0xffff
+
+/* Reserved 0xe24e8 - 0xe24f8 */
+
+#define LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER 0xe2500
+#define LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER_NXT_RX_SEQ_CNTR 0
+#define LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER_NXT_RX_SEQ_CNTR_MASK \
+ 0xfff
+#define LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED 0xe2508
+#define LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED_BYTE3 24
+#define LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED_BYTE3_MASK 0xff
+#define LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED_BYTE2 16
+#define LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED_BYTE2_MASK 0xff
+#define LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED_BYTE1 8
+#define LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED_BYTE1_MASK 0xff
+#define LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED_BYTE0 0
+#define LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED_BYTE0_MASK 0xff
+#define LPU_RXLINK_TEST_CONTROL 0xe2510
+#define LPU_RXLINK_TEST_CONTROL_FORCE_SEND_INIT_FC_DLLP 1
+#define LPU_RXLINK_TEST_CONTROL_FORCE_PAR_ERR_DLLP 0
+
+/* Reserved 0xe2518 - 0xe25f8 */
+
+#define LPU_PHYSICAL_LAYER_CONFIGURATION 0xe2600
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_PHY_TST_EN 31
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_FAST_SIM 30
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_UNUSED 29
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_FRCE_EXTEN_SYNC 28
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_TX_EIDLE_POST_EN 11
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_TX_OS_POST_VAL 8
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_TX_OS_POST_VAL_MASK 0x7
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_TX_OS_BYTE_SEL 7
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_TX_OS_PREAM_VAL 4
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_TX_OS_PREAM_VAL_MASK 0x7
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_TX_RDET_BYP_MODE 3
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_TX_RDET_SAFE_MODE 2
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_TX_UNUSED 1
+#define LPU_PHYSICAL_LAYER_CONFIGURATION_TX_PAR_ERR 0
+#define LPU_PHY_LAYER_STATUS 0xe2608
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS 0xe2610
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_PHY_LAYER_ERR 31
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_KCHAR_DLLP_ERR 11
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_ILL_END_POS_ERR 10
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_LNK_ERR 9
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_TRN_ERR 8
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_EDB_DET 7
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_SDP_END 6
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_STP_END_EDB 5
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_INVLD_CHAR_ERR 4
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_MULTI_SDP 3
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_MULTI_STP 2
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_ILL_SDP_POS 1
+#define LPU_PHY_LAYER_INTERRUPT_AND_STATUS_INT_ILL_STP_POS 0
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST 0xe2618
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_KCHAR_DLLP_ERR 11
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_ILL_END_POS_ERR 10
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_LNK_ERR 9
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_TRN_ERR 8
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_EDB_DET 7
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_SDP_END 6
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_STP_END_EDB 5
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_INVLD_CHAR_ERR 4
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_MULTI_SDP 3
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_MULTI_STP 2
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_ILL_SDP_POS 1
+#define LPU_PHY_INTERRUPT_AND_STATUS_TEST_TST_ILL_STP_POS 0
+#define LPU_PHY_INTERRUPT_MASK 0xe2620
+#define LPU_PHY_INTERRUPT_MASK_MSK_PHY_LAYER_ERR 31
+#define LPU_PHY_INTERRUPT_MASK_MSK_KCHAR_DLLP_ERR 11
+#define LPU_PHY_INTERRUPT_MASK_MSK_ILL_END_POS_ERR 10
+#define LPU_PHY_INTERRUPT_MASK_MSK_LNK_ERR 9
+#define LPU_PHY_INTERRUPT_MASK_MSK_TRN_ERR 8
+#define LPU_PHY_INTERRUPT_MASK_MSK_EDB_DET 7
+#define LPU_PHY_INTERRUPT_MASK_MSK_SDP_END 6
+#define LPU_PHY_INTERRUPT_MASK_MSK_STP_END_EDB 5
+#define LPU_PHY_INTERRUPT_MASK_MSK_INVLD_CHAR_ERR 4
+#define LPU_PHY_INTERRUPT_MASK_MSK_MULTI_SDP 3
+#define LPU_PHY_INTERRUPT_MASK_MSK_MULTI_STP 2
+#define LPU_PHY_INTERRUPT_MASK_MSK_ILL_SDP_POS 1
+#define LPU_PHY_INTERRUPT_MASK_MSK_ILL_STP_POS 0
+
+/* Reserved 0xe2628 - 0xe2678 */
+
+#define LPU_RECEIVE_PHY_CONFIG 0xe2680
+#define LPU_RECEIVE_PHY_CONFIG_RX_PHY_TST 31
+#define LPU_RECEIVE_PHY_CONFIG_UNUSED_0 18
+#define LPU_RECEIVE_PHY_CONFIG_UNUSED_0_MASK 0x1fff
+#define LPU_RECEIVE_PHY_CONFIG_WM_SEL_FIFO 16
+#define LPU_RECEIVE_PHY_CONFIG_WM_SEL_FIFO_MASK 0x3
+#define LPU_RECEIVE_PHY_CONFIG_UNUSED_1 8
+#define LPU_RECEIVE_PHY_CONFIG_UNUSED_1_MASK 0xff
+#define LPU_RECEIVE_PHY_CONFIG_RST_RCV_LANE 0
+#define LPU_RECEIVE_PHY_CONFIG_RST_RCV_LANE_MASK 0xff
+#define LPU_RECEIVE_PHY_STATUS1 0xe2688
+#define LPU_RECEIVE_PHY_STATUS1_ALIGN_STS 16
+#define LPU_RECEIVE_PHY_STATUS1_RX_PHY_STS 0
+#define LPU_RECEIVE_PHY_STATUS1_RX_PHY_STS_MASK 0xffff
+#define LPU_RECEIVE_PHY_STATUS2 0xe2690
+#define LPU_RECEIVE_PHY_STATUS2_RCV_DIS_SCRAM 27
+#define LPU_RECEIVE_PHY_STATUS2_RCV_EN_LOOPBACK 26
+#define LPU_RECEIVE_PHY_STATUS2_RCV_DIS_LINK 25
+#define LPU_RECEIVE_PHY_STATUS2_RCV_HOT_RST 24
+#define LPU_RECEIVE_PHY_STATUS2_RCV_DATA_RATE 16
+#define LPU_RECEIVE_PHY_STATUS2_RCV_DATA_RATE_MASK 0xff
+#define LPU_RECEIVE_PHY_STATUS2_RCV_FTS_NUM 8
+#define LPU_RECEIVE_PHY_STATUS2_RCV_FTS_NUM_MASK 0xff
+#define LPU_RECEIVE_PHY_STATUS2_RCV_LINK_NUM 0
+#define LPU_RECEIVE_PHY_STATUS2_RCV_LINK_NUM_MASK 0xff
+#define LPU_RECEIVE_PHY_STATUS3 0xe2698
+#define LPU_RECEIVE_PHY_STATUS3_POL_REV_STS 16
+#define LPU_RECEIVE_PHY_STATUS3_POL_REV_STS_MASK 0xff
+#define LPU_RECEIVE_PHY_STATUS3_BYTE_SYNC_STS 0
+#define LPU_RECEIVE_PHY_STATUS3_BYTE_SYNC_STS_MASK 0xff
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS 0xe26a0
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_INT_RCV_PHY 31
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_INT_UNUSED 3
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_INT_UNUSED_MASK 0x1ff
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_INT_ALIGN_ERR 2
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_INT_ELSTC_FIFO_OVRFLW 1
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_INT_ELSTC_FIFO_UNDRFLW 0
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST 0xe26a8
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST_TST_UNUSED 3
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST_TST_UNUSED_MASK 0x1ff
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST_TST_ALIGN_ERR 2
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST_TST_ELSTC_FIFO_OVRFLW 1
+#define LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST_TST_ELSTC_FIFO_UNDRFLW 0
+#define LPU_RECEIVE_PHY_INTERRUPT_MASK 0xe26b0
+#define LPU_RECEIVE_PHY_INTERRUPT_MASK_MSK_RCV_PHY_INT 31
+#define LPU_RECEIVE_PHY_INTERRUPT_MASK_MSK_UNUSED 3
+#define LPU_RECEIVE_PHY_INTERRUPT_MASK_MSK_UNUSED_MASK 0x1ff
+#define LPU_RECEIVE_PHY_INTERRUPT_MASK_MSK_ALIGN_ERR 2
+#define LPU_RECEIVE_PHY_INTERRUPT_MASK_MSK_ELSTC_FIFO_OVRFLW 1
+#define LPU_RECEIVE_PHY_INTERRUPT_MASK_MSK_ELSTC_FIFO_UNDRFLW 0
+
+/* Reserved 0xe26b8 - 0xe26f8 */
+
+#define LPU_TRANSMIT_PHY_CONFIG 0xe2700
+#define LPU_TRANSMIT_PHY_CONFIG_FRCE_RCVR_DET 16
+#define LPU_TRANSMIT_PHY_CONFIG_FRCE_RCVR_DET_MASK 0xffff
+#define LPU_TRANSMIT_PHY_CONFIG_FRCE_ELEC_IDLE 0
+#define LPU_TRANSMIT_PHY_CONFIG_FRCE_ELEC_IDLE_MASK 0xffff
+#define LPU_TRANSMIT_PHY_STATUS 0xe2708
+#define LPU_TRANSMIT_PHY_STATUS_NEG_LANE_WDTH 28
+#define LPU_TRANSMIT_PHY_STATUS_NEG_LANE_WDTH_MASK 0xf
+#define LPU_TRANSMIT_PHY_STATUS_TXPHY_SCRAM_EN 27
+#define LPU_TRANSMIT_PHY_STATUS_TX_LANE_REV 26
+#define LPU_TRANSMIT_PHY_STATUS_TX_LANE_PAD 25
+#define LPU_TRANSMIT_PHY_STATUS_TX_LINK_PAD 24
+#define LPU_TRANSMIT_PHY_STATUS_TX_PHY_SMS 0
+#define LPU_TRANSMIT_PHY_STATUS_TX_PHY_SMS_MASK 0x7fffff
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS 0xe2710
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_UNMSK 31
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_RCV_IDLE 11
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_RCV_TS2 10
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_RCV_TS1 9
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_SKP_ERR 8
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_SKP_DONE_BK2BK 7
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_SKP_ACK_DECR 6
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_SKP_DONE_DECR 5
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_SKP_TRIG 4
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_UNUSED_2 2
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_UNUSED_2_MASK 0x3
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_RCVR_DET_VALID 1
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_INT_TX_PAR_ERR 0
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST 0xe2718
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST_TST_TX_PHY_INT 0
+#define LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST_TST_TX_PHY_INT_MASK 0xfff
+#define LPU_TRANSMIT_PHY_INTERRUPT_MASK 0xe2720
+#define LPU_TRANSMIT_PHY_INTERRUPT_MASK_MSK_GLOBL_INT 31
+#define LPU_TRANSMIT_PHY_INTERRUPT_MASK_MSK_IMPLEM_INT 0
+#define LPU_TRANSMIT_PHY_INTERRUPT_MASK_MSK_IMPLEM_INT_MASK 0xfff
+#define LPU_TRANSMIT_PHY_STATUS_2 0xe2728
+#define LPU_TRANSMIT_PHY_STATUS_2_RECV_DET_STS 16
+#define LPU_TRANSMIT_PHY_STATUS_2_RECV_DET_STS_MASK 0xffff
+#define LPU_TRANSMIT_PHY_STATUS_2_RECV_DET_RAW_STS 0
+#define LPU_TRANSMIT_PHY_STATUS_2_RECV_DET_RAW_STS_MASK 0xffff
+
+/* Reserved 0xe2730 - 0xe2778 */
+
+#define LPU_LTSSM_CONFIG1 0xe2780
+#define LPU_LTSSM_CONFIG1_LTSSM_TST 31
+#define LPU_LTSSM_CONFIG1_CFG_UNUSED 18
+#define LPU_LTSSM_CONFIG1_CFG_UNUSED_MASK 0x1fff
+#define LPU_LTSSM_CONFIG1_LPBK_MSTR 17
+#define LPU_LTSSM_CONFIG1_HI_DATA_SUP 16
+#define LPU_LTSSM_CONFIG1_LTSSM_8_TO 8
+#define LPU_LTSSM_CONFIG1_LTSSM_8_TO_MASK 0xff
+#define LPU_LTSSM_CONFIG1_LTSSM_20_TO 0
+#define LPU_LTSSM_CONFIG1_LTSSM_20_TO_MASK 0xff
+#define LPU_LTSSM_CONFIG2 0xe2788
+#define LPU_LTSSM_CONFIG2_LTSSM_12_TO 0
+#define LPU_LTSSM_CONFIG2_LTSSM_12_TO_MASK 0xffffffff
+#define LPU_LTSSM_CONFIG3 0xe2790
+#define LPU_LTSSM_CONFIG3_LTSSM_2_TO 0
+#define LPU_LTSSM_CONFIG3_LTSSM_2_TO_MASK 0xffffffff
+#define LPU_LTSSM_CONFIG4 0xe2798
+#define LPU_LTSSM_CONFIG4_TRN_CNTRL 24
+#define LPU_LTSSM_CONFIG4_TRN_CNTRL_MASK 0xff
+#define LPU_LTSSM_CONFIG4_DATA_RATE 16
+#define LPU_LTSSM_CONFIG4_DATA_RATE_MASK 0xff
+#define LPU_LTSSM_CONFIG4_N_FTS 8
+#define LPU_LTSSM_CONFIG4_N_FTS_MASK 0xff
+#define LPU_LTSSM_CONFIG4_LNK_NUM 0
+#define LPU_LTSSM_CONFIG4_LNK_NUM_MASK 0xff
+#define LPU_LTSSM_CONFIG5 0xe27a0
+#define LPU_LTSSM_CONFIG5_CFG_UNUSED_0 13
+#define LPU_LTSSM_CONFIG5_CFG_UNUSED_0_MASK 0x7ffff
+#define LPU_LTSSM_CONFIG5_RCV_DET_TST_MODE 12
+#define LPU_LTSSM_CONFIG5_POLL_CMPLNC_DIS 11
+#define LPU_LTSSM_CONFIG5_TX_IDLE_TX_FTS 10
+#define LPU_LTSSM_CONFIG5_RX_FTS_RVR_LK 9
+#define LPU_LTSSM_CONFIG5_CFG_UNUSED_1 7
+#define LPU_LTSSM_CONFIG5_CFG_UNUSED_1_MASK 0x3
+#define LPU_LTSSM_CONFIG5_LPBK_ENTRY_ACTIVE 6
+#define LPU_LTSSM_CONFIG5_LPBK_ENTRY_EXIT 5
+#define LPU_LTSSM_CONFIG5_LPBK_ACTIVE_EXIT 4
+#define LPU_LTSSM_CONFIG5_L1_IDLE_RCVRY_LK 3
+#define LPU_LTSSM_CONFIG5_L0_TRN_CNTRL_RST 2
+#define LPU_LTSSM_CONFIG5_L0_LPBK 1
+#define LPU_LTSSM_CONFIG5_CFG_UNUSED_2 0
+#define LPU_LTSSM_STATUS1 0xe27a8
+#define LPU_LTSSM_STATUS1_RX_LN_EN_MSK 16
+#define LPU_LTSSM_STATUS1_RX_LN_EN_MSK_MASK 0xffff
+#define LPU_LTSSM_STATUS1_RX_ALGN_CMD 15
+#define LPU_LTSSM_STATUS1_MSTR_LN_SEL 14
+#define LPU_LTSSM_STATUS1_LNK_OT_RX 13
+#define LPU_LTSSM_STATUS1_LNK_OT_TX 12
+#define LPU_LTSSM_STATUS1_LN_RVRSD 11
+#define LPU_LTSSM_STATUS1_LNK_UP_DWN_STS 10
+#define LPU_LTSSM_STATUS1_LTSSM_STATE 4
+#define LPU_LTSSM_STATUS1_LTSSM_STATE_MASK 0x3f
+#define LPU_LTSSM_STATUS1_CNFG_LNK_WDTH 0
+#define LPU_LTSSM_STATUS1_CNFG_LNK_WDTH_MASK 0xf
+#define LPU_LTSSM_STATUS2 0xe27b0
+#define LPU_LTSSM_STATUS2_TX_CMD_TX_PHY 16
+#define LPU_LTSSM_STATUS2_TX_CMD_TX_PHY_MASK 0xffff
+#define LPU_LTSSM_STATUS2_RX_CMD_RX_PHY 0
+#define LPU_LTSSM_STATUS2_RX_CMD_RX_PHY_MASK 0xffff
+#define LPU_LTSSM_INTERRUPT_AND_STATUS 0xe27b8
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_ANY 31
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_SKIP_OS 15
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_FTS 14
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_TS2_RECOV 13
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_8IDLE_DATA 12
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_IDLE_DATA 11
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_TSX_POLL 10
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_TSX_INV 9
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_EIDLE_EXIT 8
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_TSX_COMP 7
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_TSX_LB 6
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_TSX_DIS 5
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_TSX_RST 4
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_EIDLE 3
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_TS2 2
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_TS1 1
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_INT_NONE 0
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST 0xe27c0
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_SKIP_OS 15
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_FTS 14
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_TS2_RECOV 13
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_8IDLE_DATA 12
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_IDLE_DATA 11
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_TSX_POLL 10
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_TSX_INV 9
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_EIDLE_EXIT 8
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_TSX_COMP 7
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_TSX_LB 6
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_TSX_DIS 5
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_TSX_RST 4
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_EIDLE 3
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_TS2 2
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_TS1 1
+#define LPU_LTSSM_INTERRUPT_AND_STATUS_TEST_TST_NONE 0
+#define LPU_LTSSM_INTERRUPT_MASK 0xe27c8
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_GLB 31
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_SKIP_OS 15
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_FTS 14
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_TS2_RECOV 13
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_8IDLE_DATA 12
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_IDLE_DATA 11
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_TSX_POLL 10
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_TSX_INV 9
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_EIDLE_EXIT 8
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_TSX_COMP 7
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_TSX_LB 6
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_TSX_DIS 5
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_TSX_RST 4
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_EIDLE 3
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_TS2 2
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_TS1 1
+#define LPU_LTSSM_INTERRUPT_MASK_MSK_NONE 0
+#define LPU_LTSSM_STATUS_WRITE_ENABLE 0xe27d0
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE_UNUSED 11
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE_UNUSED_MASK 0x1fffff
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE1_LTSSM_STS2 10
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE2_LTSSM_STS2 9
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE1_LTSSM_STS1 8
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE2_LTSSM_STS1 7
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE3_LTSSM_STS1 6
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE4_LTSSM_STS1 5
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE5_LTSSM_STS1 4
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE6_LTSSM_STS1 3
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE7_LTSSM_STS1 2
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE8_LTSSM_STS1 1
+#define LPU_LTSSM_STATUS_WRITE_ENABLE_WE9_LTSSM_STS1 0
+
+/* Reserved 0xe27d8 - 0xe27f8 */
+
+#define LPU_GIGABLAZE_GLUE_CONFIG1 0xe2800
+#define LPU_GIGABLAZE_GLUE_CONFIG1_UNUSED_CNTL1 28
+#define LPU_GIGABLAZE_GLUE_CONFIG1_UNUSED_CNTL1_MASK 0xf
+#define LPU_GIGABLAZE_GLUE_CONFIG1_STM_SEL 24
+#define LPU_GIGABLAZE_GLUE_CONFIG1_STM_SEL_MASK 0xf
+#define LPU_GIGABLAZE_GLUE_CONFIG1_UNUSED_CNTL2 22
+#define LPU_GIGABLAZE_GLUE_CONFIG1_UNUSED_CNTL2_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG1_REV_LPBK_SEL 20
+#define LPU_GIGABLAZE_GLUE_CONFIG1_REV_LPBK_SEL_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG1_REV_LPBK_MODE 19
+#define LPU_GIGABLAZE_GLUE_CONFIG1_LPBK_ENB 18
+#define LPU_GIGABLAZE_GLUE_CONFIG1_LPBK_MODE_SEL 16
+#define LPU_GIGABLAZE_GLUE_CONFIG1_LPBK_MODE_SEL_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG1_RXLOS_FLTR_EN 15
+#define LPU_GIGABLAZE_GLUE_CONFIG1_RXLOS_ADJUST 12
+#define LPU_GIGABLAZE_GLUE_CONFIG1_RXLOS_ADJUST_MASK 0x7
+#define LPU_GIGABLAZE_GLUE_CONFIG1_RXLOS_SMPL_RT 8
+#define LPU_GIGABLAZE_GLUE_CONFIG1_RXLOS_SMPL_RT_MASK 0xf
+#define LPU_GIGABLAZE_GLUE_CONFIG1_RXLOS_THRSH_CN 0
+#define LPU_GIGABLAZE_GLUE_CONFIG1_RXLOS_THRSH_CN_MASK 0xff
+#define LPU_GIGABLAZE_GLUE_CONFIG2 0xe2808
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_VPULSE_CTL 30
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_VPULSE_CTL_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_VMUX_CTL 28
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_VMUX_CTL_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_RISE_FALL 25
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_RISE_FALL_MASK 0x7
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_PRE_EMPH 22
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_PRE_EMPH_MASK 0x7
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_VSWNG_CTL 18
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_VSWNG_CTL_MASK 0xf
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_PLL_ZERO_CTL 16
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_PLL_ZERO_CTL_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_PLL_POLE_CTL 14
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_PLL_POLE_CTL_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG2_RX_PLL_ZERO_CTL 12
+#define LPU_GIGABLAZE_GLUE_CONFIG2_RX_PLL_ZERO_CTL_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG2_RX_PLL_POLE_CTL 10
+#define LPU_GIGABLAZE_GLUE_CONFIG2_RX_PLL_POLE_CTL_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG2_RX_EQLIZR_CTL 6
+#define LPU_GIGABLAZE_GLUE_CONFIG2_RX_EQLIZR_CTL_MASK 0xf
+#define LPU_GIGABLAZE_GLUE_CONFIG2_OHM_SEL 5
+#define LPU_GIGABLAZE_GLUE_CONFIG2_RTRIMEN 4
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_TERM 2
+#define LPU_GIGABLAZE_GLUE_CONFIG2_TX_TERM_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG2_RX_TERM 0
+#define LPU_GIGABLAZE_GLUE_CONFIG2_RX_TERM_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG3 0xe2810
+#define LPU_GIGABLAZE_GLUE_CONFIG3_UNUSED_CNTL3 27
+#define LPU_GIGABLAZE_GLUE_CONFIG3_UNUSED_CNTL3_MASK 0x1f
+#define LPU_GIGABLAZE_GLUE_CONFIG3_OUT_BIAS_CTL 26
+#define LPU_GIGABLAZE_GLUE_CONFIG3_TX_RCV_DET 24
+#define LPU_GIGABLAZE_GLUE_CONFIG3_TX_RCV_DET_MASK 0x3
+#define LPU_GIGABLAZE_GLUE_CONFIG3_TX_PLL_HLF_RT_CTL 23
+#define LPU_GIGABLAZE_GLUE_CONFIG3_TX_PLL_FDBK_DIV 20
+#define LPU_GIGABLAZE_GLUE_CONFIG3_TX_PLL_FDBK_DIV_MASK 0x7
+#define LPU_GIGABLAZE_GLUE_CONFIG3_RX_PLL_HLF_RT_CTL 19
+#define LPU_GIGABLAZE_GLUE_CONFIG3_RX_PLL_FDBK_DIV 16
+#define LPU_GIGABLAZE_GLUE_CONFIG3_RX_PLL_FDBK_DIV_MASK 0x7
+#define LPU_GIGABLAZE_GLUE_CONFIG3_BIT_LCK_TM 0
+#define LPU_GIGABLAZE_GLUE_CONFIG3_BIT_LCK_TM_MASK 0xffff
+#define LPU_GIGABLAZE_GLUE_CONFIG4 0xe2818
+#define LPU_GIGABLAZE_GLUE_CONFIG4_CFG_UNUSED 20
+#define LPU_GIGABLAZE_GLUE_CONFIG4_CFG_UNUSED_MASK 0xfff
+#define LPU_GIGABLAZE_GLUE_CONFIG4_INIT_TIME 0
+#define LPU_GIGABLAZE_GLUE_CONFIG4_INIT_TIME_MASK 0xfffff
+#define LPU_GIGABLAZE_GLUE_STATUS 0xe2820
+#define LPU_GIGABLAZE_GLUE_STATUS_RCV_ELECT_IDLE 16
+#define LPU_GIGABLAZE_GLUE_STATUS_RCV_ELECT_IDLE_MASK 0xffff
+#define LPU_GIGABLAZE_GLUE_STATUS_BIT_SYNC_DN 0
+#define LPU_GIGABLAZE_GLUE_STATUS_BIT_SYNC_DN_MASK 0xffff
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS 0xe2828
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_INT_GLOBL_UNMSK 31
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_INT_UNUSED 16
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_INT_UNUSED_MASK 0xff
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_INT_BYTE_SYNC_STS 0
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_INT_BYTE_SYNC_STS_MASK 0xffff
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST 0xe2830
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST_TST_W1S_INT 16
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST_TST_W1S_INT_MASK 0xff
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST_TST_BSSS_INT 0
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST_TST_BSSS_INT_MASK 0xffff
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_MASK 0xe2838
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_MASK_MSK_GLOBL_INT 31
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_MASK_MSK_INT 0
+#define LPU_GIGABLAZE_GLUE_INTERRUPT_MASK_MSK_INT_MASK 0xffffff
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN1 0xe2840
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN1_TX_PWR_DN 16
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN1_TX_PWR_DN_MASK 0xffff
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN1_THE 0
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN1_THE_MASK 0x1
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN1_RX_PWR_DN 0
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN1_RX_PWR_DN_MASK 0xffff
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN2 0xe2848
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN2_PD_UNUSED 22
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN2_PD_UNUSED_MASK 0x3ff
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN2_PWR_DN_CLK_BUF 21
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN2_PWR_DN_RES_TRIM 20
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN2_TX_PLL_PWR_D 16
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN2_TX_PLL_PWR_D_MASK 0xf
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN2_RXLOS_PWR_DN 0
+#define LPU_GIGABLAZE_GLUE_POWER_DOWN2_RXLOS_PWR_DN_MASK 0xffff
+#define LPU_GIGABLAZE_GLUE_CONFIG5 0xe2850
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PX_REGS_H */
diff --git a/usr/src/uts/sun4u/io/px/px_tools.c b/usr/src/uts/sun4u/io/px/px_tools.c
new file mode 100644
index 0000000000..6e3ac9705c
--- /dev/null
+++ b/usr/src/uts/sun4u/io/px/px_tools.c
@@ -0,0 +1,1038 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/sysmacros.h>
+#include <sys/machsystm.h>
+#include <sys/promif.h>
+#include <sys/cpuvar.h>
+
+#include <sys/ddi_implfuncs.h>
+#include <sys/pci/pci_regs.h>
+#include <px_csr.h>
+#include <px_regs.h> /* XXX - remove it later */
+#include <px_obj.h>
+#include <px_lib4u.h>
+#include <px_asm.h>
+#include <px_tools.h>
+#include <px_tools_var.h>
+
+/*
+ * Extract 64 bit parent or size values from 32 bit cells of
+ * px_ranges_t property.
+ *
+ * Only the low 11 bits of parent_high (bits 42:32 of the combined
+ * 64-bit address) are relevant.
+ */
+#define PX_GET_RANGE_PROP(ranges, bank) \
+ ((((uint64_t)(ranges[bank].parent_high & 0x7ff)) << 32) | \
+ ranges[bank].parent_low)
+
+#define PX_GET_RANGE_PROP_SIZE(ranges, bank) \
+ ((((uint64_t)(ranges[bank].size_high)) << 32) | \
+ ranges[bank].size_low)
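+
+/*
+ * For example, parent_high = 0x7c0 and parent_low = 0 combine to a
+ * 64-bit parent base of 0x7c000000000; only the low 11 bits of
+ * parent_high contribute.
+ */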
+
+/* Big and little endian as boolean values. */
+#define BE B_TRUE
+#define LE B_FALSE
+
+#define SUCCESS 0
+
+/*
+ * PX hardware shifts bus / dev / function bits 4 to the left of their
+ * normal PCI placement.
+ */
+#define PX_PCI_BDF_OFFSET_DELTA 4
+
+/* Mechanism for getting offsets of smaller datatypes aligned in 64 bit long */
+typedef union {
+ uint64_t u64;
+ uint32_t u32;
+ uint16_t u16;
+ uint8_t u8;
+} peek_poke_value_t;
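+
+/*
+ * All union members share offset zero, so the assembly peek/poke
+ * routines can store or load a value of the requested size at the base
+ * of the union, and the C wrappers below then access it through the
+ * member that matches that size.
+ */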
+
+/*
+ * Offsets of BARs in config space. First entry of 0 means config space.
+ * Entries here correlate to pcitool_bars_t enumerated type.
+ */
+static uint8_t pci_bars[] = {
+ 0x0,
+ PCI_CONF_BASE0,
+ PCI_CONF_BASE1,
+ PCI_CONF_BASE2,
+ PCI_CONF_BASE3,
+ PCI_CONF_BASE4,
+ PCI_CONF_BASE5,
+ PCI_CONF_ROM
+};
+
+static int px_safe_phys_peek(boolean_t, size_t, uint64_t, uint64_t *);
+static int px_safe_phys_poke(boolean_t, size_t, uint64_t, uint64_t);
+static int px_validate_cpuid(uint32_t);
+static uint8_t px_ib_get_ino_devs(px_ib_t *ib_p, uint32_t ino,
+ uint8_t *devs_ret, pci_intr_dev_t *devs);
+static int px_access(dev_info_t *, uint64_t, uint64_t, uint64_t *,
+ uint8_t, boolean_t, boolean_t, uint32_t *);
+static int px_intr_get_max_ino(uint32_t *, int);
+static int px_get_intr(dev_info_t *, void *, int, px_t *);
+static int px_set_intr(dev_info_t *, void *, int, px_t *);
+
+/*
+ * Safe C wrapper around the assembly language routine px_phys_peek.
+ *
+ * Type is TRUE for big endian, FALSE for little endian.
+ * Size is 1, 2, 4 or 8 bytes.
+ * paddr is the physical address in IO space to read.
+ * value_p is where the value is returned.
+ */
+static int
+px_safe_phys_peek(boolean_t type, size_t size, uint64_t paddr,
+ uint64_t *value_p)
+{
+ on_trap_data_t otd;
+ int err = DDI_SUCCESS;
+ uintptr_t tramp;
+ peek_poke_value_t peek_value;
+
+ /* Set up trap handling to make the access safe. */
+
+ /*
+ * on_trap works like setjmp.
+ * Set it up to not panic on data access error,
+ * but to call peek_fault instead.
+	 * Call px_phys_peek after trap handling is set up.
+	 * When on_trap returns FALSE, it has been set up.
+	 * When it returns TRUE, it has caught an error.
+ */
+ if (!on_trap(&otd, OT_DATA_ACCESS)) {
+ tramp = otd.ot_trampoline;
+ otd.ot_trampoline = (uintptr_t)&peek_fault;
+ err = px_phys_peek(size, paddr, &peek_value.u64, type);
+ otd.ot_trampoline = tramp;
+ } else {
+ err = DDI_FAILURE;
+ }
+
+ no_trap();
+
+ if (err != DDI_FAILURE) {
+ switch (size) {
+ case 8:
+ *value_p = (uint64_t)peek_value.u64;
+ break;
+ case 4:
+ *value_p = (uint64_t)peek_value.u32;
+ break;
+ case 2:
+ *value_p = (uint64_t)peek_value.u16;
+ break;
+ case 1:
+ *value_p = (uint64_t)peek_value.u8;
+ break;
+ default:
+ err = DDI_FAILURE;
+ }
+ }
+
+ return (err);
+}
+
+/*
+ * Safe C wrapper around the assembly language routine px_phys_poke.
+ *
+ * Type is TRUE for big endian, FALSE for little endian.
+ * Size is 1, 2, 4 or 8 bytes.
+ * paddr is the physical address in IO space to write.
+ * value contains the value to be written.
+ */
+static int
+px_safe_phys_poke(boolean_t type, size_t size, uint64_t paddr, uint64_t value)
+{
+ on_trap_data_t otd;
+ int err = DDI_SUCCESS;
+ uintptr_t tramp;
+ peek_poke_value_t poke_value;
+
+ switch (size) {
+ case 8:
+ poke_value.u64 = value;
+ break;
+ case 4:
+ poke_value.u32 = (uint32_t)value;
+ break;
+ case 2:
+ poke_value.u16 = (uint16_t)value;
+ break;
+ case 1:
+ poke_value.u8 = (uint8_t)value;
+ break;
+ default:
+ return (DDI_FAILURE);
+ }
+
+ /*
+ * on_trap works like setjmp.
+ * Set it up to not panic on data access error,
+ * but to call poke_fault instead.
+	 * Call px_phys_poke after trap handling is set up.
+	 * When on_trap returns FALSE, it has been set up.
+	 * When it returns TRUE, it has caught an error.
+ */
+ if (!on_trap(&otd, OT_DATA_ACCESS)) {
+
+ tramp = otd.ot_trampoline;
+ otd.ot_trampoline = (uintptr_t)&poke_fault;
+ err = px_phys_poke(size, paddr, &poke_value.u64, type);
+ otd.ot_trampoline = tramp;
+ } else {
+ err = DDI_FAILURE;
+ }
+
+ no_trap();
+ return (err);
+}
+
+
+/*
+ * Validate the cpu_id passed in.
+ * A value of 1 will be returned for success and zero for failure.
+ */
+static int
+px_validate_cpuid(uint32_t cpu_id)
+{
+ extern cpu_t *cpu[NCPU];
+ int rval = 1;
+
+ ASSERT(mutex_owned(&cpu_lock));
+
+ if (cpu_id >= NCPU) {
+ rval = 0;
+
+ } else if (cpu[cpu_id] == NULL) {
+ rval = 0;
+
+ } else if (!(cpu_is_online(cpu[cpu_id]))) {
+ rval = 0;
+ }
+
+ return (rval);
+}
+
+
+/*
+ * Return information about the devices mapped to a given ino.
+ * The capacity of the devs array is passed in via the devs_ret arg.
+ * The number of entries actually filled in is returned via devs_ret.
+ * The device information itself is returned in the devs argument.
+ * The function's return value is the total number of devices mapped
+ * to the given ino.
+ */
+static uint8_t
+px_ib_get_ino_devs(
+ px_ib_t *ib_p, uint32_t ino, uint8_t *devs_ret, pci_intr_dev_t *devs)
+{
+ px_ib_ino_info_t *ino_p;
+ px_ih_t *ih_p;
+ uint32_t num_devs = 0;
+ int i;
+
+ mutex_enter(&ib_p->ib_ino_lst_mutex);
+ ino_p = px_ib_locate_ino(ib_p, ino);
+ if (ino_p != NULL) {
+ num_devs = ino_p->ino_ih_size;
+ for (i = 0, ih_p = ino_p->ino_ih_head;
+ ((i < ino_p->ino_ih_size) && (i < *devs_ret));
+ i++, ih_p = ih_p->ih_next) {
+ (void) strncpy(devs[i].driver_name,
+ ddi_driver_name(ih_p->ih_dip), MAXMODCONFNAME-1);
+			devs[i].driver_name[MAXMODCONFNAME - 1] = '\0';
+ (void) ddi_pathname(ih_p->ih_dip, devs[i].path);
+ devs[i].dev_inst = ddi_get_instance(ih_p->ih_dip);
+ }
+ *devs_ret = i;
+ }
+
+ mutex_exit(&ib_p->ib_ino_lst_mutex);
+
+ return (num_devs);
+}
+
+
+/* Return the number of interrupts on a pci bus. */
+static int
+px_intr_get_max_ino(uint32_t *arg, int mode)
+{
+ uint32_t num_intr = INTERRUPT_MAPPING_ENTRIES;
+
+ if (ddi_copyout(&num_intr, arg, sizeof (uint32_t), mode) !=
+ DDI_SUCCESS) {
+
+ return (EFAULT);
+ } else {
+
+ return (SUCCESS);
+ }
+}
+
+
+/*
+ * Get interrupt information for a given ino.
+ * Returns info only for inos mapped to devices.
+ *
+ * Returned info is valid only when iget.num_devs is returned > 0.
+ * If ino is not enabled or is not mapped to a device,
+ * iget.num_devs will be returned as = 0.
+ */
+/*ARGSUSED*/
+static int
+px_get_intr(dev_info_t *dip, void *arg, int mode, px_t *px_p)
+{
+	/* Only the header fields are used here, not the trailing dev array. */
+ pci_intr_get_t partial_iget;
+ pci_intr_get_t *iget = &partial_iget;
+ size_t iget_kmem_alloc_size = 0;
+ px_ib_t *ib_p = px_p->px_ib_p;
+ uint64_t csrbase = (uint64_t)px_p->px_address[PX_REG_CSR];
+ uint64_t imregval;
+ uint32_t ino;
+ uint8_t num_devs_ret;
+ int copyout_rval;
+ int rval = SUCCESS;
+
+ /* Read in just the header part, no array section. */
+ if (ddi_copyin(arg, &partial_iget, IGET_SIZE(0), mode) != DDI_SUCCESS) {
+
+ return (EFAULT);
+ }
+
+ ino = partial_iget.ino;
+ num_devs_ret = partial_iget.num_devs_ret;
+
+ /* Validate argument. */
+ if (partial_iget.ino > INTERRUPT_MAPPING_ENTRIES) {
+ partial_iget.status = PCITOOL_INVALID_INO;
+ partial_iget.num_devs_ret = 0; /* XXX ret this for all errs. */
+ rval = EINVAL;
+ goto done_get_intr;
+ }
+
+ /* Caller wants device information returned. */
+ if (num_devs_ret > 0) {
+
+ /*
+ * Allocate room.
+		 * Note that if num_devs_ret == 0, iget remains pointing to
+ * partial_iget.
+ */
+ iget_kmem_alloc_size = IGET_SIZE(num_devs_ret);
+ iget = kmem_alloc(iget_kmem_alloc_size, KM_SLEEP);
+
+ /* Read in whole structure to verify there's room. */
+ if (ddi_copyin(arg, iget, iget_kmem_alloc_size, mode) !=
+ SUCCESS) {
+
+ /* Be consistent and just return EFAULT here. */
+ kmem_free(iget, iget_kmem_alloc_size);
+
+ return (EFAULT);
+ }
+ }
+
+ bzero(iget, IGET_SIZE(num_devs_ret));
+ iget->ino = ino;
+ iget->num_devs_ret = num_devs_ret;
+
+ imregval = CSRA_XR(csrbase, INTERRUPT_MAPPING, iget->ino);
+
+ /*
+ * Read "valid" bit. If set, interrupts are enabled.
+ * This bit happens to be the same on Fire and Tomatillo.
+ */
+ if (imregval & (1ULL << INTERRUPT_MAPPING_ENTRIES_V)) {
+
+ /*
+ * The following looks up the px_ib_ino_info and returns
+ * info of devices mapped to this ino.
+ */
+ iget->num_devs = px_ib_get_ino_devs(
+ ib_p, ino, &iget->num_devs_ret, iget->dev);
+
+ /*
+ * Consider only inos mapped to devices (as opposed to
+		 * inos mapped to the bridge itself).
+ */
+ if (iget->num_devs > 0) {
+
+ /*
+ * These 2 items are platform specific,
+ * extracted from the bridge.
+ */
+ iget->ctlr = (imregval >>
+ INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM) &
+ INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK;
+ iget->cpu_id = (imregval >>
+ INTERRUPT_MAPPING_ENTRIES_T_JPID) &
+ INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK;
+ }
+ }
+done_get_intr:
+ copyout_rval = ddi_copyout(iget, arg, IGET_SIZE(num_devs_ret), mode);
+ if (iget_kmem_alloc_size > 0) {
+ kmem_free(iget, iget_kmem_alloc_size);
+ }
+ if (copyout_rval != DDI_SUCCESS) {
+ rval = EFAULT;
+ }
+
+ return (rval);
+}
+
+
+/*
+ * Associate a new CPU with a given ino.
+ *
+ * Operate only on inos which are already mapped to devices.
+ */
+static int
+px_set_intr(dev_info_t *dip, void *arg, int mode, px_t *px_p)
+{
+ uint8_t zero = 0;
+ pci_intr_set_t iset;
+ uint32_t old_cpu_id;
+ hrtime_t start_time;
+ uint64_t csrbase = (uint64_t)px_p->px_address[PX_REG_CSR];
+ px_ib_t *ib_p = px_p->px_ib_p;
+ uint64_t imregval;
+ uint64_t new_imregval;
+ uint64_t *idregp;
+ int rval = SUCCESS;
+
+ if (ddi_copyin(arg, &iset, sizeof (pci_intr_set_t), mode) !=
+ DDI_SUCCESS) {
+
+ return (EFAULT);
+ }
+
+ /* Validate input argument. */
+ if (iset.ino > INTERRUPT_MAPPING_ENTRIES) {
+ iset.status = PCITOOL_INVALID_INO;
+ rval = EINVAL;
+ goto done_set_intr;
+ }
+
+ /* Validate that ino given belongs to a device. */
+ if (px_ib_get_ino_devs(ib_p, iset.ino, &zero, NULL) == 0) {
+ iset.status = PCITOOL_INVALID_INO;
+ rval = EINVAL;
+ goto done_set_intr;
+ }
+
+ idregp = (uint64_t *)PX_INTR_DIAG_REG(csrbase, iset.ino);
+
+ DBG(DBG_TOOLS, dip, "set_intr: cpu:%d, ino:0x%x\n",
+ iset.cpu_id, iset.ino);
+
+ /* Save original mapreg value. */
+ imregval = CSRA_XR(csrbase, INTERRUPT_MAPPING, iset.ino);
+
+ DBG(DBG_TOOLS, dip, "orig mapreg value: 0x%llx\n", imregval);
+
+ /* Is this request a noop? */
+ old_cpu_id = (imregval >> INTERRUPT_MAPPING_ENTRIES_T_JPID) &
+ INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK;
+ if (old_cpu_id == iset.cpu_id) {
+ iset.status = PCITOOL_SUCCESS;
+ goto done_set_intr;
+ }
+
+ /* Operate only on inos which are already enabled. */
+ if (!(imregval & (1ULL << INTERRUPT_MAPPING_ENTRIES_V))) {
+ iset.status = PCITOOL_INVALID_INO;
+ rval = EINVAL;
+ goto done_set_intr;
+ }
+
+ /* Clear the interrupt valid/enable bit for particular ino. */
+ DBG(DBG_TOOLS, dip, "Clearing intr_enabled...\n");
+
+ CSRA_BC(csrbase, INTERRUPT_MAPPING, iset.ino, ENTRIES_V);
+
+ /* Wait until there are no more pending interrupts. */
+ start_time = gethrtime();
+
+ DBG(DBG_TOOLS, dip, "About to check for pending interrupts...\n");
+
+ while (PX_INTR_STATUS(idregp, iset.ino) ==
+ COMMON_CLEAR_INTR_REG_PENDING) {
+
+ DBG(DBG_TOOLS, dip, "Waiting for pending ints to clear\n");
+
+ if ((gethrtime() - start_time) < px_intrpend_timeout) {
+ continue;
+
+ /* Timed out waiting. */
+ } else {
+ iset.status = PCITOOL_PENDING_INTRTIMEOUT;
+ rval = ETIME;
+ goto done_set_intr;
+ }
+ }
+
+ new_imregval = CSRA_XR(csrbase, INTERRUPT_MAPPING, iset.ino);
+
+ DBG(DBG_TOOLS, dip,
+ "after disabling intr, mapreg value: 0x%llx\n", new_imregval);
+
+ /* Prepare new mapreg value with interrupts enabled and new cpu_id. */
+ new_imregval |= (1ULL << INTERRUPT_MAPPING_ENTRIES_V);
+ new_imregval &= ~(INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK <<
+ INTERRUPT_MAPPING_ENTRIES_T_JPID);
+ new_imregval |= (iset.cpu_id << INTERRUPT_MAPPING_ENTRIES_T_JPID);
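+
+	/*
+	 * The T_JPID field of the mapping register identifies the target
+	 * CPU for this ino, so writing the new cpu_id into that field
+	 * (with the valid bit set again) is what retargets the interrupt.
+	 */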
+
+ /*
+ * Get lock, validate cpu and write new mapreg value.
+ * Return original cpu value to caller via iset.cpu.
+ */
+ mutex_enter(&cpu_lock);
+ if (px_validate_cpuid(iset.cpu_id)) {
+
+ DBG(DBG_TOOLS, dip, "Writing new mapreg value:0x%llx\n",
+ new_imregval);
+
+ CSRA_XS(csrbase, INTERRUPT_MAPPING, iset.ino, new_imregval);
+ mutex_exit(&cpu_lock);
+ iset.cpu_id = old_cpu_id;
+ iset.status = PCITOOL_SUCCESS;
+
+ /* Invalid cpu. Restore original register image. */
+ } else {
+
+ DBG(DBG_TOOLS, dip,
+ "Invalid cpuid: writing orig mapreg value\n");
+
+ CSRA_XS(csrbase, INTERRUPT_MAPPING, iset.ino, imregval);
+ mutex_exit(&cpu_lock);
+ iset.status = PCITOOL_INVALID_CPUID;
+ rval = EINVAL;
+ }
+done_set_intr:
+ if (ddi_copyout(&iset, arg, sizeof (pci_intr_set_t), mode) !=
+ DDI_SUCCESS) {
+
+ rval = EFAULT;
+ }
+
+ return (rval);
+}
+
+
+/* Main function for handling interrupt CPU binding requests and queries. */
+int
+px_intr_admn(dev_info_t *dip, void *arg, int cmd, int mode, px_t *px_p)
+{
+ int rval = SUCCESS;
+
+ switch (cmd) {
+
+ /* Return the number of interrupts supported by a PCI bus. */
+ case PCITOOL_DEVICE_NUM_INTR:
+ rval = px_intr_get_max_ino(arg, mode);
+ break;
+
+ /* Get interrupt information for a given ino. */
+ case PCITOOL_DEVICE_GET_INTR:
+ rval = px_get_intr(dip, arg, mode, px_p);
+ break;
+
+ /* Associate a new CPU with a given ino. */
+ case PCITOOL_DEVICE_SET_INTR:
+ rval = px_set_intr(dip, arg, mode, px_p);
+ break;
+
+ default:
+ rval = ENOTTY;
+ }
+
+ return (rval);
+}
+
+
+/*
+ * Wrapper around px_safe_phys_peek/poke.
+ *
+ * Validates arguments and calls px_safe_phys_peek/poke appropriately.
+ *
+ * dip is the dev_info node of the nexus,
+ * phys_addr is the address to write in physical space,
+ * max_addr is the upper bound on the physical space used for bounds checking,
+ * pcitool_status returns more detailed status in addition to a more generic
+ * errno-style function return value.
+ * other args are self-explanatory.
+ */
+static int
+px_access(dev_info_t *dip, uint64_t phys_addr, uint64_t max_addr,
+ uint64_t *data, uint8_t size, boolean_t write, boolean_t endian,
+ uint32_t *pcitool_status)
+{
+
+ int rval = SUCCESS;
+
+ /* Upper bounds checking. */
+ if (phys_addr > max_addr) {
+ DBG(DBG_TOOLS, dip,
+ "Phys addr 0x%llx out of range (max 0x%llx).\n",
+ phys_addr, max_addr);
+ *pcitool_status = PCITOOL_INVALID_ADDRESS;
+
+ rval = EINVAL;
+
+ /* Alignment checking. */
+ } else if (!IS_P2ALIGNED(phys_addr, size)) {
+ DBG(DBG_TOOLS, dip, "not aligned.\n");
+ *pcitool_status = PCITOOL_NOT_ALIGNED;
+
+ rval = EINVAL;
+
+ /* Made it through checks. Do the access. */
+ } else if (write) {
+
+ DBG(DBG_PHYS_ACC, dip,
+ "%d byte %s px_safe_phys_poke at addr 0x%llx\n",
+ size, (endian ? "BE" : "LE"), phys_addr);
+
+ if (px_safe_phys_poke(endian, size, phys_addr, *data) !=
+ DDI_SUCCESS) {
+ DBG(DBG_PHYS_ACC, dip,
+ "%d byte %s px_safe_phys_poke at addr "
+ "0x%llx failed\n",
+ size, (endian ? "BE" : "LE"), phys_addr);
+ *pcitool_status = PCITOOL_INVALID_ADDRESS;
+
+ rval = EFAULT;
+ }
+
+ /* Read */
+ } else {
+
+ DBG(DBG_PHYS_ACC, dip,
+ "%d byte %s px_safe_phys_peek at addr 0x%llx\n",
+ size, (endian ? "BE" : "LE"), phys_addr);
+
+ if (px_safe_phys_peek(endian, size, phys_addr, data) !=
+ DDI_SUCCESS) {
+ DBG(DBG_PHYS_ACC, dip,
+ "%d byte %s px_safe_phys_peek at addr "
+ "0x%llx failed\n",
+ size, (endian ? "BE" : "LE"), phys_addr);
+ *pcitool_status = PCITOOL_INVALID_ADDRESS;
+
+ rval = EFAULT;
+ }
+ }
+ return (rval);
+}
+
+/*
+ * Perform register accesses on the nexus device itself.
+ */
+int
+px_bus_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
+{
+
+ px_nexus_regspec_t *px_rp = NULL;
+ pci_reg_t pr;
+ uint64_t base_addr;
+ uint64_t max_addr;
+ uint32_t reglen;
+ uint32_t numbanks = 0;
+ uint8_t size;
+ uint32_t rval = 0;
+ boolean_t write_flag = B_FALSE;
+
+ switch (cmd) {
+ case PCITOOL_NEXUS_SET_REG:
+ write_flag = B_TRUE;
+
+ /*FALLTHRU*/
+ case PCITOOL_NEXUS_GET_REG:
+ DBG(DBG_TOOLS, dip, "px_bus_reg_ops set/get reg\n");
+
+ /* Read data from userland. */
+ if (ddi_copyin(arg, &pr, sizeof (pci_reg_t),
+ mode) != DDI_SUCCESS) {
+ DBG(DBG_TOOLS, dip, "Error reading arguments\n");
+ return (EFAULT);
+ }
+
+ /*
+ * Read reg property which contains starting addr
+ * and size of banks.
+ */
+		if (ddi_prop_lookup_int_array(
+		    DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+		    "reg", (int **)&px_rp, &reglen) != DDI_SUCCESS) {
+			DBG(DBG_TOOLS, dip, "Error reading reg property\n");
+			pr.status = PCITOOL_REGPROP_NOTWELLFORMED;
+			rval = EIO;
+			goto done;
+		}
+
+		if (((reglen * sizeof (int)) %
+		    sizeof (px_nexus_regspec_t)) != 0) {
+			DBG(DBG_TOOLS, dip,
+			    "reg prop not well-formed");
+			pr.status = PCITOOL_REGPROP_NOTWELLFORMED;
+			rval = EIO;
+			goto done;
+		}
+
+ numbanks =
+ (reglen * sizeof (int)) / sizeof (px_nexus_regspec_t);
+
+ /* Bounds check the bank number. */
+ if (pr.barnum >= numbanks) {
+ pr.status = PCITOOL_OUT_OF_RANGE;
+ rval = EINVAL;
+ goto done;
+ }
+
+ size = PX_ACC_ATTR_SIZE(pr.acc_attr);
+ base_addr = px_rp[pr.barnum].phys_addr;
+ max_addr = base_addr + px_rp[pr.barnum].size;
+ pr.phys_addr = base_addr + pr.offset;
+
+ DBG(DBG_TOOLS, dip,
+ "px_bus_reg_ops: nexus: base:0x%llx, offset:0x%llx, "
+ "addr:0x%llx, max_addr:0x%llx\n",
+ base_addr, pr.offset, pr.phys_addr, max_addr);
+
+ /* Access device. pr.status is modified. */
+ rval = px_access(dip,
+ pr.phys_addr, max_addr, &pr.data, size, write_flag,
+ (pr.acc_attr & PX_ACC_ATTR_ENDN_BIG), /* BE/LE */
+ &pr.status);
+
+ break;
+
+ default:
+ return (ENOTTY);
+ }
+
+done:
+ if (px_rp != NULL)
+ ddi_prop_free(px_rp);
+
+ if (ddi_copyout(&pr, arg, sizeof (pci_reg_t),
+ mode) != DDI_SUCCESS) {
+ DBG(DBG_TOOLS, dip, "Copyout failed.\n");
+ return (EFAULT);
+ }
+
+ return (rval);
+}
+
+
+/* Perform register accesses on PCI leaf devices. */
+int
+px_dev_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode, px_t *px_p)
+{
+ pci_reg_t prg;
+ uint64_t max_addr;
+ uint64_t base_addr;
+ uint64_t range_prop;
+ uint64_t range_prop_size;
+ uint64_t bar = 0;
+ int rval = 0;
+ boolean_t write_flag = B_FALSE;
+ px_ranges_t *rp = px_p->px_ranges_p;
+ uint8_t size;
+ uint8_t bar_offset;
+
+ switch (cmd) {
+ case (PCITOOL_DEVICE_SET_REG):
+ write_flag = B_TRUE;
+
+ /*FALLTHRU*/
+ case (PCITOOL_DEVICE_GET_REG):
+ DBG(DBG_TOOLS, dip, "px_dev_reg_ops set/get reg\n");
+ if (ddi_copyin(arg, &prg, sizeof (pci_reg_t),
+ mode) != DDI_SUCCESS) {
+ DBG(DBG_TOOLS, dip, "Error reading arguments\n");
+ return (EFAULT);
+ }
+
+ if (prg.barnum >= (sizeof (pci_bars) / sizeof (pci_bars[0]))) {
+ prg.status = PCITOOL_OUT_OF_RANGE;
+ rval = EINVAL;
+ goto done_reg;
+ }
+
+ DBG(DBG_TOOLS, dip, "raw bus:0x%x, dev:0x%x, func:0x%x\n",
+ prg.bus_no, prg.dev_no, prg.func_no);
+
+ /* Validate address arguments of bus / dev / func */
+ if (((prg.bus_no &
+ (PCI_REG_BUS_M >> PCI_REG_BUS_SHIFT)) !=
+ prg.bus_no) ||
+ ((prg.dev_no &
+ (PCI_REG_DEV_M >> PCI_REG_DEV_SHIFT)) !=
+ prg.dev_no) ||
+ ((prg.func_no &
+ (PCI_REG_FUNC_M >> PCI_REG_FUNC_SHIFT)) !=
+ prg.func_no)) {
+ prg.status = PCITOOL_INVALID_ADDRESS;
+ rval = EINVAL;
+ goto done_reg;
+ }
+
+ size = PX_ACC_ATTR_SIZE(prg.acc_attr);
+
+ /* Get config space first. */
+ range_prop = PX_GET_RANGE_PROP(rp, PCI_CONFIG_RANGE_BANK);
+ range_prop_size =
+ PX_GET_RANGE_PROP_SIZE(rp, PCI_CONFIG_RANGE_BANK);
+ max_addr = range_prop + range_prop_size;
+
+ /*
+ * Build device address based on base addr from range prop, and
+ * bus, dev and func values passed in. This address is where
+ * config space begins.
+ */
+ base_addr =
+ (prg.bus_no << PCI_REG_BUS_SHIFT) +
+ (prg.dev_no << PCI_REG_DEV_SHIFT) +
+ (prg.func_no << PCI_REG_FUNC_SHIFT);
+
+		/* BDF bits are shifted 4 more bits to the left by Fire hardware. */
+ base_addr = (base_addr << PX_PCI_BDF_OFFSET_DELTA) + range_prop;
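+
+		/*
+		 * After the extra 4-bit shift the bus number lands at bit
+		 * 20, the device at bit 15 and the function at bit 12 of
+		 * the offset from range_prop, i.e. the PCIe extended
+		 * config space layout.
+		 */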
+
+ if ((base_addr < range_prop) || (base_addr >= max_addr)) {
+ prg.status = PCITOOL_OUT_OF_RANGE;
+ rval = EINVAL;
+ goto done_reg;
+ }
+
+ DBG(DBG_TOOLS, dip,
+ "range_prop:0x%llx, shifted: bus:0x%x, dev:0x%x "
+ "func:0x%x, addr:0x%x",
+ range_prop,
+ prg.bus_no << (PCI_REG_BUS_SHIFT + PX_PCI_BDF_OFFSET_DELTA),
+ prg.dev_no << (PCI_REG_DEV_SHIFT + PX_PCI_BDF_OFFSET_DELTA),
+ prg.func_no <<
+ (PCI_REG_FUNC_SHIFT + PX_PCI_BDF_OFFSET_DELTA),
+ base_addr);
+
+ /* Proper config space desired. */
+ if (prg.barnum == 0) {
+
+ /* Access config space and we're done. */
+ prg.phys_addr = base_addr + prg.offset;
+
+ DBG(DBG_TOOLS, dip,
+ "config access: base:0x%llx, offset:0x%llx, "
+ "phys_addr:0x%llx, end:%s\n",
+ base_addr, prg.offset, prg.phys_addr,
+ (prg.acc_attr & PX_ACC_ATTR_ENDN_BIG)?"big":"ltl");
+
+ /* Access device. pr.status is modified. */
+ rval = px_access(dip,
+ prg.phys_addr, max_addr,
+ &prg.data, size, write_flag,
+ (prg.acc_attr & PX_ACC_ATTR_ENDN_BIG), /* BE/LE */
+ &prg.status);
+
+ DBG(DBG_TOOLS, dip, "config access: data:0x%llx\n",
+ prg.data);
+
+ /* IO/ MEM/ MEM64 space. */
+ } else {
+
+ /*
+ * Translate BAR number into offset of the BAR in
+ * the device's config space.
+ */
+ bar_offset = pci_bars[prg.barnum];
+
+ DBG(DBG_TOOLS, dip, "barnum:%d, bar_offset:0x%x\n",
+ prg.barnum, bar_offset);
+
+ /*
+			 * Get Base Address Register (BAR) from config space.
+ * bar_offset is the offset into config space of the
+ * BAR desired. prg.status is modified on error.
+ */
+ rval = px_access(dip,
+ base_addr + bar_offset,
+ max_addr, &bar,
+ 4, /* 4 bytes. */
+ B_FALSE, /* Read */
+ B_FALSE, /* Little endian. */
+ &prg.status);
+ if (rval != SUCCESS) {
+ goto done_reg;
+ }
+
+ /*
+ * Reference proper PCI space based on the BAR.
+ * If 64 bit MEM space, need to load other half of the
+ * BAR first.
+ */
+
+ DBG(DBG_TOOLS, dip, "bar returned is 0x%llx\n", bar);
+ if (!bar) {
+ rval = EINVAL;
+ prg.status = PCITOOL_INVALID_ADDRESS;
+ goto done_reg;
+ }
+
+ /*
+ * BAR has bits saying this space is IO space, unless
+ * this is the ROM address register.
+ */
+ if (((PCI_BASE_SPACE_M & bar) == PCI_BASE_SPACE_IO) &&
+ (bar_offset != PCI_CONF_ROM)) {
+ DBG(DBG_TOOLS, dip, "IO space\n");
+
+ /* Reposition to focus on IO space. */
+ range_prop = PX_GET_RANGE_PROP(rp,
+ PCI_IO_RANGE_BANK);
+ range_prop_size = PX_GET_RANGE_PROP_SIZE(rp,
+ PCI_IO_RANGE_BANK);
+
+ bar &= PCI_BASE_IO_ADDR_M;
+
+ /*
+ * BAR has bits saying this space is 64 bit memory
+ * space, unless this is the ROM address register.
+ *
+ * The 64 bit address stored in two BAR cells is not
+ * necessarily aligned on an 8-byte boundary.
+ * Need to keep the first 4 bytes read,
+ * and do a separate read of the high 4 bytes.
+ */
+
+ } else if ((PCI_BASE_TYPE_ALL & bar) &&
+ (bar_offset != PCI_CONF_ROM)) {
+
+ uint32_t low_bytes =
+ (uint32_t)(bar & ~PCI_BASE_TYPE_ALL);
+
+ /*
+ * Don't try to read the next 4 bytes
+ * past the end of BARs.
+ */
+ if (bar_offset >= PCI_CONF_BASE5) {
+ prg.status = PCITOOL_OUT_OF_RANGE;
+ rval = EIO;
+ goto done_reg;
+ }
+
+ /*
+ * Access device.
+ * prg.status is modified on error.
+ */
+ rval = px_access(dip,
+ base_addr + bar_offset + 4,
+ max_addr, &bar,
+ 4, /* 4 bytes. */
+ B_FALSE, /* Read */
+ B_FALSE, /* Little endian. */
+ &prg.status);
+ if (rval != SUCCESS) {
+ goto done_reg;
+ }
+
+ bar = (bar << 32) + low_bytes;
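+
+				/*
+				 * bar now holds the full 64-bit base built
+				 * from the two consecutive BAR cells: the
+				 * upper 32 bits just read and the low 32
+				 * bits saved in low_bytes.
+				 */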
+
+ DBG(DBG_TOOLS, dip,
+ "64 bit mem space. 64-bit bar is 0x%llx\n",
+ bar);
+
+ /* Reposition to MEM64 range space. */
+ range_prop = PX_GET_RANGE_PROP(rp,
+ PCI_MEM64_RANGE_BANK);
+ range_prop_size = PX_GET_RANGE_PROP_SIZE(rp,
+ PCI_MEM64_RANGE_BANK);
+
+ /* Mem32 space, including ROM */
+ } else {
+
+ if (bar_offset == PCI_CONF_ROM) {
+
+ DBG(DBG_TOOLS, dip,
+ "Additional ROM checking\n");
+
+ /* Can't write to ROM */
+ if (write_flag) {
+ prg.status = PCITOOL_ROM_WRITE;
+ rval = EINVAL;
+ goto done_reg;
+
+ /* ROM disabled for reading */
+ } else if (!(bar & 0x00000001)) {
+ prg.status =
+ PCITOOL_ROM_DISABLED;
+ rval = EINVAL;
+ goto done_reg;
+ }
+ }
+
+ DBG(DBG_TOOLS, dip, "32 bit mem space\n");
+ range_prop = PX_GET_RANGE_PROP(rp,
+ PCI_MEM_RANGE_BANK);
+ range_prop_size = PX_GET_RANGE_PROP_SIZE(rp,
+ PCI_MEM_RANGE_BANK);
+ }
+
+ /* Common code for all IO/MEM range spaces. */
+ max_addr = range_prop + range_prop_size;
+ base_addr = range_prop + bar;
+
+ DBG(DBG_TOOLS, dip,
+ "addr portion of bar is 0x%llx, base=0x%llx, "
+ "offset:0x%x\n", bar, base_addr, prg.offset);
+
+ /*
+ * Use offset provided by caller to index into
+ * desired space, then access.
+ * Note that prg.status is modified on error.
+ */
+ prg.phys_addr = base_addr + prg.offset;
+ rval = px_access(dip,
+ prg.phys_addr,
+ max_addr, &prg.data, size, write_flag,
+ (prg.acc_attr & PX_ACC_ATTR_ENDN_BIG), /* BE/LE */
+ &prg.status);
+ }
+done_reg:
+ if (ddi_copyout(&prg, arg, sizeof (pci_reg_t),
+ mode) != DDI_SUCCESS) {
+ DBG(DBG_TOOLS, dip, "Error returning arguments.\n");
+ rval = EFAULT;
+ }
+ break;
+ default:
+ rval = ENOTTY;
+ break;
+ }
+ return (rval);
+}
diff --git a/usr/src/uts/sun4u/io/px/px_tools_var.h b/usr/src/uts/sun4u/io/px/px_tools_var.h
new file mode 100644
index 0000000000..76b6e0f890
--- /dev/null
+++ b/usr/src/uts/sun4u/io/px/px_tools_var.h
@@ -0,0 +1,55 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PX_TOOLS_VAR_H
+#define _SYS_PX_TOOLS_VAR_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Functions exported from the px_tools.c module.
+ */
+int px_dev_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode, px_t *px_p);
+int px_bus_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode);
+int px_intr_admn(dev_info_t *dip, void *arg, int cmd, int mode, px_t *px_p);
+
+/*
+ * PCI Space definitions.
+ */
+#define PCI_CONFIG_RANGE_BANK (PCI_REG_ADDR_G(PCI_ADDR_CONFIG))
+#define PCI_IO_RANGE_BANK (PCI_REG_ADDR_G(PCI_ADDR_IO))
+#define PCI_MEM_RANGE_BANK (PCI_REG_ADDR_G(PCI_ADDR_MEM32))
+#define PCI_MEM64_RANGE_BANK (PCI_REG_ADDR_G(PCI_ADDR_MEM64))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PX_TOOLS_VAR_H */